id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
314517 | """Huduma URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from HudumaApp.views import *
# Route table for the Huduma project; view callables come from the star
# import of HudumaApp.views above.
urlpatterns = [
    path('admin/', admin.site.urls),
    # Public pages
    path('',homepage,name='homepage'),
    path('about/',aboutpage,name='aboutpage'),
    path('login/',loginpage,name='loginpage'),
    # NOTE(review): no trailing slash here, unlike every other route — confirm
    # this is intentional (APPEND_SLASH would otherwise redirect 'createaccount/').
    path('createaccount',createaccountpage,name='createaccountpage'),
    # Admin-side authentication and management
    path('admin_login/',Login_admin,name='login_admin'),
    path('adminhome/',AdminHome,name='adminhome'),
    path('adminlogout/',Logout_admin,name='adminlogout'),
    path('adminaddDoctor/',adminaddDoctor,name='adminaddDoctor'),
    path('adminviewDoctor/',adminviewDoctor,name='adminviewDoctor'),
    # NOTE(review): two converters with no separator between them — verify
    # '<int:pid><str:email>' matches as intended; a '/'-separated pattern is safer.
    path('adminDeleteDoctor<int:pid><str:email>',admin_delete_doctor,name='admin_delete_doctor'),
    path('adminaddReceptionist/',adminaddReceptionist,name='adminaddReceptionist'),
    path('adminviewReceptionist/',adminviewReceptionist,name='adminviewReceptionist'),
    # NOTE(review): literal ',' separates the converters here, inconsistent with
    # the doctor-delete route above — confirm callers build this URL with a comma.
    path('adminDeleteReceptionist<int:pid>,<str:email>',admin_delete_receptionist,name='admin_delete_receptionist'),
    path('adminviewAppointment/',adminviewAppointment,name='adminviewAppointment'),
    # Patient-side pages
    path('home/',Home,name='home'),
    path('profile/',profile,name='profile'),
    path('makeappointments/',MakeAppointments,name='makeappointments'),
    path('viewappointments/',viewappointments,name='viewappointments'),
    path('PatientDeleteAppointment<int:pid>',patient_delete_appointment,name='patient_delete_appointment'),
    path('logout/',Logout,name='logout'),
]
| StarcoderdataPython |
215571 | <filename>yatamana/slurm_task_manager.py
from __future__ import (
print_function, division, absolute_import, unicode_literals)
import logging
from math import ceil
from collections import OrderedDict
from .utils import which
from .task_manager import TaskManager
def format_time(seconds):
    """Render a duration as a Slurm time string (``DD-HH:MM:SS``).

    Parameters
    ----------
    seconds : int or float
        Duration in seconds; fractional values are rounded up to the
        next whole second.

    Returns
    -------
    s : string
        The interval formatted as zero-padded ``DD-HH:MM:SS``.
    """
    total = int(ceil(seconds))
    # Peel off each unit with divmod, smallest to largest.
    minutes, secs = divmod(total, 60)
    hours, mins = divmod(minutes, 60)
    days, hrs = divmod(hours, 24)
    return '%02d-%02d:%02d:%02d' % (days, hrs, mins, secs)
class SlurmTaskManager(TaskManager):
    """Task manager for Slurm.
    """
    # Absolute path to sbatch, resolved from PATH once at import time.
    default_submit_command = which('sbatch')

    def __init__(self, setup_file, **kwargs):
        super(SlurmTaskManager, self).__init__(setup_file, **kwargs)

    def map_opts(self, opts):
        """Map resolved task options into slurm-specific options.

        Parameters
        ----------
        opts : dict-like
            Resolved options to be mapped.

        Returns
        -------
        mapped : dict-like
            Options mapped into command-line options for sbatch.
        """
        log = logging.getLogger(self.__class__.__name__)
        mapped = OrderedDict()
        for name, value in opts.items():
            if name == 'raw':
                # Passed through untouched for the submit-command builder.
                mapped['raw'] = value
            elif name == 'current_working_directory':
                mapped[name] = ['-D', '.']
            elif name == 'log_filename':
                # Slurm expands %j to the numeric job id in the output path.
                value = value.replace('%(job_id)s', '%j')
                mapped[name] = ['-o', value]
            elif name == 'name':
                mapped[name] = ['-J', value]
            elif name == 'walltime':
                # Converted from seconds to Slurm's DD-HH:MM:SS format.
                mapped[name] = ['-t', format_time(value)]
            elif name == 'qos':
                mapped[name] = ['--qos=' + value]
            elif name == 'cores':
                # -N 1-1 pins all requested cores onto a single node.
                mapped[name] = ['-c', str(value), '-N', '1-1']
            elif name == 'memory':
                # Value is interpreted as gigabytes.
                mapped[name] = ['--mem=%dG' % value]
            elif name == 'dependencies':
                # e.g. -d afterok:123:456 — run only after those jobs succeed.
                mapped[name] = ['-d', ':'.join(
                    ['afterok'] + [str(job_id) for job_id in value])]
            elif name == 'modules':
                # Modules are handled outside of sbatch options; nothing to map.
                pass
            else:
                log.error('Cannot map option: %s', name)
        return mapped

    def get_job_id(self, output):
        """Get job ID from the submission output.

        Parameters
        ----------
        output : string
            Output of sbatch.

        Returns
        -------
        job_id : int
            Extracted job ID.
        """
        # NOTE(review): assumes sbatch's usual "Submitted batch job <id>"
        # message — the id is taken as the fourth space-separated token;
        # confirm against the cluster's sbatch output.
        return int(output.split(' ')[3])
| StarcoderdataPython |
11254273 | from .base_trainer import BaseTrainer
from .base_dataloader import BaseDataLoader | StarcoderdataPython |
12820544 | <gh_stars>0
# -- coding:utf-8--
import multiprocessing
from urllib import request
import os
import shutil
import zipfile
import time
import datetime
import json
import sys
import socket
import uuid
import ssl
from mods import install_v2,lod_del,tool_win,update,lod_del,user,s_server,X,strat_v2,Aaes
ssl._create_default_https_context = ssl._create_unverified_context
def strat():
    """Bootstrap entry point: self-update, verify the v2ray install,
    authenticate the locally stored uuid with the server, then launch v2ray.

    Recurses into itself after registration, time-extension, or a
    (re)install until a verified session can be started.
    """
    # Check for program updates.
    update.update()
    # Check whether v2ray is installed correctly.
    if install_v2.t_v2():
        # Check whether a uuid has already been saved locally.
        zt, user_id = user.test_user_f()
        if zt:
            # Send the uuid to the server for verification.
            sr_re,sr_time,sr_json,sr_gg,sr_x = s_server.mod_1(user_id)
            # Did the server allow the connection?
            if sr_re:
                # Show server announcements.
                X.gg(sr_gg)
                X.dtml_srjc(sr_x)
                # Start v2ray with the returned configuration.
                strat_v2.start_v2(sr_json)
            else:
                # Time-extension mode, then retry from the top.
                s_server.mod_2(user_id)
                strat()
        else:
            # Generate a uuid and persist it to disk.
            user.write_user_id()
            # Register with the server.
            # NOTE(review): user_id here is the value test_user_f() returned
            # when it reported *no* saved uuid — confirm mod_3 should receive
            # it rather than the freshly generated one.
            s_server.mod_3(user_id)
            strat()
    else:
        print("您没有安装或没有正确安装v2ray!")
        try:
            # Helper that deletes the old installation files.
            install_v2.d_v2_s()
            print("正在重新安装v2")
            # Helper that creates the installation paths.
            install_v2.addlj()
            # Helper that downloads and installs v2ray.
            install_v2.get_v2ray()
            strat()
        except:
            print("正在安装v2")
            # Helper that creates the installation paths.
            install_v2.addlj()
            # Helper that downloads and installs v2ray.
            install_v2.get_v2ray()
            strat()
strat() | StarcoderdataPython |
3553750 | <filename>corehq/apps/reportfixtures/tests/test_indicator_fixture.py<gh_stars>1-10
from datetime import date, timedelta
from xml.etree import ElementTree
from sqlagg import SumColumn, filters
from sqlagg.base import AliasColumn
from sqlagg.columns import SimpleColumn
from corehq.apps.reports.sqlreport import AggregateColumn
from casexml.apps.case.tests.util import check_xml_line_by_line
from corehq.apps.reportfixtures.fixturegenerators import gen_fixture
from corehq.apps.reportfixtures.indicator_sets import SqlIndicatorSet
from corehq.apps.reportfixtures.tests.sql_fixture import load_data
from corehq.apps.reports.sqlreport import DatabaseColumn
from corehq.apps.users.models import CommCareUser
from django.test import TestCase
def _percentage(num, denom):
if num is not None and denom is not None:
return num / denom
return 0
class CallCenter(SqlIndicatorSet):
    """
    Assumes SQL table with the following columns:
    * case (string): the case id
    * date (date): the date of the indicator grain
    * cases_updated (integer): number of cases updated by on date
    """
    name = 'call_center'
    table_name = 'call_center'

    def __init__(self, domain, user, group=None, keys=None):
        # group: optional column name to group results by.
        # keys: optional explicit key rows to report on (used by the tests).
        super(CallCenter, self).__init__(domain, user)
        self.group = group
        self.test_keys = keys

    @property
    def filters(self):
        # Restrict the grain to the trailing week (bind params below).
        return ['date between :weekago and :today']

    @property
    def filter_values(self):
        # Named bind parameters referenced by the SQL filters; '2weekago'
        # is used by the week-prior column's extra filters.
        return {
            'today': date.today() - timedelta(days=1),
            'weekago': date.today() - timedelta(days=7),
            '2weekago': date.today() - timedelta(days=14),
        }

    @property
    def group_by(self):
        return [self.group] if self.group else []

    @property
    def keys(self):
        return self.test_keys

    @property
    def columns(self):
        # The leading 'case' column is only emitted when grouping is active.
        cols = [DatabaseColumn("case", SimpleColumn('case'), format_fn=self.map_case, sortable=False)] if self.group_by else []
        return cols + [
            DatabaseColumn('casesUpdatedInLastWeek', SumColumn('cases_updated'), sortable=False),
            # Same sum, restricted to the week before last via extra filters.
            DatabaseColumn('casesUpdatedInWeekPrior', SumColumn('cases_updated',
                           filters=[filters.GTE('date', '2weekago'), filters.LT('date', 'weekago')],
                           alias='casesUpdatedInWeekPrior'),
                           sortable=False),
            # total duration / cases updated, combined by _percentage above.
            AggregateColumn('averageDurationPerCase', _percentage,
                            [SumColumn('duration'), AliasColumn('cases_updated')],
                            sortable=False)
        ]

    def map_case(self, value):
        # Reverses the case id (e.g. '123' -> '321'), as pinned by the tests.
        return value[::-1]
class IndicatorFixtureTest(TestCase):
    """End-to-end checks of the XML fixture generated from CallCenter."""

    @classmethod
    def setUpClass(cls):
        # Populate the SQL table the indicator set reads from.
        load_data()
        cls.user = CommCareUser.create('qwerty', 'rudolph', '***')
        cls.config = dict(columns=['casesUpdatedInLastWeek', 'casesUpdatedInWeekPrior'])

    @classmethod
    def tearDownClass(cls):
        cls.user.delete()

    def test_callcenter_group(self):
        # Grouped by 'case': each case id appears reversed (see map_case).
        fixture = gen_fixture(self.user, CallCenter('domain', 'user', 'case'))
        check_xml_line_by_line(self, """
        <fixture id="indicators:call_center" user_id="{userid}">
            <indicators>
                <case id="321">
                    <casesUpdatedInLastWeek>3</casesUpdatedInLastWeek>
                    <casesUpdatedInWeekPrior>4</casesUpdatedInWeekPrior>
                    <averageDurationPerCase>7</averageDurationPerCase>
                </case>
            </indicators>
        </fixture>
        """.format(userid=self.user.user_id), ElementTree.tostring(fixture))

    def test_callcenter_no_group(self):
        # Ungrouped: indicator elements are emitted directly, no <case> wrapper.
        fixture = gen_fixture(self.user, CallCenter('domain', 'user'))
        check_xml_line_by_line(self, """
        <fixture id="indicators:call_center" user_id="{userid}">
            <indicators>
                <casesUpdatedInLastWeek>3</casesUpdatedInLastWeek>
                <casesUpdatedInWeekPrior>4</casesUpdatedInWeekPrior>
                <averageDurationPerCase>7</averageDurationPerCase>
            </indicators>
        </fixture>
        """.format(userid=self.user.user_id), ElementTree.tostring(fixture))

    def test_callcenter_keys(self):
        # Explicit keys: a case with no data still appears, with zero values.
        fixture = gen_fixture(self.user, CallCenter('domain', 'user', 'case', [['123'], ['456']]))
        check_xml_line_by_line(self, """
        <fixture id="indicators:call_center" user_id="{userid}">
            <indicators>
                <case id="321">
                    <casesUpdatedInLastWeek>3</casesUpdatedInLastWeek>
                    <casesUpdatedInWeekPrior>4</casesUpdatedInWeekPrior>
                    <averageDurationPerCase>7</averageDurationPerCase>
                </case>
                <case id="654">
                    <casesUpdatedInLastWeek>0</casesUpdatedInLastWeek>
                    <casesUpdatedInWeekPrior>0</casesUpdatedInWeekPrior>
                    <averageDurationPerCase>0</averageDurationPerCase>
                </case>
            </indicators>
        </fixture>
        """.format(userid=self.user.user_id), ElementTree.tostring(fixture))
| StarcoderdataPython |
9790617 | #! /usr/bin/env python3
def process(population, days):
    """Count lanternfish after *days* days (AoC 2021 day 6 recurrence).

    ``spawns[d]`` starts as the number of initial fish whose timer equals
    ``d``.  Every spawn event at day ``d`` schedules a follow-up spawn at
    ``d + 7`` (cycle restart) and its child's first spawn at ``d + 9``
    (expressed through the ``d - 2`` lookback).  The answer is all spawn
    events plus the initial population counted once more.
    """
    spawns = [population.count(day) for day in range(days)]
    for day in range(days - 7):
        spawns[day + 7] += spawns[day] + (spawns[day - 2] if day >= 2 else 0)
    initial = sum(population.count(day) for day in range(days))
    return sum(spawns) + initial
print(process(list(map(int, open("input").readline().split(","))), 256)) | StarcoderdataPython |
3491586 | import logging
import copy
import re
import avi.migrationtools.f5_converter.converter_constants as conv_const
from avi.migrationtools.f5_converter.conversion_util import F5Util
from avi.migrationtools.avi_migration_utils import update_count
LOG = logging.getLogger(__name__)
# Creating f5 object for util library.
conv_utils = F5Util()
class PoolConfigConv(object):
    """Base converter turning F5 ``pool`` config sections into Avi Pool
    (and, when servers carry priorities, PoolGroup) objects.

    Version-specific parsing lives in PoolConfigConvV10 / PoolConfigConvV11;
    this base class drives the overall conversion loop and holds the shared
    helpers.  Note: this module is Python 2 code (print statement,
    ``dict.keys()`` indexing).
    """

    @classmethod
    def get_instance(cls, version, f5_pool_attributes, prefix):
        """
        :param version: f5 version
        :param f5_pool_attributes: location of yaml file for supported
        attributes
        :param prefix: prefix for objects
        :return: version-specific converter instance (None for an
        unrecognized version)
        """
        if version == '10':
            return PoolConfigConvV10(f5_pool_attributes, prefix)
        if version in ['11', '12']:
            return PoolConfigConvV11(f5_pool_attributes, prefix)

    def convert_pool(self, pool_name, f5_config, avi_config, user_ignore,
                     tenant_ref, cloud_ref, merge_object_mapping, sys_dict,
                     vrf=None, segroup=None):
        # Implemented by the version-specific subclasses.
        pass

    def convert(self, f5_config, avi_config, user_ignore, tenant_ref,
                cloud_name, merge_object_mapping, sys_dict, vrf=None,
                segroup=None):
        """
        :param f5_config: parsed f5 config dict
        :param avi_config: dict for avi conversion
        :param user_ignore: Ignore config defined by user
        :param tenant_ref: tenant for which config need to be converted
        :param cloud_name: cloud for which config need to be converted
        :param merge_object_mapping: flag for merge object
        :param sys_dict: baseline profile dict
        :return:
        """
        pool_list = []
        pool_config = f5_config.get('pool', {})
        user_ignore = user_ignore.get('pool', {})
        avi_config['VrfContext'] = []
        avi_config['PoolGroup'] = []
        avi_config['PriorityLabels'] = {}
        # Initialize Global vrf context object
        vrf_context = {
            "name": 'global',
            "system_default": True,
            "tenant_ref": conv_utils.get_object_ref('admin', 'tenant'),
            "cloud_ref": conv_utils.get_object_ref(cloud_name, 'cloud'),
            "static_routes": []
        }
        avi_config['VrfContext'].append(vrf_context)
        total_size = len(pool_config.keys())
        # Added variable to get total object count.
        progressbar_count = 0
        print "Converting Pools..."
        for pool_name in pool_config.keys():
            progressbar_count += 1
            LOG.debug("Converting Pool: %s" % pool_name)
            f5_pool = pool_config[pool_name]
            if not f5_pool:
                msg = "Empty pool skipped for conversion :%s" % pool_name
                LOG.debug(msg)
                conv_utils.add_status_row('pool', None, pool_name,
                                          conv_const.STATUS_SKIPPED, msg)
                continue
            if 'gateway-failsafe-device' in f5_pool:
                msg = ("Not supported gateway-failsafe-device, pool skipped "
                       "for conversion :%s" % pool_name)
                LOG.debug(msg)
                conv_utils.add_status_row('pool', None, pool_name,
                                          conv_const.STATUS_SKIPPED, msg)
                continue
            try:
                converted_objs = self.convert_pool(
                    pool_name, f5_config, avi_config, user_ignore, tenant_ref,
                    cloud_name, merge_object_mapping, sys_dict, vrf, segroup)
                pool_list += converted_objs['pools']
                if 'pg_obj' in converted_objs:
                    avi_config['PoolGroup'].extend(converted_objs['pg_obj'])
                LOG.debug("Conversion successful for Pool: %s" % pool_name)
            except:
                # A failed pool is recorded and skipped; the loop continues.
                update_count('error')
                LOG.error("Failed to convert pool: %s" % pool_name,
                          exc_info=True)
                conv_utils.add_status_row('pool', None, pool_name,
                                          conv_const.STATUS_ERROR)
            # Added call to check progress.
            msg = "Pool and PoolGroup conversion started..."
            conv_utils.print_progress_bar(progressbar_count, total_size, msg,
                                          prefix='Progress', suffix='')
        avi_config['Pool'] = pool_list
        LOG.debug("Converted %s pools" % len(pool_list))
        f5_config.pop('pool', {})

    def get_monitor_refs(self, monitor_names, monitor_config_list, pool_name,
                         tenant_ref, merge_object_mapping, sys_dict):
        """
        :param monitor_names: name of monitor
        :param monitor_config_list: parsed dict of monitor_config_list
        :param pool_name: name of pool
        :param tenant_ref: tenant which need to be converted
        :param merge_object_mapping: flag for object merge
        :param sys_dict: baseline profile dict
        :return: (skipped monitor names, resolved healthmonitor refs)
        """
        skipped_monitors = []
        monitors = monitor_names.split(" ")
        monitor_refs = []
        # Tokens from F5 monitor expressions ("min 2 of { m1 m2 }") that are
        # not monitor names and must be ignored.
        garbage_val = ["and", "all", "min", "of", "{", "}", "none"]
        for monitor in monitors:
            monitor = monitor.strip()
            if not monitor or monitor in garbage_val or \
                    monitor.isdigit():
                continue
            if self.prefix:
                monitor = '%s-%s' % (self.prefix, monitor)
            tenant, monitor = conv_utils.get_tenant_ref(monitor)
            # Look up the monitor in the baseline (merged) objects first,
            # then among freshly converted monitors (including duplicates).
            monitor_obj = [ob for ob in sys_dict if ob['name'] ==
                           merge_object_mapping['health_monitor'].get(monitor)] \
                or [obj for obj in monitor_config_list if (
                    obj["name"] == monitor or monitor in
                    obj.get("dup_of", []))]
            if monitor_obj:
                tenant = conv_utils.get_name(
                    monitor_obj[0]['tenant_ref'])
                monitor_refs.append(conv_utils.get_object_ref(monitor_obj[0]['name'],
                                    'healthmonitor', tenant=tenant))
            else:
                LOG.warning("Monitor not found: %s for pool %s" %
                            (monitor, pool_name))
                skipped_monitors.append(monitor)
        return skipped_monitors, monitor_refs

    def create_pool_object(self, name, desc, servers, pd_action, algo,
                           ramp_time, limits, tenant_ref, cloud_ref):
        """
        :param name: name of pool
        :param desc: description of pool
        :param servers: servers list in pool
        :param pd_action: action on avi pool
        :param algo: algorithm used for pool
        :param ramp_time: slow-ramp time, mapped to connection_ramp_duration
        :param limits: dict with optional 'connection_limit' / 'rate_limit'
        :param tenant_ref: tenant of which output to be converted
        :param cloud_ref: cloud of which output to be converted
        :return: pool_obj
        """
        tenant, name = conv_utils.get_tenant_ref(name)
        # Added prefix for objects
        if self.prefix:
            name = self.prefix + '-' + name
        pool_obj = {
            'name': name,
            'description': desc,
            'servers': servers,
            'fail_action': pd_action,
            'lb_algorithm': algo,
            'cloud_ref': conv_utils.get_object_ref(cloud_ref, 'cloud')
        }
        # Non-admin conversions force the requested tenant.
        if not tenant_ref == 'admin':
            tenant = tenant_ref
        pool_obj['tenant_ref'] = conv_utils.get_object_ref(tenant, 'tenant')
        if ramp_time:
            pool_obj['connection_ramp_duration'] = ramp_time
        if limits.get('connection_limit', 0) > 0:
            pool_obj['max_concurrent_connections_per_server'] = \
                limits['connection_limit']
        if limits.get('rate_limit', 0) > 0:
            pool_obj['max_conn_rate_per_server'] = {
                'count': limits['rate_limit']
            }
        return pool_obj

    def check_for_pool_group(self, servers):
        """
        Check if the priority group for the server exist
        :param servers: List of servers to check server priority
        :return: if priority exist returns true and priority wise
        dict of servers
        """
        is_pool_group = False
        for server in servers:
            if 'priority' in server:
                is_pool_group = True
                break
        if not is_pool_group:
            return is_pool_group, None
        pg_dict = dict()
        for server in servers:
            priority = server.get('priority', None)
            if not priority:
                # Mixed servers (some without priority) disable pool-group
                # conversion entirely.
                is_pool_group = False
                break
            else:
                # Priority moves from the server onto the pool-group member.
                del server['priority']
                priority_list = pg_dict.get(priority, [])
                priority_list.append(server)
                pg_dict[priority] = priority_list
        return is_pool_group, pg_dict

    def add_status(self, name, skipped_attr, member_skipped, skipped_monitors,
                   converted_objs, user_ignore, skipped_servers):
        """Record the conversion status row for a pool, separating attributes
        the user asked to ignore from genuinely skipped ones."""
        skipped = []
        conv_status = dict()
        conv_status['user_ignore'] = []
        if skipped_attr:
            p_ignore = user_ignore.get('pool', [])
            conv_status['user_ignore'] = [val for val in skipped_attr
                                          if val in p_ignore]
            skipped_attr = [attr for attr in skipped_attr
                            if attr not in p_ignore]
            if skipped_attr:
                skipped.append(skipped_attr)
        if member_skipped:
            m_ignore = user_ignore.get('members', [])
            if m_ignore:
                # Split each member's skipped attributes into user-ignored
                # vs. really skipped; member_skipped entries are
                # single-key dicts {server_name: [attrs]}.
                ms_new = []
                um_list = []
                for obj in member_skipped:
                    um_skipped = dict()
                    um_skipped[obj.keys()[0]] = \
                        [val for val in obj[obj.keys()[0]] if val in m_ignore]
                    temp = [val for val in obj[obj.keys()[0]]
                            if val not in m_ignore]
                    if um_skipped[um_skipped.keys()[0]]:
                        um_list.append(um_skipped)
                    if temp:
                        ms_new.append({obj.keys()[0]: temp})
                conv_status['user_ignore'].append(um_list)
                if ms_new:
                    skipped.append(ms_new)
            else:
                skipped.append(member_skipped)
        if skipped_monitors and not user_ignore.get('monitor', None):
            skipped.append({"monitor": skipped_monitors})
        if skipped_servers:
            skipped.append({"server": skipped_servers})
        conv_status['skipped'] = skipped
        # Anything skipped downgrades the status to partial.
        status = conv_const.STATUS_SUCCESSFUL
        if skipped:
            status = conv_const.STATUS_PARTIAL
        conv_status['status'] = status
        conv_utils.add_conv_status('pool', None, name, conv_status,
                                   converted_objs)

    def convert_for_pg(self, pg_dict, pool_obj, name, tenant, avi_config,
                       cloud_ref):
        """
        Creates a pool group object
        :param pg_dict: priority wise sorted dict of pools
        :param pool_obj: Converted f5 pool object
        :param name: name of the pool
        :param tenant: tenant name for tenant reference
        :param avi_config: Avi config to add temporary labels
        :param cloud_ref: cloud name for object references
        :return: dict with the per-priority pools and the pool group object
        """
        pg_members = []
        pools = []
        for priority in pg_dict:
            # One clone of the pool per priority level, holding only the
            # servers of that priority.
            priority_pool = copy.deepcopy(pool_obj)
            priority_pool['servers'] = pg_dict[priority]
            priority_pool_ref = '%s-%s' % (name, priority)
            # Added prefix for objects
            if self.prefix:
                priority_pool_ref = self.prefix + '-' + priority_pool_ref
            priority_pool['name'] = priority_pool_ref
            pools.append(priority_pool)
            if priority_pool_ref:
                member = {
                    'pool_ref': conv_utils.get_object_ref(
                        priority_pool_ref, 'pool', tenant=tenant,
                        cloud_name=cloud_ref),
                    'priority_label': priority
                }
                pg_members.append(member)
        # Added prefix for objects
        if self.prefix:
            name = self.prefix + "-" + name
        pg_obj = {
            'name': name,
            'members': pg_members,
            'cloud_ref': conv_utils.get_object_ref(cloud_ref, 'cloud')
        }
        pg_obj['tenant_ref'] = conv_utils.get_object_ref(tenant, 'tenant')
        converted_objs = {
            'pools': pools,
            'pg_obj': [pg_obj]
        }
        return converted_objs
class PoolConfigConvV11(PoolConfigConv):
    """Pool converter for F5 v11/v12 config syntax (dash-separated keys
    such as 'service-down-action' and 'load-balancing-mode')."""

    def __init__(self, f5_pool_attributes, prefix):
        """
        :param f5_pool_attributes: f5 pool attributes from yaml file
        :param prefix: prefix for objects
        """
        self.supported_attr = f5_pool_attributes['Pool_supported_attr']
        self.supported_attributes = f5_pool_attributes[
            'Pool_supported_attr_convert_servers_config']
        self.ignore_for_val = f5_pool_attributes['Pool_ignore_val']
        # Added prefix for objects
        self.prefix = prefix

    def convert_pool(self, pool_name, f5_config, avi_config, user_ignore,
                     tenant_ref, cloud_ref, merge_object_mapping, sys_dict,
                     vrf=None, segroup=None):
        """
        :param pool_name: name of the pool
        :param f5_config: parsed f5 config dict
        :param avi_config: dict for avi conversion
        :param user_ignore: Ignore config defined by user
        :param tenant_ref: tenant of which output to converted
        :param cloud_ref: cloud of which output to converted
        :param merge_object_mapping: flag for merge object
        :param sys_dict: baseline dict
        :return: converted pool (or pool-group) objects
        """
        converted_objs = {}
        nodes = f5_config.get("node", {})
        f5_pool = f5_config['pool'][pool_name]
        monitor_config = avi_config['HealthMonitor']
        servers, member_skipped_config, limits, skipped_servers = \
            self.convert_servers_config(f5_pool.get("members", {}), nodes,
                                        avi_config, cloud_ref)
        sd_action = f5_pool.get("service-down-action", "")
        pd_action = conv_utils.get_avi_pool_down_action(sd_action)
        lb_method = f5_pool.get("load-balancing-mode", None)
        lb_algorithm = self.get_avi_lb_algorithm(lb_method)
        desc = f5_pool.get('description', None)
        ramp_time = f5_pool.get('slow-ramp-time', None)
        pool_obj = super(PoolConfigConvV11, self).create_pool_object(
            pool_name, desc, servers, pd_action, lb_algorithm, ramp_time,
            limits, tenant_ref, cloud_ref)
        # if length of servers > 400 take only 400 servers
        status_flag = False
        if len(servers) > 400:
            servers = servers[0:400]
            status_flag = True
        tenant, name = conv_utils.get_tenant_ref(pool_name)
        tenant_name = tenant
        if not tenant_ref == 'admin':
            tenant = tenant_ref
        # Map F5 'reselect-tries' to Avi server reselection on 4xx/5xx.
        num_retries = f5_pool.get('reselect-tries', None)
        if num_retries:
            server_reselect = {
                "retry_nonidempotent": False,
                "svr_resp_code": {
                    "resp_code_block": ["HTTP_RSP_4XX", "HTTP_RSP_5XX"]
                },
                "num_retries": num_retries,
                "enabled": True
            }
            pool_obj['server_reselect'] = server_reselect
        monitor_names = f5_pool.get("monitor", None)
        skipped_monitors = []
        if monitor_names:
            skipped_monitors, monitor_refs = super(
                PoolConfigConvV11, self).get_monitor_refs(
                monitor_names, monitor_config, pool_name, tenant,
                merge_object_mapping, sys_dict['HealthMonitor'])
            pool_obj["health_monitor_refs"] = list(set(monitor_refs))
        # Adding vrf context ref to pool obj
        vrf_config = avi_config['VrfContext']
        members = f5_pool.get('members')
        # First member's address (dict-style members) or the first token of a
        # string-style member entry; None when there are no members.
        address = (isinstance(members, dict) and members.get(members.keys()[
            0]) and isinstance(members[members.keys()[0]], dict)) and \
            members[members.keys()[0]].get('address') or isinstance(
            members, str) and members.split(' ')[0] or None if members \
            else None
        if vrf:
            vrf_ref = conv_utils.get_object_ref(vrf, 'vrfcontext',
                                                tenant=tenant_name,
                                                cloud_name=cloud_ref)
        else:
            vrf_ref = conv_utils.get_vrf_context_ref(address, vrf_config, 'pool',
                                                     pool_name, cloud_ref)
        if vrf_ref:
            pool_obj["vrf_ref"] = vrf_ref
        skipped_attr = [key for key in f5_pool.keys() if
                        key not in self.supported_attr]
        # Drop attributes whose value matches a configured "ignore" value.
        for attr in self.ignore_for_val:
            ignore_val = self.ignore_for_val[attr]
            actual_val = f5_pool.get(attr, None)
            if not actual_val:
                continue
            if isinstance(ignore_val, str) and actual_val == ignore_val:
                skipped_attr.remove(attr)
            elif isinstance(ignore_val, list) and actual_val in ignore_val:
                skipped_attr.remove(attr)
        is_pg, pg_dict = self.check_for_pool_group(servers)
        if is_pg:
            converted_objs = self.convert_for_pg(
                pg_dict, pool_obj, name, tenant, avi_config, cloud_ref)
        else:
            converted_objs['pools'] = [pool_obj]
        # Flag to make status partial for pool.
        if status_flag:
            skipped_attr.append('Skipped: length of servers more than 400')
        super(PoolConfigConvV11, self).add_status(
            pool_name, skipped_attr, member_skipped_config, skipped_monitors,
            converted_objs, user_ignore, skipped_servers)
        return converted_objs

    def get_avi_lb_algorithm(self, f5_algorithm):
        """
        Converts f5 LB algorithm to equivalent avi LB algorithm
        :param f5_algorithm: f5 algorithm name
        :return: Avi LB algorithm enum value (None when unrecognized)
        """
        avi_algorithm = None
        if not f5_algorithm or f5_algorithm in ["ratio-node", "ratio-member"]:
            avi_algorithm = "LB_ALGORITHM_ROUND_ROBIN"
        elif f5_algorithm in ["least-connections-member",
                              "least-connections-node", "least-sessions",
                              "weighted-least-connections-member",
                              "ratio-least-connections-member",
                              "ratio-least-connections-node",
                              "weighted-least-connections-node"]:
            avi_algorithm = "LB_ALGORITHM_LEAST_CONNECTIONS"
        elif f5_algorithm in ["fastest-node", "fastest-app-response"]:
            avi_algorithm = "LB_ALGORITHM_FASTEST_RESPONSE"
        elif f5_algorithm in ["dynamic-ratio-node", "observed-member",
                              "predictive-node", "dynamic-ratio-member",
                              "predictive-member", "observed-node"]:
            avi_algorithm = "LB_ALGORITHM_LEAST_LOAD"
        return avi_algorithm

    def convert_servers_config(self, servers_config, nodes, avi_config,
                               cloud_ref):
        """
        Converts the config of servers in the pool
        :param servers_config: F5 servers config for particular pool
        :param nodes: F5 node config to resolve IP of the server
        :param avi_config: Avi config dict, used to register VRFs found in
        route-domain ('%') addresses
        :param cloud_ref: cloud name for object references
        :return: (server list, per-server skipped attrs, limits dict,
        skipped server names)
        """
        server_list = []
        skipped_list = []
        rate_limit = []
        connection_limit = []
        server_skipped = []
        for server_name in servers_config.keys():
            server = servers_config[server_name]
            # Member keys look like '<node-or-ip>:<port>'.
            parts = server_name.split(':')
            node = nodes.get(parts[0], None)
            # '%' separates an address from its F5 route-domain id, which is
            # registered as a VRF on the Avi side.
            if node and node.get("address"):
                if '%' in node["address"]:
                    ip_addr, vrf = node["address"].split('%')
                    conv_utils.add_vrf(avi_config, vrf, cloud_ref)
                else:
                    ip_addr = node["address"]
            else:
                if '%' in parts[0]:
                    ip_addr, vrf = parts[0].split('%')
                    conv_utils.add_vrf(avi_config, vrf, cloud_ref)
                else:
                    ip_addr = parts[0]
            description = server.get('description', '')
            port = parts[1] if len(parts) == 2 else conv_const.DEFAULT_PORT
            orig_port = port
            # Named ports (e.g. 'http') are resolved to numbers; servers with
            # unresolvable ports are skipped.
            if not port.isdigit():
                port = conv_utils.get_port_by_protocol(port)
            if not port:
                LOG.warning("Skipped: Server %s with ip %s has" % (server_name,
                            ip_addr) + ((" non protocol port %s" % orig_port)
                            if orig_port else " no port"))
                server_skipped.append(server_name)
                continue
            enabled = True
            state = server.get("state", 'enabled')
            session = server.get("session", 'enabled')
            if state == "user-down" or session == 'user-disabled':
                enabled = False
            priority = server.get('priority-group', None)
            ip_addr = ip_addr.strip()
            # Anything that is not a dotted-quad IPv4 literal is replaced
            # with a placeholder address.
            matches = re.findall('^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$',
                                 ip_addr)
            if not matches:
                LOG.warning('Avi does not support IPv6. Replace 1.1.1.1 '
                            'ipv4 for : %s' % ip_addr)
                ip_addr = '1.1.1.1'
            server_obj = {
                'ip': {
                    'addr': ip_addr,
                    'type': 'V4'
                },
                'enabled': enabled,
                'description': description,
                'port': port
            }
            if priority:
                server_obj['priority'] = priority
            ratio = server.get("ratio", None)
            if ratio:
                server_obj["ratio"] = ratio
            r_lim = int(server.get("rate-limit", '0'))
            if r_lim > 0:
                rate_limit.append(r_lim)
            c_lim = int(server.get("connection-limit", '0'))
            if c_lim > 0:
                connection_limit.append(c_lim)
            # Drop duplicates with the same address and port.
            server_obj_list = [
                s for s in server_list if s['ip']['addr'] ==
                server_obj['ip']['addr'] and
                ('port' in s and 'port' in server_obj)and
                (s['port'] == server_obj['port'])]
            if server_obj_list:
                LOG.warning('Skipped duplicate server %s' % ip_addr)
                continue
            server_list.append(server_obj)
            skipped = [key for key in server.keys()
                       if key not in self.supported_attributes]
            if skipped:
                skipped_list.append({server_name: skipped})
        # Pool-level limits: the most restrictive per-server value wins.
        limits = dict()
        if rate_limit:
            limits['rate_limit'] = min(rate_limit)
        if connection_limit:
            limits['connection_limit'] = min(connection_limit)
        return server_list, skipped_list, limits, server_skipped
class PoolConfigConvV10(PoolConfigConv):
def __init__(self, f5_pool_attributes, prefix):
    """
    :param f5_pool_attributes: f5 pool attributes from yaml file
    :param prefix: prefix for objects
    """
    # v10 uses its own attribute whitelists from the yaml file.
    self.supported_attr = f5_pool_attributes['Pool_supported_attr_1']
    self.supported_attributes = f5_pool_attributes['Pool_supported_attr_2']
    self.ignore_for_val = f5_pool_attributes['Pool_ignore_val']
    # Added prefix for objects
    self.prefix = prefix
def convert_pool(self, pool_name, f5_config, avi_config, user_ignore,
                 tenant_ref, cloud_ref, merge_object_mapping, sys_dict,
                 vrf=None, segroup=None):
    """
    :param pool_name: name of the pool
    :param f5_config: parsed f5 config dict
    :param avi_config: dict for avi conversion
    :param user_ignore: Ignore config defined by user
    :param tenant_ref: tenant of which output to converted
    :param cloud_ref: cloud of which output to converted
    :param merge_object_mapping: flag for merge object
    :param sys_dict: baseline dict
    :return: converted pool (or pool-group) objects
    """
    # v10 uses space-separated config keys ('action on svcdown', 'lb method').
    # NOTE(review): unlike the v11 converter this *pops* the node section,
    # so subsequent pools see an empty node dict — confirm intended.
    nodes = f5_config.pop("node", {})
    f5_pool = f5_config['pool'][pool_name]
    monitor_config = avi_config['HealthMonitor']
    servers, member_skipped_config, limits, skipped_servers = \
        self.convert_servers_config(f5_pool.get("members", {}), nodes,
                                    avi_config, cloud_ref)
    sd_action = f5_pool.get("action on svcdown", "")
    pd_action = conv_utils.get_avi_pool_down_action(sd_action)
    lb_method = f5_pool.get("lb method", None)
    lb_algorithm = self.get_avi_lb_algorithm(lb_method)
    desc = f5_pool.get('description', None)
    ramp_time = f5_pool.get('slow ramp time', None)
    # if length of servers > 400 take only 400 servers.
    status_flag = False
    if len(servers) > 400:
        servers = servers[0:400]
        status_flag = True
    pool_obj = super(PoolConfigConvV10, self).create_pool_object(
        pool_name, desc, servers, pd_action, lb_algorithm, ramp_time,
        limits, tenant_ref, cloud_ref)
    monitor_names = f5_pool.get("monitor", None)
    skipped_monitors = []
    if monitor_names:
        skipped_monitors, monitor_refs = super(
            PoolConfigConvV10, self).get_monitor_refs(
            monitor_names, monitor_config, pool_name, tenant_ref,
            merge_object_mapping, sys_dict['HealthMonitor'])
        pool_obj["health_monitor_refs"] = list(set(monitor_refs))
    # Adding vrf context ref to pool obj
    vrf_config = avi_config['VrfContext']
    members = f5_pool.get('members')
    # First member's address (dict-style members) or the first token of a
    # string-style member entry; None when there are no members.
    address = (isinstance(members, dict) and members.get(members.keys()[
        0]) and isinstance(members[members.keys()[0]], dict)) and \
        members[members.keys()[0]].get('address') or isinstance(
        members, str) and members.split(' ')[0] or None if members \
        else None
    if vrf:
        vrf_ref = conv_utils.get_object_ref(vrf, 'vrfcontext',
                                            tenant=tenant_ref,
                                            cloud_name=cloud_ref)
    else:
        vrf_ref = conv_utils.get_vrf_context_ref(address, vrf_config, 'pool',
                                                 pool_name, cloud_ref)
    if vrf_ref:
        pool_obj["vrf_ref"] = vrf_ref
    # Map F5 'reselect tries' to Avi server reselection on 4xx/5xx.
    num_retries = f5_pool.get('reselect tries', None)
    if num_retries:
        server_reselect = {
            "retry_nonidempotent": False,
            "svr_resp_code": {
                "resp_code_block": ["HTTP_RSP_4XX", "HTTP_RSP_5XX"]
            },
            "num_retries": num_retries,
            "enabled": True
        }
        pool_obj['server_reselect'] = server_reselect
    skipped_attr = [key for key in f5_pool.keys() if
                    key not in self.supported_attr]
    # Drop attributes whose value matches a configured "ignore" value.
    for attr in self.ignore_for_val:
        ignore_val = self.ignore_for_val[attr]
        actual_val = f5_pool.get(attr, None)
        if not actual_val:
            continue
        if isinstance(ignore_val, str) and actual_val == ignore_val:
            skipped_attr.remove(attr)
        elif isinstance(ignore_val, list) and actual_val in ignore_val:
            skipped_attr.remove(attr)
    is_pg, pg_dict = self.check_for_pool_group(servers)
    converted_objs = dict()
    tenant, name = conv_utils.get_tenant_ref(pool_name)
    if is_pg:
        converted_objs = self.convert_for_pg(pg_dict,
                                             pool_obj, name,
                                             tenant, avi_config, cloud_ref)
    else:
        converted_objs['pools'] = [pool_obj]
    # Flag to make status partial for pool.
    if status_flag:
        skipped_attr.append('Skipped: length of servers more than 400')
    super(PoolConfigConvV10, self).add_status(
        pool_name, skipped_attr, member_skipped_config, skipped_monitors,
        converted_objs, user_ignore, skipped_servers)
    return converted_objs
def get_avi_lb_algorithm(self, f5_algorithm):
    """
    Converts f5 LB algorithm to equivalent avi LB algorithm
    :param f5_algorithm: f5 algorithm name
    :return: Avi LB algorithm enum value, or None when unrecognized
    """
    # An unset/empty method falls back to round robin, like the ratio family.
    if not f5_algorithm:
        return "LB_ALGORITHM_ROUND_ROBIN"
    lookup = {}
    for f5_names, avi_enum in (
            (("ratio", "member ratio"),
             "LB_ALGORITHM_ROUND_ROBIN"),
            (("member least conn", "least conn", "l3 addr",
              "weighted least conn member", "least sessions",
              "weighted least conn node addr"),
             "LB_ALGORITHM_LEAST_CONNECTIONS"),
            (("fastest", "fastest app resp"),
             "LB_ALGORITHM_FASTEST_RESPONSE"),
            (("dynamic ratio", "member observed", "predictive",
              "member predictive", "observed", "member dynamic ratio"),
             "LB_ALGORITHM_LEAST_LOAD")):
        for f5_name in f5_names:
            lookup[f5_name] = avi_enum
    # Unknown method names map to None, matching the original behaviour.
    return lookup.get(f5_algorithm)
def convert_servers_config(self, servers_config, nodes, avi_config,
                           cloud_ref):
    """
    Converts the config of servers in the pool
    :param servers_config: F5 servers config for particular pool
    :param nodes: F5 node definitions keyed by address, used to resolve
        a member's address/route-domain
    :param avi_config: Avi configuration being built; VRFs may be added
    :param cloud_ref: cloud reference used when registering VRFs
    :return: tuple (server_list, skipped_list, limits, server_skipped)
    """
    server_list = []
    skipped_list = []
    connection_limit = []
    server_skipped = []
    # A bare string means a single member with no per-member options.
    if isinstance(servers_config, str):
        servers_config = {servers_config.split(' ')[0]: None}
    for server_name in servers_config.keys():
        skipped = None
        server = servers_config[server_name]
        # Member names look like "<address>:<port>".
        parts = server_name.split(':')
        node = nodes.get(parts[0], None)
        # '%' separates an address from its F5 route-domain id, which is
        # converted to an Avi VRF.
        if node and '%' in node.get("address", ''):
            ip_addr, vrf = node["address"].split('%')
            conv_utils.add_vrf(avi_config, vrf, cloud_ref)
        else:
            if '%' in parts[0]:
                ip_addr, vrf = parts[0].split('%')
                conv_utils.add_vrf(avi_config, vrf, cloud_ref)
            else:
                ip_addr = parts[0]
        port = parts[1] if len(parts) == 2 else conv_const.DEFAULT_PORT
        orig_port = port
        # Named (protocol) ports are mapped to numbers; unresolvable names
        # cause the member to be skipped entirely.
        if not port.isdigit():
            port = conv_utils.get_port_by_protocol(port)
        if not port:
            LOG.warning("Skipped: Server %s with ip %s has" % (server_name,
                        ip_addr) + ((" non protocol port %s" % orig_port)
                        if orig_port else " no port"))
            server_skipped.append(server_name)
            continue
        enabled = True
        state = 'enabled'
        ratio = None
        description = None
        priority = None
        if server:
            state = server.get("session", 'enabled')
            # Per-member attributes we do not convert are reported back to
            # the caller for status logging.
            skipped = [key for key in server.keys()
                       if key not in self.supported_attributes]
            ratio = server.get("ratio", None)
            description = server.get('description', None)
            if state == "user disabled" or 'down' in server.keys():
                enabled = False
            # Per-member connection limits are collapsed into a single
            # pool-wide minimum below.
            c_lim = int(server.get("limit", '0'))
            if c_lim > 0:
                connection_limit.append(c_lim)
            priority = server.get('priority', None)
        server_obj = {
            'ip': {
                'addr': ip_addr,
                'type': 'V4'
            },
            'enabled': enabled,
            'description': description,
            'port': port
        }
        if priority:
            server_obj['priority'] = priority
        if ratio:
            server_obj["ratio"] = ratio
        server_list.append(server_obj)
        if skipped:
            skipped_list.append({server_name: skipped})
    limits = dict()
    if connection_limit:
        # Avi supports one connection limit per pool: use the strictest.
        limits['connection_limit'] = min(connection_limit)
    return server_list, skipped_list, limits, server_skipped
| StarcoderdataPython |
6691010 | <reponame>Vikr-182/Air-Accidents-Visualizer
import json

# Load the accident data, keyed by date string.
with open("../data.json") as f:
    data = json.load(f)

# Copy each top-level key (a date) into the record itself.
for date_key in list(data):
    data[date_key]["date"] = date_key

# BUG FIX: the original reopened the file without a mode (read-only) and
# then called json.dump on it, which raises io.UnsupportedOperation.
# Open in write mode so the updated data is actually persisted.
with open("../data.json", "w") as f:
    json.dump(data, f, indent=4, sort_keys=True)
| StarcoderdataPython |
9691460 | '''
ローカルMQTTサーバ(ブリッジ)からサブスクライブしたデータをinfluxdbに入れる
'''
import sys, os, re
import time
import json
import argparse
import paho.mqtt.client as mqtt # MQTTのライブラリをインポート
from time import sleep # 3秒間のウェイトのために使う
from influxdb_client import InfluxDBClient, Point, WriteOptions
from influxdb_client.client.write_api import ASYNCHRONOUS
SCRIPT_NAME = os.path.basename(__file__)
CLIENT_ID = os.uname()[1] + "_" + SCRIPT_NAME  # MQTT client id (must be unique per broker)
# Connection defaults; all of these are overridden from the command line
# in the __main__ block at the bottom of the file.
MQTT_HOST = ""
MQTT_PORT = 1883
KEEP_ALIVE = 60
TOPIC = ""
QOS = 1
INFLUX_HOST = "http://{}:8086"  # "{}" is filled in with the InfluxDB host
INFLUX_TOKEN = ""
INFLUX_ORG = "pydev"
INFLUX_BUCKET = ""
MEASUREMENT = "bme280"
INFLUX_CLIENT = None  # set to an InfluxDBClient instance in __main__
# Called when the client has connected to the broker.
def on_connect(client, userdata, flag, rc):
    """Log the connection result code reported by the broker."""
    print("Connected with result code " + str(rc))
# Called when a subscription has been acknowledged by the broker.
def on_subscribe(client, userdata, mid, qos):
    """Log the message id and granted QoS of the subscription."""
    print("Subscribe: {}, QOS: {} ".format(str(mid), str(qos)))
# Called when the connection to the broker is closed.
def on_disconnect(client, userdata, rc):
    """Log whether the disconnect was clean (rc == 0) or unexpected."""
    if rc == 0:
        print("Disconnected.")
    else:
        print("Unexpected disconnection. rc = {}".format(rc))
# Called for every message received on a subscribed topic.
def on_message(client, userdata, msg):
    """
    Parse a JSON payload and forward it to InfluxDB.

    msg.topic carries the topic name and msg.payload the message body.
    The topic is expected to look like "<prefix>/<hostname>/..."; the
    second segment is used as the hostname tag for the measurement.
    """
    try:
        payload = json.loads(msg.payload)
        topic_parts = msg.topic.split('/')
        async_write_bme280(payload, topic_parts[1])
    except Exception:
        # BUG FIX: was a bare ``except:``, which also swallowed SystemExit
        # and KeyboardInterrupt; Exception still covers JSON parse errors,
        # malformed topics and write failures.
        print("Json Error")
    return
'''
async write BME280
'''
def async_write_bme280(jdata, hostname):
    """
    Asynchronously write one BME280 reading to InfluxDB and wait for it.

    Writes temperature, humidity and pressure as separate points of the
    MEASUREMENT series, tagged with the originating hostname.
    """
    write_api = INFLUX_CLIENT.write_api(write_options=ASYNCHRONOUS)
    points = [
        Point(MEASUREMENT).tag("hostname", hostname).field(name, jdata[name])
        for name in ("temperature", "humidity", "pressure")
    ]
    pending = write_api.write(bucket=INFLUX_BUCKET, record=points)
    # Block until the asynchronous write has completed.
    pending.get()
    print("Write InfluxDB")
    return
# Entry point, invoked from the __main__ guard at the bottom of the file.
def main():
    """Connect to the MQTT broker, subscribe to TOPIC, and loop forever."""
    mqtt_client = mqtt.Client()
    # Register all callbacks before connecting so no events are missed.
    mqtt_client.on_connect = on_connect
    mqtt_client.on_disconnect = on_disconnect
    mqtt_client.on_subscribe = on_subscribe
    mqtt_client.on_message = on_message
    mqtt_client.connect(MQTT_HOST, MQTT_PORT, KEEP_ALIVE)
    mqtt_client.subscribe(TOPIC, QOS)
    mqtt_client.loop_forever()
if __name__ == '__main__':  # run main() only when executed as a script
    parser = argparse.ArgumentParser()
    parser.add_argument("--hostname", type=str, default="localhost", help="hostname or ip")
    parser.add_argument("--port", type=int, default=1883, help="Port number override")
    parser.add_argument("--keepalive", type=int, default=60, help="")
    parser.add_argument("--topic", type=str, default="l2l/test", help="Targeted topic")
    parser.add_argument("--qos", type=int, default=1, help="qos=0 or 1 or 2")
    parser.add_argument("--infhost", type=str, default="", help="hostname or ip")
    parser.add_argument("--inftoken", type=str, default="", help="token to write db")
    parser.add_argument("--infbucket", type=str, default="", help="bucketname")
    args = parser.parse_args()
    # These assignments rebind the module-level configuration used by
    # main() and by the MQTT callbacks.
    MQTT_HOST = args.hostname
    MQTT_PORT = args.port
    KEEP_ALIVE = args.keepalive
    TOPIC = args.topic
    QOS = args.qos
    # InfluxDB defaults to the same host as the MQTT broker.
    if len(args.infhost) == 0:
        args.infhost = args.hostname
    INFLUX_HOST = INFLUX_HOST.format(args.infhost)
    INFLUX_TOKEN = args.inftoken
    INFLUX_BUCKET = args.infbucket
    INFLUX_CLIENT = InfluxDBClient(url=INFLUX_HOST, token=INFLUX_TOKEN, org=INFLUX_ORG)
    main()  # start the subscriber loop (blocks forever)
| StarcoderdataPython |
9778833 | <gh_stars>1-10
# Copyright (c) 2019 Riverbed Technology, Inc.
#
# This software is licensed under the terms and conditions of the MIT License
# accompanying the software ("License"). This software is distributed "AS IS"
# as set forth in the License.
import steelscript.appfwk.apps.report.modules.c3 as c3
import steelscript.appfwk.apps.report.modules.tables as tables
from steelscript.appfwk.apps.report.models import Report
from steelscript.netprofiler.appfwk.datasources.netprofiler import (
NetProfilerTimeSeriesTable,
NetProfilerGroupbyTable)
#
# NetProfiler utilization report definition.
#
report = Report.create("NetProfiler Utilization", position=10)
report.add_section()
# Define an overall time-series table of average in/out utilization,
# with fixed 50% and 70% reference lines.
# (NOTE(review): the original comment said "Avg Bytes/s", which did not
# match the columns actually added below.)
p = NetProfilerTimeSeriesTable.create('opt-overall', duration=60,
                                      interface=True,
                                      resolution="1min")
p.add_column('time', 'Time', datatype='time', iskey=True)
p.add_column('in_avg_util', 'In Avg Util %', units='pct')
p.add_column('out_avg_util', 'Out Avg Util %', units='pct')
p.add_column('50-line', '50% Util', synthetic=True, compute_expression='50')
p.add_column('70-line', '70% Util', synthetic=True, compute_expression='70')
report.add_widget(c3.TimeSeriesWidget, p, "Overall Utilization", width=12)
# Define a per-interface utilization table (average and peak, in/out).
# (NOTE(review): the original comment said "Pie Chart for locations",
# which did not match the code.)
p = NetProfilerGroupbyTable.create('util-table', groupby='interface',
                                   duration=60)
p.add_column('interface', 'Interface', datatype='string', iskey=True)
p.add_column('in_avg_util', 'In Avg Util %', units='pct')
p.add_column('out_avg_util', 'Out Avg Util %', units='pct')
p.add_column('in_peak_util', 'In Peak Util %', units='pct')
p.add_column('out_peak_util', 'Out Peak Util %', units='pct')
report.add_widget(tables.TableWidget, p, "Interface Utilization", width=12)
| StarcoderdataPython |
11212807 | <gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Recover images from HDF5 file, with image names matching the indices
'''
import h5py
from PIL import Image

# Root data directory. Note imgbase resolves to ".../data//logos/" (double
# slash), which the filesystem tolerates.
base = '/Users/yliu0/data/'
rawpath = base + 'logos.hdf5'
imgbase = base + '/logos/'

if __name__ == '__main__':
    # FIX: use a context manager so the HDF5 handle is closed even if an
    # image fails to save — the original opened the file and never closed it.
    with h5py.File(rawpath, 'r') as f:
        dset = f['logos']
        # Each dataset row is an RGB array; the file name is the row index.
        for i, arr in enumerate(dset):
            img = Image.fromarray(arr, 'RGB')
            img.save(imgbase + '{}.jpg'.format(i))
| StarcoderdataPython |
3415623 | #!/usr/bin/env python3
from instastuff import *

# Numeric user id of the target user account.
#
# To find a user id use this tool - https://codeofaninja.com/tools/find-instagram-user-id/
#
# Example:
#   target_user_id = '234567890'
target_user_id = '<user-id-here>'

# Value of the `sessionid` cookie when logged into Instagram web.
#
# To find the session id cookie use Chrome Developer Tools > Application > Cookies.
#
# Example:
#   session_id = '345678901%3AaGVsbG8geW91%3A1'
session_id = '<session-id-here>'

# Run the exporter.
#
# NOTE: this executes at import time — there is no __main__ guard.
# A response status code 429 usually indicates an invalid session id.
write_users(target_user_id, session_id)
| StarcoderdataPython |
119245 | <reponame>binxio/git-release-tag
import click
import re
from collections import OrderedDict
from git_release_tag.release_info import ReleaseInfo
class SemVer(click.ParamType):
    """
    Click parameter type for a semantic version string, major.minor.patch.
    """
    name = "semver"

    def convert(self, value, param, ctx) -> str:
        # Pass None through so click can handle absent optional values.
        if value is None:
            return value
        if re.fullmatch(r"[0-9]+\.[0-9]+\.[0-9]+", value) is None:
            self.fail(f'could not parse "{value}" as release')
        return value
class PreTagCommand(click.ParamType):
    """
    Click parameter type for a shell command referencing @@RELEASE@@,
    @@TAG@@ or @@BASE_TAG@@ placeholders; @@RELEASE@@ is mandatory.
    """
    name = "pre_tag_command"

    def convert(self, value, param, ctx) -> str:
        if value is None:
            return value
        found = set(re.findall(r"@@([a-zA-Z_]+)@@", value))
        unsupported = found.difference({"RELEASE", "TAG", "BASE_TAG"})
        if unsupported:
            self.fail(f"found unsupported references {unsupported}")
        if "RELEASE" not in found:
            self.fail(f"expected at least a @@RELEASE@@ reference in pre tag command")
        return value
class ReleaseLevel(click.Choice):
    """
    Click parameter type for a release level: patch, minor or major.

    Validates the choice via click.Choice, then converts the string into
    the corresponding ReleaseInfo level constant.
    """
    name = "release-level"

    def __init__(self):
        # Modernized: zero-argument super() — the module already requires
        # Python 3 (it uses f-strings elsewhere).
        super().__init__(["patch", "minor", "major"])

    def convert(self, value, param, ctx) -> str:
        result = super().convert(value, param, ctx)
        levels = {
            "major": ReleaseInfo.MAJOR,
            "minor": ReleaseInfo.MINOR,
            "patch": ReleaseInfo.PATCH,
        }
        return levels.get(result)
class OrderedGroup(click.Group):
    """Click group that lists subcommands in registration order."""
    def __init__(self, name=None, commands=None, **attrs):
        super(OrderedGroup, self).__init__(name, commands, **attrs)
        #: the registered subcommands by their exported names.
        self.commands = commands or OrderedDict()

    def list_commands(self, ctx):
        # Returning the (ordered) dict itself preserves insertion order;
        # click.Group's default sorts command names alphabetically.
        return self.commands
3363635 | import numpy
from utils import *
class RBM(object):
    """
    Restricted Boltzmann Machine trained with contrastive divergence (CD-k).

    Weights are initialised uniformly in +/- 4*sqrt(6/(n_hidden+n_visible))
    and biases at zero. The ``sigmoid`` activation is expected to come from
    ``utils`` (imported at module level).
    """

    def __init__(self, input=None, n_visible=27*27, n_hidden=500, \
                 W=None, hbias=None, vbias=None, numpy_rng=None):
        """
        :param input: training mini-batch; rows are visible vectors
        :param n_visible: number of visible units
        :param n_hidden: number of hidden units
        :param W: optional initial weight matrix of shape (n_visible, n_hidden)
        :param hbias: optional hidden bias vector (defaults to zeros)
        :param vbias: optional visible bias vector (defaults to zeros)
        :param numpy_rng: optional RandomState (seeded with 1234 by default)
        """
        self.input = input
        self.n_visible = n_visible
        self.n_hidden = n_hidden

        if numpy_rng is None:
            numpy_rng = numpy.random.RandomState(1234)

        if W is None:
            # Glorot-style uniform initialisation.
            W = numpy.asarray(numpy_rng.uniform(
                low=-4 * numpy.sqrt(6. / (n_hidden + n_visible)),
                high=4 * numpy.sqrt(6. / (n_hidden + n_visible)),
                size=(n_visible, n_hidden)))

        if hbias is None:
            hbias = numpy.zeros(n_hidden)

        if vbias is None:
            vbias = numpy.zeros(n_visible)

        self.numpy_rng = numpy_rng
        self.W = W
        self.hbias = hbias
        self.vbias = vbias

    def free_energy(self, v_sample):
        """Not implemented; returns None."""
        return

    def propup(self, vis):
        """Return P(h=1|v) for visible activations *vis*."""
        pre_sigmoid_activation = numpy.dot(vis, self.W) + self.hbias
        return sigmoid(pre_sigmoid_activation)

    def sample_h_given_v(self, v0_sample):
        """Sample hidden units given visible units; returns [mean, sample]."""
        h1_mean = self.propup(v0_sample)
        h1_sample = self.numpy_rng.binomial(size=h1_mean.shape, n=1, p=h1_mean)
        return [h1_mean, h1_sample]

    def propdown(self, hid):
        """Return P(v=1|h) for hidden activations *hid*."""
        pre_sigmoid_activation = numpy.dot(hid, self.W.T) + self.vbias
        return sigmoid(pre_sigmoid_activation)

    def sample_v_given_h(self, h0_sample):
        """Sample visible units given hidden units; returns [mean, sample]."""
        v1_mean = self.propdown(h0_sample)
        v1_sample = self.numpy_rng.binomial(size=v1_mean.shape, n=1, p=v1_mean)
        return [v1_mean, v1_sample]

    def gibbs_hvh(self, h0_sample):
        """One Gibbs step starting from a hidden sample (h -> v -> h)."""
        v1_mean, v1_sample = self.sample_v_given_h(h0_sample)
        h1_mean, h1_sample = self.sample_h_given_v(v1_sample)
        return [v1_mean, v1_sample,
                h1_mean, h1_sample]

    def gibbs_vhv(self, v0_sample):
        """Not implemented; returns None."""
        return

    def get_cost_updates(self, lr=0.1, persistent=None, k=1):
        """
        Perform one CD-k (or PCD-k when *persistent* is given) update of
        W, vbias and hbias on self.input.

        :param lr: learning rate
        :param persistent: optional persistent chain state (hidden sample)
        :param k: number of Gibbs steps
        :return: monitoring cost (mean squared reconstruction error)
        """
        ph_mean, ph_sample = self.sample_h_given_v(self.input)

        if persistent is None:
            chain_start = ph_sample
        else:
            chain_start = persistent

        # BUG FIX: ``xrange`` is Python 2 only and raises NameError on
        # Python 3; ``range`` is equivalent here.
        for step in range(k):
            if step == 0:
                nv_means, nv_samples,\
                nh_means, nh_samples = self.gibbs_hvh(chain_start)
            else:
                nv_means, nv_samples,\
                nh_means, nh_samples = self.gibbs_hvh(nh_samples)

        # Positive phase minus negative phase gradient updates.
        self.W += lr * (numpy.dot(self.input.T, ph_mean)
                        - numpy.dot(nv_samples.T, nh_means))
        self.vbias += lr * numpy.mean(self.input - nv_samples, axis=0)
        self.hbias += lr * numpy.mean(ph_mean - nh_means, axis=0)

        monitoring_cost = numpy.mean(numpy.square(self.input - nv_means))
        return monitoring_cost

    def get_pseudo_likelihood_cost(self, updates):
        """Not implemented; returns None."""
        return
| StarcoderdataPython |
9779183 | """
# -*- coding:utf-8 -*-
Class wrapping the Marcel Tencé dll for controlling an optical spectrometer.
"""
import sys, os
from ctypes import cdll, create_string_buffer, POINTER, byref
from ctypes import c_uint, c_int, c_char, c_char_p, c_void_p, c_int, c_long, c_bool, c_double, c_uint64, c_uint32, Array, CFUNCTYPE, WINFUNCTYPE
import shutil
from pathlib import Path
import logging
__author__ = "<NAME> & <NAME> & <NAME>"
__status__ = "alpha"
__version__ = "0.1"
def _isPython3():
return sys.version_info[0] >= 3
def _buildFunction(call, args, result):
call.argtypes = args
call.restype = result
return call
def _createCharBuffer23(size):
    """Return a zero-filled ctypes char buffer of *size* characters."""
    fill = b'\000' * size if _isPython3() else '\000' * size
    return create_string_buffer(fill)
def _convertToString23(binaryString):
    """Decode UTF-8 bytes to str on Python 3; pass through on Python 2."""
    return binaryString.decode("utf-8") if _isPython3() else binaryString
def _toString23(string):
    """Encode str to UTF-8 bytes on Python 3; pass through on Python 2."""
    return string.encode("utf-8") if _isPython3() else string
#is64bit = sys.maxsize > 2**32
if (sys.maxsize > 2**32):
    # Copy AttoClient.dll next to the interpreter so Windows can resolve it
    # when the spectrometer DLL is loaded.
    logging.info(f'***OPT SPEC***: Placing AttoClient.dll in {sys.executable}.')
    dll_path = abs_path = os.path.join(os.path.dirname(__file__), '../aux_files/DLLs/AttoClient.dll')
    parent_exec = os.path.join(Path(sys.executable).parent.absolute(), 'AttoClient.dll')
    shutil.copyfile(dll_path, parent_exec)
    libpath = os.path.dirname(__file__)
    try:
        # Try the site-specific install location first.
        libname = os.path.join(libpath, "C:/Monch_plugins/swift-spectro/nionswift_plugin/spectro/SpectroCL.dll")
        _library = cdll.LoadLibrary(libname)
    except OSError:
        # FIX: the original had two identical handlers for FileNotFoundError
        # and OSError; FileNotFoundError is a subclass of OSError, so a
        # single handler covers both. Fall back to the bundled DLL.
        libname = os.path.join(libpath, "../aux_files/DLLs/SpectroCL.dll")
        _library = cdll.LoadLibrary(libname)
    logging.info(f"Orsay SpectroCL library: {_library}")
else:
    raise Exception("It must a python 64 bit version")
# void (*SendMyMessage)(int kind):#CHECK:
SENDMYMESSAGEFUNC = WINFUNCTYPE(None, c_int)
SENDMIRRORMESSAGEFUNC = WINFUNCTYPE(None, c_char_p)
# void MONOCL_EXPORT *OrsayMonoCLInit(int manufacturer, int portnb, void(*sendmessage)(int kind)):
_OrsayMonoCLInit=_buildFunction(_library.OrsayMonoCLInit,[c_int,c_int,SENDMYMESSAGEFUNC], c_void_p)
#void MONOCL_EXPORT *OrsayMonoCLMirrorInit(int manufacturer, int portnb, void(*sendmessage)(int kind), void(*sendmirrormessage)(const char *message));
_OrsayMonoCLWithMirrorInit=_buildFunction(_library.OrsayMonoCLMirrorInit,[c_int,c_int,SENDMYMESSAGEFUNC,SENDMIRRORMESSAGEFUNC], c_void_p)
#CHECK: bonne forme pour SENDMYMESSAGEFUNC?
# void MONOCL_EXPORT OrsayMonoCLClose(void* o):
_OrsayMonoCLClose=_buildFunction(_library.OrsayMonoCLClose,[c_void_p], None)
#
#
# void MONOCL_EXPORT SendWaveLength(self, double value):
_SendWaveLength=_buildFunction(_library.SendWaveLength,[c_void_p, c_double], None)
# void MONOCL_EXPORT SendGrating(self, int number):
_SendGrating=_buildFunction(_library.SendGrating,[c_void_p, c_int], None)
# void MONOCL_EXPORT SendReinit(self):
#NOT IMPLEMENTED
# void MONOCL_EXPORT SendSlitEntranceFront(self, double value):
_SendSlitEntranceFront=_buildFunction(_library.SendSlitEntranceFront,[c_void_p, c_double], None)
# void MONOCL_EXPORT SendSlitEntranceSide(self, double value):
_SendSlitEntranceSide=_buildFunction(_library.SendSlitEntranceSide,[c_void_p,c_double], None)
# void MONOCL_EXPORT SendSlitExitFront(self, double value):
_SendSlitExitFront=_buildFunction(_library.SendSlitExitFront,[c_void_p,c_double], None)
# void MONOCL_EXPORT SendSlitExitSide(self, double value):
_SendSlitExitSide=_buildFunction(_library.SendSlitExitSide,[c_void_p,c_double], None)
# void MONOCL_EXPORT SendStatusUpdate(self):
# BUG FIX: the original passed the whole library object (``_library``)
# instead of the exported symbol, leaving _SendStatusUpdate uncallable;
# every sibling binding in this file uses ``_library.<SymbolName>``.
_SendStatusUpdate=_buildFunction(_library.SendStatusUpdate,[c_void_p], None)
# --- remaining ctypes bindings (prototype comment, then binding) ---
# void MONOCL_EXPORT SendSwitchToAxialEntry(self):
_SendSwitchToAxialEntry=_buildFunction(_library.SendSwitchToAxialEntry,[c_void_p], None)
# void MONOCL_EXPORT SendSwitchToLateralEntry(self):
_SendSwitchToLateralEntry=_buildFunction(_library.SendSwitchToLateralEntry,[c_void_p], None)
# double MONOCL_EXPORT GetCurrentWaveLength(self):
_GetCurrentWaveLength=_buildFunction(_library.GetCurrentWaveLength,[c_void_p], c_double)
# int MONOCL_EXPORT GetCurrentGroove(void *o):
_GetCurrentGroove=_buildFunction(_library.GetCurrentGroove,[c_void_p], c_int)
# void MONOCL_EXPORT InitSpectro(self, int portnb):
_InitSpectro=_buildFunction(_library.InitSpectro,[c_void_p,c_int], c_void_p)
# const MONOCL_EXPORT char *gratingText(self, int number):
_gratingText=_buildFunction(_library.gratingText,[c_void_p,c_int], c_char_p)
# double MONOCL_EXPORT GetCLSpectrumRange(self, double pixelWidth, int nbPixels):
_GetCLSpectrumRange=_buildFunction(_library.GetCLSpectrumRange,[c_void_p, c_double,c_int], c_double)
# double MONOCL_EXPORT GetCLSpectrumCenter(self):
_GetCLSpectrumCenter=_buildFunction(_library.GetCLSpectrumCenter,[c_void_p], c_double)
#
# int MONOCL_EXPORT IsReady(self):
_IsReady=_buildFunction(_library.IsReady,[c_void_p], c_int)
#
# bool MONOCL_EXPORT IsConnected(self):
_IsConnected=_buildFunction(_library.IsConnected,[c_void_p], c_bool)
# void (*statusUpdateA)(double waveLenght, int slitEntranceFront, int slitExitSide, int grating, int exitMirror):
STATUSUPDATEA= WINFUNCTYPE(None,c_double,c_int,c_int,c_int,c_int)#CHECK: pointer to a function!!! and not reused...
#_statusUpdateA=_buildFunction(_library,[c_double,c_int,c_int,c_int,c_int],c_void_p)
# const char MONOCL_EXPORT *spectroModel(self):
_spectroModel=_buildFunction(_library.spectroModel,[c_void_p], c_char_p)
# int MONOCL_EXPORT nbgratings(self):
_nbgratings=_buildFunction(_library.nbgratings,[c_void_p], c_int)
# int MONOCL_EXPORT grating(self):
_grating=_buildFunction(_library.grating,[c_void_p], c_int)
# const char MONOCL_EXPORT *GratingsNames(self, int gr):
_GratingsNames=_buildFunction(_library.GratingsNames,[c_void_p,c_int], c_char_p)
# double MONOCL_EXPORT Centre(self):
_Centre=_buildFunction(_library.Centre,[c_void_p], c_double)
# int MONOCL_EXPORT exitMirror(self):
_exitMirror=_buildFunction(_library.exitMirror,[c_void_p], c_int)
# bool MONOCL_EXPORT hasEntranceAxialSlit(self):
_hasEntranceAxialSlit=_buildFunction(_library.hasEntranceAxialSlit,[c_void_p], c_bool)
# bool MONOCL_EXPORT hasEntranceSideSlit(self):
_hasEntranceSideSlit=_buildFunction(_library.hasEntranceSideSlit,[c_void_p], c_bool)
# bool MONOCL_EXPORT hasExitAxialSlit(self):
_hasExitAxialSlit=_buildFunction(_library.hasExitAxialSlit,[c_void_p], c_bool)
# bool MONOCL_EXPORT hasExitSideSlit(self):
_hasExitSideSlit=_buildFunction(_library.hasExitSideSlit,[c_void_p], c_bool)
# double MONOCL_EXPORT EntranceAxialSlitValue(self):
_EntranceAxialSlitValue=_buildFunction(_library.EntranceAxialSlitValue,[c_void_p], c_double)
# double MONOCL_EXPORT EntranceSideSlitValue(self):
_EntranceSideSlitValue=_buildFunction(_library.EntranceSideSlitValue,[c_void_p],c_double)
# double MONOCL_EXPORT ExitAxialSlitValue(self):
_ExitAxialSlitValue=_buildFunction(_library.ExitAxialSlitValue,[c_void_p],c_double)
# double MONOCL_EXPORT ExitSideSlitValue(self):
_ExitSideSlitValue=_buildFunction(_library.ExitSideSlitValue,[c_void_p],c_double)
_MirrorSendCommand=_buildFunction(_library.MirrorSendCommand, [c_void_p, c_char_p], None)
class OptSpectrometer:
    """ class wrapping the spectrometer class from CMonoCL.dll
    requires CMonoCL.dll to run """
    def __init__(self, sendmessage:callable, manufacturer=2, portnb=6, sendmirrormessage=None)->None:
        """
        sendmessage is a python callback you provide to the dll. The callback will be called when some property of the spectro is actually set, after a "send" method has been called.
        The integer value gives the type of property being set:
        10+1: grating
        10+2: wavelength
        10+3: slit entrance front
        10+4: slit entrance side
        10+5: slit exit front
        10+6: slit exit side
        sendmessage is likely to be a method from a delegate object
        """
        # void MONOCL_EXPORT *OrsayMonoCLInit(int manufacturer, int portnb, void(*sendmessage)(int kind)):
        # manufacturer
        # 1 Acton
        # 2 Attolight (not tested yet)
        # -1 simulated Acton.
        if sendmirrormessage is None:
            self.OrsayMonoCL=_OrsayMonoCLInit(manufacturer, portnb, sendmessage)
        else:
            self.OrsayMonoCL=_OrsayMonoCLWithMirrorInit(manufacturer, portnb, sendmessage, sendmirrormessage)
        self.success = True
        self.sendmessage = sendmessage
    def OrsayMonoCLCLose(self) -> None:
        _OrsayMonoCLClose(self.OrsayMonoCL)
        # force reference to dll to be null.
        self.OrsayMonoCL = 0
    # --- Thin wrappers over the C API; self.OrsayMonoCL is the opaque
    # --- instance handle returned by the init call above.
    def SendWaveLength(self,wavelength:float) -> None:
        _SendWaveLength(self.OrsayMonoCL,wavelength)
    #
    def SendGrating(self, gratingnumber:float)-> None:
        _SendGrating(self.OrsayMonoCL,gratingnumber)
    def SendSlitEntranceFront(self,value:float)->None:
        _SendSlitEntranceFront(self.OrsayMonoCL, value)
    def SendSlitEntranceSide(self,value:float)->None:
        _SendSlitEntranceSide(self.OrsayMonoCL,value)
    def SendSlitExitFront(self,value:float)-> None:
        _SendSlitExitFront(self.OrsayMonoCL, value)
    def SendSlitExitSide(self,value: float) -> None:
        _SendSlitExitSide(self.OrsayMonoCL, value)
    def SendStatusUpdate(self) -> None:
        _SendStatusUpdate(self.OrsayMonoCL)
    def SendSwitchToAxialEntry(self) -> None:
        _SendSwitchToAxialEntry(self.OrsayMonoCL)
    def SendSwitchToLateralEntry(self) -> None:
        _SendSwitchToLateralEntry(self.OrsayMonoCL)
    def GetCurrentWaveLength(self) -> float:
        return _GetCurrentWaveLength(self.OrsayMonoCL)
    def GetCurrentGroove(self) -> int:
        return _GetCurrentGroove(self.OrsayMonoCL)
    def InitSpectro(self,portnb:int) -> None:#CHECK: where to call this init? in the __init__ of this function, or from the "spectrodevice"?
        _InitSpectro(self.OrsayMonoCL,portnb)
    def gratingText(self,number: int) -> str:
        return _convertToString23(_gratingText(self.OrsayMonoCL,number))
    def GetCLSpectrumRange(self,pixelWidth: float,nbPixels: int) -> float:
        return _GetCLSpectrumRange(self.OrsayMonoCL,pixelWidth,nbPixels)
    def GetCLSpectrumCenter(self) -> float:
        return _GetCLSpectrumCenter(self.OrsayMonoCL)
    #
    def IsReady(self) -> int:
        return _IsReady(self.OrsayMonoCL)
    #
    def IsConnected(self) -> bool:
        return _IsConnected(self.OrsayMonoCL)
    # void (*statusUpdateA)(double waveLenght, int slitEntranceFront, int slitExitSide, int grating, int exitMirror):
    #STATUSUPDATEA= WINFUNCTYPE(None,c_double,c_int,c_int,c_int,c_int)#CHECK: pointer to a function!!! and not reused...
    #_statusUpdateA=_buildFunction(_library,[c_double,c_int,c_int,c_int,c_int],c_void_p)
    def spectroModel(self) -> str:
        return _spectroModel(self.OrsayMonoCL)
    def nbgratings(self) -> int:
        return _nbgratings(self.OrsayMonoCL)
    def grating(self) -> int:
        return _grating(self.OrsayMonoCL)
    def GratingsNames(self, grating:int)-> str:
        return _GratingsNames(self.OrsayMonoCL,grating)
    def Centre(self) -> float:
        return _Centre(self.OrsayMonoCL)
    def exitMirror(self) -> int:
        return _exitMirror(self.OrsayMonoCL)
    def hasEntranceAxialSlit(self) -> bool:
        return _hasEntranceAxialSlit(self.OrsayMonoCL)
    def hasEntranceSideSlit(self) -> bool:
        return _hasEntranceSideSlit(self.OrsayMonoCL)
    def hasExitAxialSlit(self) -> bool:
        return _hasExitAxialSlit(self.OrsayMonoCL)
    def hasExitSideSlit(self) -> bool:
        return _hasExitSideSlit(self.OrsayMonoCL)
    def EntranceAxialSlitValue(self):
        return _EntranceAxialSlitValue(self.OrsayMonoCL)
    def EntranceSideSlitValue(self):
        return _EntranceSideSlitValue(self.OrsayMonoCL)
    def ExitAxialSlitValue(self):
        return _ExitAxialSlitValue(self.OrsayMonoCL)
    def ExitSideSlitValue(self):
        return _ExitSideSlitValue(self.OrsayMonoCL)
    def MirrorSend(self, command:str):
        # The mirror protocol is ASCII-only; non-ASCII input raises here.
        return _MirrorSendCommand(self.OrsayMonoCL,command.encode('ascii'))
    ## Second Version. Compatible with VG Lumiere and ChromaTEM
    def gratingLPMM(self):
        """Return the lines/mm of the three gratings, parsed from their names."""
        lpmms = list()
        for i in range(3):
            lpmms.append(float(self.GratingsNames(i).decode().split('g/mm')[0]))
        return lpmms
    def gratingNames(self):
        """Return the decoded names of the three gratings."""
        grat = list()
        for i in range(3):
            grat.append(self.GratingsNames(i).decode())
        return grat
    def get_wavelength(self):
        # The DLL works in metres; expose nanometres to callers.
        wav = self.GetCurrentWaveLength()*1e9
        return wav
    def set_wavelength(self, wl):
        # wl is in nanometres; the DLL expects metres.
        self.SendWaveLength(wl*1e-9)
        return True
    def get_grating(self):
        return self.grating()
    def set_grating(self, value):
        self.SendGrating(value)
        return True
    def get_exit(self):
        # NOTE(review): despite the name, this reads the *entrance side*
        # slit (in micrometres) — confirm this is the intended slit.
        return self.EntranceSideSlitValue()*1e6
    def set_exit(self, value):
        self.SendSlitEntranceSide(value*1e-6)
        # 14 == 10+4: notify listeners that the entrance-side slit changed
        # (see the kind codes documented in __init__).
        self.sendmessage(14)
        return True
    def get_entrance(self):
        # Entrance axial (front) slit value, converted to micrometres.
        return self.EntranceAxialSlitValue()*1e6
    def set_entrance(self, value):
        self.SendSlitEntranceFront(value*1e-6)
        return True
    def get_which(self):
        # Map the DLL's mirror codes to 0 (axial) / 1 (lateral);
        # returns None implicitly for any other code.
        val = self.exitMirror()
        if val == 1: # Axial position
            return 0
        elif val == 2: # Lateral position
            return 1
    def set_which(self, value):
        if value==0: #AXIAL
            self.SendSwitchToAxialEntry()
        elif value==1: #LATERAL
            self.SendSwitchToLateralEntry()
    def get_specFL(self):
        # Fixed spectrometer focal length (mm) for this hardware.
        return 320.0
    def which_camera(self):
        return 'orsay_camera_eire'
    def camera_pixels(self):
        return 1600
    def deviation_angle(self):
        return 0.34
1857392 | import pytest
import requests
class APIClient:
    """
    Simplified HTTP client for working with an API.

    Holds a base URL; every request method appends *path* to it.
    """
    def __init__(self, base_address):
        self.base_address = base_address

    def create_session(self):
        """Return a fresh requests.Session."""
        return requests.Session()

    def custom(self, method, path="/", **kwargs):
        """Issue a request with an arbitrary HTTP *method*."""
        target = self.base_address + path
        return requests.request(method, url=target, **kwargs)

    def post(self, path="/", params=None, data=None, headers=None):
        """POST to base_address + path."""
        target = self.base_address + path
        return requests.post(url=target, params=params, data=data, headers=headers)

    def get(self, path="/", params=None):
        """GET from base_address + path."""
        target = self.base_address + path
        return requests.get(url=target, params=params)
def pytest_addoption(parser):
    """Register the --url option giving the base address of the service under test."""
    parser.addoption("--url", default="http://localhost:8888")
@pytest.fixture(scope="session")
def api_client(request):
base_url = request.config.getoption("--url")
return APIClient(base_address=base_url)
| StarcoderdataPython |
4893609 | from typing import Set
from fastapi import FastAPI
from pydantic import BaseModel
app = FastAPI()
class Item(BaseModel):
    """Request/response body for an item."""
    name: str
    description: str = None  # optional free-text description
    price: float
    tax: float = None  # optional tax amount
    tags: Set[str] = []  # unique tags; pydantic validates/coerces the set
@app.post(
    "/items/",
    response_model=Item,
    summary="Create an item",
    description="Create an item with all the information, name, description, price, tax and a set of unique tags",
)
async def create_item(item: Item):
    """Echo the validated item back to the client."""
    return item
| StarcoderdataPython |
class Solution(object):
    def XXX(self, root, targetSum):
        """
        Depth-first check whether any root-to-leaf path sums to targetSum.
        :type root: TreeNode
        :type targetSum: int
        :rtype: bool
        """
        if root is None:
            return False
        if root.left is None and root.right is None:
            # Leaf node: the path matches iff this node consumes the
            # remaining sum exactly.
            return root.val == targetSum
        remaining = targetSum - root.val
        return (self.XXX(root.left, remaining)
                or self.XXX(root.right, remaining))
| StarcoderdataPython |
8096975 | """
.. module:: wagtailsnapshotpublisher.views
"""
import json
import logging
from datetime import datetime
from django.apps import apps
from django.conf import settings
from django.forms.models import modelform_factory
from django.http import JsonResponse, HttpResponseServerError, Http404
from django.shortcuts import get_object_or_404, redirect, render
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.core import serializers
from django.db.models import Q
from wagtail.core.models import Page, PageRevision
from wagtail.admin import messages
from djangosnapshotpublisher.publisher_api import PublisherAPI
from djangosnapshotpublisher.models import ContentRelease
from .models import WSSPContentRelease, document_load_dynamic_elements
from .forms import PublishReleaseForm, FrozenReleasesForm
from .utils import get_dynamic_element_keys
from .signals import release_was_staged, reindex_release
logger = logging.getLogger('django')
DATETIME_FORMAT='%Y-%m-%d %H:%M'
#
# Return the stage and live content releases for a site.
#
def get_releases(request, site_code):
    """Return the stage and live content releases for *site_code* as JSON.

    Each entry is either None (no such release) or a dict with the
    release's uuid, title and publish_datetime.
    """
    # Dead code kept for reference: a previous version listed all live and
    # upcoming FROZEN releases instead of just the stage/live pair.
    # time_now = timezone.now()
    # logger.info('Getting releases for site code %s after %s', site_code, time_now.strftime(DATETIME_FORMAT))
    # response = list_live_and_upcoming_content_releases(site_code, 1, time_now) # Status FROZEN = 1
    # if response['status'] == 'success':
    #     releases = list(response['content'].values('uuid', 'title', 'publish_datetime', 'is_live'))
    #     logger.info('Releases are %s', releases)
    # else:
    #     return response
    live_release = None
    stage_release = None
    publisher_api = PublisherAPI()
    response = publisher_api.get_live_content_release(site_code)
    if response['status'] == 'success':
        live_release = {
            'uuid': response['content'].uuid,
            'title': response['content'].title,
            'publish_datetime': response['content'].publish_datetime,
        }
    response = publisher_api.get_stage_content_release(site_code)
    if response['status'] == 'success':
        stage_release = {
            'uuid': response['content'].uuid,
            'title': response['content'].title,
            'publish_datetime': response['content'].publish_datetime,
        }
    # Entries stay None when the publisher API reports no such release.
    return JsonResponse({
        'stage': stage_release,
        'live': live_release,
    }, safe=False)
def list_live_and_upcoming_content_releases(site_code, status=None, after=None):
    """
    Return content releases for *site_code*, optionally filtered by status
    and/or restricted to releases publishing at/after *after* (releases that
    are currently live are always kept when *after* is given).

    :param site_code: site identifier used to scope ContentRelease rows
    :param status: optional ContentRelease status value to filter on
    :param after: optional datetime lower bound on publish_datetime
    :return: dict with 'status' ('success'/'failed') and 'content'
        (a queryset on success, an error message on failure)
    """
    try:
        content_releases = ContentRelease.objects.filter(site_code=site_code)
        if status:
            content_releases = content_releases.filter(status=status)
        if after:
            content_releases = content_releases.filter(
                Q(publish_datetime__gte=after) | Q(is_live=True))
        return {'status': 'success', 'content': content_releases}
    except Exception:
        # BUG FIX: was a bare ``except:``, which also swallowed SystemExit
        # and KeyboardInterrupt and hid the real error; log it so failures
        # are diagnosable while preserving the failure return value.
        logger.exception('Unable to fetch upcoming content releases')
        return {'status': 'failed', 'content': 'Unable to fetch upcoming content releases'}
def get_content_details(site_code, release_uuid, content_type, content_key):
    """
    Return the JSON document for (content_type, content_key) from the given
    content release, or from the live release when *release_uuid* is falsy.

    For preview releases (status 0) whose document is missing, the lookup
    falls back to the release's base release (or the current live release
    when configured to use it). Dynamic elements recorded alongside the
    document are resolved into the returned data when present.

    :return: the parsed document dict on success, otherwise the error
        response dict from the publisher API.
    """
    publisher_api = PublisherAPI()
    content_release = None
    try:
        if release_uuid:
            # get ContentRelease
            content_release = WSSPContentRelease.objects.get(
                site_code=site_code,
                uuid=release_uuid,
            )
        else:
            # get live ContentRelease
            response = publisher_api.get_live_content_release(site_code)
            if response['status'] == 'error':
                return response
            release = response['content']
            content_release = WSSPContentRelease.objects.get(id=release.id)
            release_uuid = content_release.uuid
    except WSSPContentRelease.DoesNotExist:
        # Unknown release uuid: content_release stays None and the document
        # lookup below reports the appropriate error.
        pass

    # Fetch document from the content release.
    response = publisher_api.get_document_from_content_release(
        site_code,
        release_uuid,
        content_key,
        content_type,
    )

    base_content_release = None
    if response['status'] == 'error' and response['error_code'] == 'release_document_does_not_exist':
        # Release doc not found: for preview releases (status 0) try the
        # base release. FIX: guard against content_release being None
        # (unknown uuid) instead of raising AttributeError.
        if content_release is not None and content_release.status == 0:
            if content_release.use_current_live_as_base_release:
                response = publisher_api.get_live_content_release(site_code)
                if response['status'] == 'success':
                    release = response['content']
                    base_content_release = WSSPContentRelease.objects.get(id=release.id)
            else:
                base_content_release = content_release.base_release

        if base_content_release is not None:
            # Retry the document lookup against the base release.
            response = publisher_api.get_document_from_content_release(
                site_code,
                base_content_release.uuid,
                content_key,
                content_type,
            )

    if response['status'] != 'success':
        return response

    data = json.loads(response['content'].document_json)
    response_extra = publisher_api.get_document_extra_from_content_release(
        site_code,
        release_uuid,
        content_key,
        content_type,
    )
    if response_extra['status'] == 'success':
        try:
            dynamic_element_keys = json.loads(
                response_extra['content'].get(key='dynamic_element_keys').content)
            data, updated = document_load_dynamic_elements(
                content_release, data, dynamic_element_keys)
        except Exception:
            # FIX: narrowed from a bare ``except:``; missing or invalid
            # dynamic-element metadata is deliberately ignored.
            pass
    return data
def unpublish_page(request, page_id, release_id, recursively=False):
    """Un-publish a page (optionally with its descendants) from a release,
    then redirect back to the Wagtail explorer for its parent."""
    target = get_object_or_404(Page, id=page_id).specific
    target.unpublish_or_delete_from_release(release_id, recursively)
    parent_id = target.get_parent().id
    return redirect('wagtailadmin_explore', parent_id)
def unpublish_recursively_page(request, page_id, release_id):
    """Un-publish a page and all of its descendants from a release."""
    return unpublish_page(request, page_id, release_id, recursively=True)
def unpublish(request, content_app, content_class, content_id, release_id):
    """Un-publish a non-page model instance from a release and redirect to
    the model's admin listing."""
    model = apps.get_model(content_app, content_class)
    obj = get_object_or_404(model, id=content_id)
    obj.unpublish_or_delete_from_release(release_id)
    listing_url = absolute_path(request, content_app, content_class)
    return redirect(listing_url)
def republish_page_and_children(request, page_id=None):
    """GET: render a form listing the preview releases a page can be
    re-published to.  POST: re-publish the page (and its children) to the
    selected release and render the per-page results.

    POST parameter 'publish_current' limits the operation to pages already
    published to the release (note it is negated before being passed on).
    """
    page = Page.objects.get(id=page_id).specific
    if request.method == 'GET':
        site_code = page.site_code
        publisher_api = PublisherAPI()
        rsp = publisher_api.list_content_releases(site_code, status='0', after=None) # Fetch preview releases ('0' will be cast to an integer)
        if rsp['status'] == 'success':
            releases = rsp['content']
        else:
            # API failure: render the form with no selectable releases.
            releases = []
        details = {
            'page': page,
            'releases': releases,
        }
        return render(request, 'wagtailadmin/republish/detail.html', details)
    if request.method == 'POST':
        release_uuid = request.POST.get('release_uuid')
        publish_current = request.POST.get('publish_current') # Only re-publish pages that are already published to the release.
        release = WSSPContentRelease.objects.get(uuid=release_uuid)
        rsp = page.republish_to_release_with_id(release.id, not publish_current)
        if rsp['code'] == 0:
            error = None
            results = rsp['results']
            total_count = rsp['total_count']
            error_count = rsp['error_count']
        else:
            error = 'Failed to re-publish page: %s' % (rsp['message'])
            results = []
            total_count = None
            error_count = None
        details = {
            'page': page,
            'results': results,
            'results_total': total_count,
            'results_errors': error_count,
            'error': error,
        }
        return render(request, 'wagtailadmin/republish/results.html', details)
def unpublish_page_and_children(request, page_id=None):
    """GET: render a form listing the preview releases the page can be
    un-published from.  POST: un-publish the page (and its children) from the
    selected release and render the per-page results."""
    page = Page.objects.get(id=page_id).specific
    if request.method == 'GET':
        site_code = page.site_code
        publisher_api = PublisherAPI()
        rsp = publisher_api.list_content_releases(site_code, status='0', after=None) # Fetch preview releases ('0' will be cast to an integer)
        if rsp['status'] == 'success':
            releases = rsp['content']
        else:
            # API failure: render the form with no selectable releases.
            releases = []
        details = {
            'page': page,
            'releases': releases,
        }
        return render(request, 'wagtailadmin/unpublish/detail.html', details)
    if request.method == 'POST':
        release_uuid = request.POST.get('release_uuid')
        release = WSSPContentRelease.objects.get(uuid=release_uuid)
        rsp = page.unpublish_from_release_with_id(release.id)
        if rsp['code'] == 0:
            error = None
            results = rsp['results']
            total_count = rsp['total_count']
            error_count = rsp['error_count']
        else:
            error = 'Failed to un-publish page: %s' % (rsp['message'])
            results = []
            total_count = None
            error_count = None
        details = {
            'page': page,
            'results': results,
            'results_total': total_count,
            'results_errors': error_count,
            'error': error,
        }
        return render(request, 'wagtailadmin/unpublish/results.html', details)
def remove_page_and_children(request, page_id=None):
    """GET: render a form listing the preview releases the page can be
    removed from.  POST: remove the page (and its children) from the
    selected release and render the per-page results."""
    page = Page.objects.get(id=page_id).specific
    if request.method == 'GET':
        site_code = page.site_code
        publisher_api = PublisherAPI()
        rsp = publisher_api.list_content_releases(site_code, status='0', after=None) # Fetch preview releases ('0' will be cast to an integer)
        if rsp['status'] == 'success':
            releases = rsp['content']
        else:
            # API failure: render the form with no selectable releases.
            releases = []
        details = {
            'page': page,
            'releases': releases,
        }
        return render(request, 'wagtailadmin/remove/detail.html', details)
    if request.method == 'POST':
        release_uuid = request.POST.get('release_uuid')
        release = WSSPContentRelease.objects.get(uuid=release_uuid)
        rsp = page.remove_from_release_with_id(release.id)
        if rsp['code'] == 0:
            error = None
            results = rsp['results']
            total_count = rsp['total_count']
            error_count = rsp['error_count']
        else:
            # Fixed: this is the remove flow, not un-publish (message was
            # copy-pasted from unpublish_page_and_children).  Also dropped the
            # unused 'publish_current' POST read.
            error = 'Failed to remove page: %s' % (rsp['message'])
            results = []
            total_count = None
            error_count = None
        details = {
            'page': page,
            'results': results,
            'results_total': total_count,
            'results_errors': error_count,
            'error': error,
        }
        return render(request, 'wagtailadmin/remove/results.html', details)
def remove_page(request, page_id, release_id, recursively=False):
    """Delete a page (optionally with its descendants) from a release and
    return to the explorer view of its parent."""
    target = get_object_or_404(Page, id=page_id).specific
    target.unpublish_or_delete_from_release(release_id, recursively, True)
    parent = target.get_parent()
    return redirect('wagtailadmin_explore', parent.id)
def remove_recursively_page(request, page_id, release_id):
    """Delete a page and all of its descendants from a release."""
    return remove_page(request, page_id, release_id, recursively=True)
def remove(request, content_app, content_class, content_id, release_id):
    """Delete a non-page model instance from a release and redirect to the
    model's admin listing."""
    model = apps.get_model(content_app, content_class)
    obj = get_object_or_404(model, id=content_id)
    obj.unpublish_or_delete_from_release(release_id, False, True)
    listing_url = absolute_path(request, content_app, content_class)
    return redirect(listing_url)
def preview_model(request, content_app, content_class, content_id, preview_mode='default',
    load_dynamic_element=False):
    """Serialize unsaved form data (request.POST) for *content_class* and
    return it as JSON for previewing.

    ``content_id`` is accepted for URL-pattern compatibility but unused here.
    Returns HTTP 500 when the posted form does not validate.
    """
    model_class = apps.get_model(content_app, content_class)
    # Build a ModelForm over every model field so arbitrary POSTs can be bound.
    form_class = modelform_factory(
        model_class, fields=[field.name for field in model_class._meta.get_fields()])
    form = form_class(request.POST)
    if form.is_valid():
        # Serialize without persisting the instance.
        instance = form.save(commit=False)
        serializers = instance.get_serializers()
        serialized_page = serializers[preview_mode]['class'](instance=instance)
        data = serialized_page.data
        if load_dynamic_element:
            dynamic_element_keys = get_dynamic_element_keys(data)
            if dynamic_element_keys:
                data, updated = document_load_dynamic_elements(instance.live_release, data, dynamic_element_keys)
        return JsonResponse(data)
    else:
        if not settings.TESTING:
            print(form.errors)
        return HttpResponseServerError('Form is not valid')
def preview_instance(request, content_app, content_class, content_id, preview_mode='default',
    load_dynamic_element=True):
    """Serialize a saved model instance for previewing and return it as JSON.

    Unlike preview_model, dynamic elements are resolved by default.
    """
    model_class = apps.get_model(content_app, content_class)
    instance = model_class.objects.get(id=content_id)
    serializers = instance.get_serializers()
    serialized_page = serializers[preview_mode]['class'](instance=instance)
    data = serialized_page.data
    if load_dynamic_element:
        dynamic_element_keys = get_dynamic_element_keys(data)
        if dynamic_element_keys:
            data, updated = document_load_dynamic_elements(instance.live_release, data, dynamic_element_keys)
    return JsonResponse(data)
def compare_release(request, release_id, release_id_to_compare_to=None, set_live_button=False, set_stage_button=False):
    """Build the template context comparing *release_id* against another
    release (the current live one by default).

    Returns a context dict -- except on valid POSTs, where it may return an
    HttpResponse (via release_detail or release_set_live).

    NOTE(review): the two 'POST and release_id_to_compare_to is None' guards
    below are identical; the PublishReleaseForm branch only runs when the
    FrozenReleasesForm was invalid -- confirm this ordering is intentional.
    """
    publisher_api = PublisherAPI()
    release = WSSPContentRelease.objects.get(id=release_id)
    publish_release_form = PublishReleaseForm()
    frozen_releases_form = FrozenReleasesForm(release.site_code)
    if request.method == 'POST' and release_id_to_compare_to is None:
        frozen_releases_form = FrozenReleasesForm(release.site_code, request.POST)
        if frozen_releases_form.is_valid():
            # redirect to compare with this release
            release_id_to_compare_to = frozen_releases_form.cleaned_data['releases']
            return release_detail(request, release_id, set_live_button, set_stage_button, release_id_to_compare_to.id)
    if request.method == 'POST' and release_id_to_compare_to is None:
        publish_release_form = PublishReleaseForm(request.POST)
        if publish_release_form.is_valid():
            publish_datetime = publish_release_form.cleaned_data['publish_datetime']
            if publish_datetime:
                publish_datetime = publish_datetime.strftime('%Y-%m-%dT%H:%M:%S%z')
            return release_set_live(request, release_id, publish_datetime)
    # Hide the comparison selector when there are no frozen releases to pick.
    if frozen_releases_form.fields['releases'].queryset is None or \
        not frozen_releases_form.fields['releases'].queryset.exists():
        frozen_releases_form = None
    # get current live release
    compare_with_live = True
    response = publisher_api.get_live_content_release(release.site_code)
    if response['status'] == 'error':
        return {
            'release': release,
            'error_msg': response['error_msg'],
            'publish_release_form': publish_release_form,
        }
    release_to_compare_to = response['content']
    if release_id_to_compare_to and release_to_compare_to.id != release_id_to_compare_to:
        compare_with_live = False
        release_to_compare_to = WSSPContentRelease.objects.get(id=release_id_to_compare_to)
    else:
        release_to_compare_to = WSSPContentRelease.objects.get(id=release_to_compare_to.id)
    response = publisher_api.compare_content_releases(release.site_code, release.uuid,
        release_to_compare_to.uuid)
    comparison = response['content']
    added_pages = []
    removed_pages = []
    changed_pages = []
    extra_contents = []
    # Bucket each diff item by kind; page items that fail to resolve are
    # logged and skipped by the outer except.
    for item in comparison:
        if item['content_type'] == 'page':
            try:
                if item['diff'] == 'Added':
                    try:
                        page_revision = PageRevision.objects.get(id=item['parameters']['revision_id'])
                    except:
                        # NOTE(review): on lookup failure page_revision is
                        # unbound below; the resulting NameError is swallowed
                        # by the outer 'except Exception'.
                        logger.info('Page has been deleted from Wagtail: %s' % (item))
                        item['deleted_in_wagtail'] = True
                    item['page_revision'] = page_revision
                    item['title'] = json.loads(page_revision.content_json)['title']
                    added_pages.append(item)
                if item['diff'] == 'Removed':
                    try:
                        page_revision = PageRevision.objects.get(id=item['parameters']['revision_id'])
                    except:
                        logger.info('Page has been deleted from Wagtail: %s' % (item))
                        item['deleted_in_wagtail'] = True
                    item['title'] = json.loads(page_revision.content_json)['title']
                    item['page_revision'] = page_revision
                    removed_pages.append(item)
                if item['diff'] == 'Changed' and 'revision_id' in item['parameters']['release_from']:
                    try:
                        page_revision = PageRevision.objects.get(id=item['parameters']['release_from']['revision_id'])
                    except:
                        logger.info('Page has been deleted from Wagtail: %s' % (item))
                        item['deleted_in_wagtail'] = True
                    item['page_revision_from'] = page_revision
                    item['page_revision_compare_to'] = PageRevision.objects.get(
                        id=item['parameters']['release_compare_to']['revision_id'])
                    item['title'] = json.loads(page_revision.content_json)['title']
                    changed_pages.append(item)
            except Exception as e:
                logger.info('Error while comparing item: %s' % (item))
        else:
            extra_contents.append(item)
    return {
        'comparison': comparison,
        'added_pages': added_pages,
        'changed_pages': changed_pages,
        'removed_pages': removed_pages,
        'extra_contents': json.dumps(extra_contents, indent=4) if extra_contents and \
            request.user.has_perm('wagtailadmin.access_dev') else None,
        'release': release,
        'release_to_compare_to': release_to_compare_to,
        'publish_release_form': publish_release_form,
        'frozen_releases_form': frozen_releases_form,
        'compare_with_live': compare_with_live,
    }
def release_detail(request, release_id, set_live_button=False, set_stage_button=False, release_id_to_compare_to=None):
    """Render the release comparison/detail template, passing through the
    button-visibility flags to the context."""
    context = compare_release(request, release_id, release_id_to_compare_to, set_live_button, set_stage_button)
    context.update({
        'set_live_button': set_live_button,
        'set_stage_button': set_stage_button,
    })
    return render(request, 'wagtailadmin/release/detail.html', context)
def release_reindex(request, release_id):
    """Fire the reindex_release signal for a release, flash a success
    message, and return to the release listing."""
    target = WSSPContentRelease.objects.get(id=release_id)
    reindex_release.send(sender=type(target), release=target)
    messages.success(request, 'Indexer command sent')
    listing_url = absolute_path(request, 'wagtailsnapshotpublisher', 'wsspcontentrelease')
    return redirect(listing_url)
def release_set_live(request, release_id, publish_datetime=None, set_live_button=False):
    """Schedule (or immediately trigger) setting a release live.

    ``publish_datetime`` may be passed directly or taken from a posted
    PublishReleaseForm ('now' clears it so the release goes live at once).
    ``set_live_button`` is accepted for URL compatibility but unused here.
    """
    if not publish_datetime:
        if request and request.POST:
            form = PublishReleaseForm(request.POST)
            if form.is_valid():
                if form.cleaned_data['publish_type'] == 'now':
                    # Will publish immediately.
                    publish_datetime = None
                else:
                    publish_datetime = form.cleaned_data['publish_datetime']
            else:
                logger.debug('Set live form is invalid')
    publisher_api = PublisherAPI()
    release = WSSPContentRelease.objects.get(id=release_id)
    response = None
    # save publisher user in release
    if request:
        release.publisher = request.user
        release.save()
    if publish_datetime:
        logger.info('Setting release %s live at %s', release.uuid, publish_datetime)
        response = publisher_api.set_live_content_release(
            release.site_code,
            release.uuid,
            publish_datetime,
        )
    else:
        logger.info('Setting release %s live immediately', release.uuid)
        response = publisher_api.set_live_content_release(release.site_code, release.uuid)
    # NOTE(review): API errors are ignored here (error check commented out
    # below) -- confirm this is intentional.
    # if response['status'] != 'success':
    #     raise Http404(response['error_msg'])
    return redirect(absolute_path(request, 'wagtailsnapshotpublisher', 'wsspcontentrelease'))
def release_set_stage(request, release_id, publish_datetime=None, set_stage_button=False):
    """Stage a release: freeze it for a future datetime, or stage it now.

    ``set_stage_button`` is accepted for URL compatibility but unused here.
    Raises Http404 when the publisher API reports an error.
    """
    if not publish_datetime:
        if request and request.POST:
            form = PublishReleaseForm(request.POST)
            if form.is_valid():
                if form.cleaned_data['publish_type'] == 'now':
                    # Will publish immediately.
                    publish_datetime = None
                else:
                    publish_datetime = form.cleaned_data['publish_datetime']
            else:
                logger.debug('Set stage form is invalid')
    publisher_api = PublisherAPI()
    release = WSSPContentRelease.objects.get(id=release_id)
    response = None
    # save publisher user in release
    if request:
        release.publisher = request.user
        release.save()
    if publish_datetime:
        logger.info('Setting release %s stage at %s', release.uuid, publish_datetime)
        # A future datetime freezes the release rather than staging it now.
        response = publisher_api.freeze_content_release(release.site_code, release.uuid,
            publish_datetime)
    else:
        logger.info('Setting release %s stage immediately', release.uuid)
        response = publisher_api.set_stage_content_release(release.site_code, release.uuid)
    if response['status'] != 'success':
        raise Http404(response['error_msg'])
    # Send release_was_staged signal.
    release_was_staged.send(sender=release.__class__, release=release)
    WSSPContentRelease.objects.stage(
        site_code=release.site_code,
    )
    return redirect(absolute_path(request, 'wagtailsnapshotpublisher', 'wsspcontentrelease'))
def release_unset_stage(request, release_id):
    """Remove the 'staged' state from a release via the publisher API, then
    redirect to the release listing.

    Raises Http404 with the API error message if the call fails.
    """
    publisher_api = PublisherAPI()
    release = WSSPContentRelease.objects.get(id=release_id)
    # Record which user performed the action on the release.
    if request:
        release.publisher = request.user
        release.save()
    # Fixed: the previous log line was copy-pasted from release_set_stage and
    # claimed the release was being staged.
    logger.info('Unsetting stage for release %s', release.uuid)
    response = publisher_api.unset_stage_content_release(release.site_code, release.uuid)
    if response['status'] != 'success':
        raise Http404(response['error_msg'])
    return redirect(absolute_path(request, 'wagtailsnapshotpublisher', 'wsspcontentrelease'))
def release_archive(request, release_id):
    """Archive a release through the publisher API and return to the
    release listing; raise Http404 on API failure."""
    api = PublisherAPI()
    target = WSSPContentRelease.objects.get(id=release_id)
    rsp = api.archive_content_release(target.site_code, target.uuid)
    if rsp['status'] != 'success':
        raise Http404(rsp['error_msg'])
    return redirect(absolute_path(request, 'wagtailsnapshotpublisher', 'wsspcontentrelease'))
def get_document_release(request, site_code, content_release_uuid=None, content_type='content',
    content_key=None):
    """JSON endpoint: return a release document, or the publisher API error
    payload.  Falls back to the live release when no UUID is given."""
    return JsonResponse(
        get_content_details(site_code, content_release_uuid, content_type, content_key)
    )
def release_restore(request, release_id):
    """Restore a past release by creating a new release based on it and
    setting that new release live.

    Raises Http404 when *release_id* is the current live release or is not
    among the restorable (previously live) releases.
    """
    try:
        release_to_restore = WSSPContentRelease.objects.get(id=release_id)
        site_code = release_to_restore.site_code
        live_release = WSSPContentRelease.objects.live(site_code=site_code)
        # Validation only: succeeds iff the release is a past live release
        # other than the current one; otherwise DoesNotExist is raised.
        WSSPContentRelease.objects.lives(
            site_code=site_code,
        ).exclude(
            pk=live_release.pk,
        ).get(
            pk=release_to_restore.pk,
        )
    except WSSPContentRelease.DoesNotExist:
        # Fixed grammar of the user-facing message ('restore' -> 'restored').
        raise Http404(_('This release cannot be restored'))
    title = release_to_restore.title
    if not release_to_restore.restored:
        # Only tag the title once, even across repeated restores.
        title = '{} - Restored'.format(release_to_restore.title)
    release = WSSPContentRelease(
        version_type=0,
        title=title,
        site_code=site_code,
        base_release=release_to_restore,
        restored=True,
    )
    release.save()
    return release_set_live(request, release.id)
def release_unfreeze(request, release_id):
    """Move a frozen release back to preview (status 0) and clear its
    scheduled publish datetime.

    Raises Http404 when the release does not exist.
    """
    try:
        release = WSSPContentRelease.objects.get(id=release_id)
        release.status = 0
        release.publish_datetime = None
        release.save()
    except WSSPContentRelease.DoesNotExist:
        # Fixed: the message previously said 'restore' (copy-pasted from
        # release_restore) and had a grammar error.
        raise Http404(_('This release cannot be unfrozen'))
    return redirect(absolute_path(request, 'wagtailsnapshotpublisher', 'wsspcontentrelease'))
def absolute_path(request, content_app, content_class):
    """Build the admin listing URL '/<admin-prefix>/<app>/<model>/' from the
    first segment of the current request path.

    ``str.lstrip('/')`` is a no-op when there is no leading slash, so the
    previous ``startswith`` guard was redundant and has been removed.
    """
    admin_prefix = request.path.lstrip('/').split('/')[0]
    return '/{}/{}/{}/'.format(admin_prefix, content_app, content_class)
| StarcoderdataPython |
1904896 | from typing import Union, Callable
import torch
from torch import Tensor
def normalize_audio(signal: Tensor, normalization: Union[bool, float, Callable]) -> None:
    """Normalize an audio tensor in place.

    ``normalization`` may be falsy (no-op), ``True`` (divide by 2**31, the
    full scale of SoX's internal 32-bit signed integers), a number (divide
    by that value), or a callable taking the tensor and returning a divisor.
    """
    if not normalization:
        return  # False / 0 / None: leave the signal untouched
    divisor = normalization
    if isinstance(divisor, bool):
        divisor = 1 << 31
    if isinstance(divisor, (float, int)):
        # normalize with custom value
        signal /= divisor
    elif callable(divisor):
        signal /= divisor(signal)
def check_input(src: Tensor) -> None:
    """Validate that *src* is a CPU-resident torch tensor.

    Raises TypeError otherwise; returns None on success.
    """
    if torch.is_tensor(src):
        if src.is_cuda:
            raise TypeError('Expected a CPU based tensor, got %s' % type(src))
    else:
        raise TypeError('Expected a tensor, got %s' % type(src))
| StarcoderdataPython |
1603912 | from typing import List, Optional, TreeNode
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def sortedArrayToBST(self, nums: List[int]) -> Optional[TreeNode]:
        """Build a height-balanced BST from an ascending-sorted list.

        The middle element of each sub-range becomes the subtree root, so
        the left/right halves differ in size by at most one element.

        Fixed: dataset residue ('| StarcoderdataPython |') was fused onto
        the final return line, making the original code invalid.
        """
        def build(lo, hi):
            # Empty range -> no node.
            if lo > hi:
                return None
            mid = (lo + hi) // 2
            node = TreeNode(nums[mid])
            node.left = build(lo, mid - 1)
            node.right = build(mid + 1, hi)
            return node
        return build(0, len(nums) - 1)
4840763 | from django.conf.urls import url
from . import views
# URL routes: each prefix gets create/new/index views; the detail models
# (roughs, fairways, tees, greens) also get pk-based detail/edit/update views.
urlpatterns = [
    # 'dt' routes.
    url(r'^dt/create/', views.dtCreate, name="dt_create"),
    url(r'^dt/new/', views.dtNew, name="dt_new"),
    url(r'^dt/', views.dtIndex, name="dt_index"),
    # 'qt' routes.
    url(r'^qt/create/', views.qtCreate, name="qt_create"),
    url(r'^qt/new/', views.qtNew, name="qt_new"),
    url(r'^qt/', views.qtIndex, name="qt_index"),
    # Roughs routes (pk-specific patterns must precede the bare index).
    url(r'^roughs/(?P<pk>\d+)/update/', views.roughsUpdate,
        name="roughs_update"),
    url(r'^roughs/(?P<pk>\d+)/edit/', views.roughsEdit,
        name="roughs_edit"),
    url(r'^roughs/(?P<pk>\d+)/', views.roughsDetail,
        name="roughs_detail"),
    url(r'^roughs/create/', views.roughsCreate, name="roughs_create"),
    url(r'^roughs/new/', views.roughsNew, name="roughs_new"),
    url(r'^roughs/', views.roughsIndex, name="roughs_index"),
    # Fairways routes.
    url(r'^fairways/(?P<pk>\d+)/update/', views.fairwaysUpdate,
        name="fairways_update"),
    url(r'^fairways/(?P<pk>\d+)/edit/', views.fairwaysEdit,
        name="fairways_edit"),
    url(r'^fairways/(?P<pk>\d+)/', views.fairwaysDetail,
        name="fairways_detail"),
    url(r'^fairways/create/', views.fairwaysCreate,
        name="fairways_create"),
    url(r'^fairways/new/', views.fairwaysNew, name="fairways_new"),
    url(r'^fairways/', views.fairwaysIndex, name="fairways_index"),
    # Tees routes.
    url(r'^tees/(?P<pk>\d+)/update/', views.teesUpdate,
        name="tees_update"),
    url(r'^tees/(?P<pk>\d+)/edit/', views.teesEdit, name="tees_edit"),
    url(r'^tees/(?P<pk>\d+)/', views.teesDetail, name="tees_detail"),
    url(r'^tees/create/', views.teesCreate, name="tees_create"),
    url(r'^tees/new/', views.teesNew, name="tees_new"),
    url(r'^tees/', views.teesIndex, name="tees_index"),
    # Greens routes.
    url(r'^greens/(?P<pk>\d+)/update/', views.greensUpdate,
        name="greens_update"),
    url(r'^greens/(?P<pk>\d+)/edit/', views.greensEdit,
        name="greens_edit"),
    url(r'^greens/(?P<pk>\d+)/', views.greensDetail,
        name="greens_detail"),
    url(r'^greens/create/', views.greensCreate, name="greens_create"),
    url(r'^greens/new/', views.greensNew, name="greens_new"),
    url(r'^greens/', views.greensIndex, name="greens_index"),
    # Catch-all site index (must stay last).
    url(r'^', views.index, name="index"),
]
| StarcoderdataPython |
42228 | from pandapower.shortcircuit.calc_sc import calc_sc
from pandapower.shortcircuit.toolbox import * | StarcoderdataPython |
1902868 | <reponame>RodneyByte/main
import sys
from PyQt4 import QtCore, QtGui
from PyQt4.QtGui import QApplication
from PyQt4.QtCore import QUrl
from PyQt4.QtWebKit import QWebView
# PyQt4 API-version compatibility shims: older bindings expose
# QString.fromUtf8 / UnicodeUTF8; newer ones (API v2) do not, so fall back
# to an identity function and to translate() without an encoding argument.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class BrowserDialog(object):
    """Qt Designer-style UI holder: builds a 1024x768 dialog containing a
    URL line edit above a QWebView."""
    def setupUi(self, Dialog):
        """Create and lay out the dialog's widgets."""
        Dialog.setObjectName(_fromUtf8("Dialog"))
        Dialog.resize(1024, 768)
        # Web view fills the dialog below the address bar.
        self.qwebview = QWebView(Dialog)
        self.qwebview.setGeometry(QtCore.QRect(0, 50, 1020, 711))
        self.qwebview.setObjectName(_fromUtf8("kwebview"))
        # Address bar across the top.
        self.lineEdit = QtGui.QLineEdit(Dialog)
        self.lineEdit.setGeometry(QtCore.QRect(10, 20, 1000, 25))
        self.lineEdit.setObjectName(_fromUtf8("lineEdit"))
        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)
    def retranslateUi(self, Dialog):
        """Apply translated UI strings to the dialog."""
        Dialog.setWindowTitle(_translate("Main - Simple, Privacy", "Main Browser", None))
| StarcoderdataPython |
3530501 | # Training code for the CVAE driver sensor model. Code is adapted from: https://github.com/sisl/EvidentialSparsification and
# https://github.com/StanfordASL/Trajectron-plus-plus.
import os
import time
# Fix all RNG seeds up-front so training runs are reproducible.
seed = 123
import numpy as np
np.random.seed(seed)
from matplotlib import pyplot as plt
import torch
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
# Force deterministic cuDNN kernels (slower, but reproducible).
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
import torch.nn as nn
torch.autograd.set_detect_anomaly(True)
from copy import deepcopy
import pdb
import io
import PIL.Image
from tqdm import tqdm
import argparse
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torch.optim.lr_scheduler import ExponentialLR
from collections import OrderedDict, defaultdict
# Run from the repository root so the src.* imports below resolve.
os.chdir("../..")
from src.utils.utils_model import to_var
from src.driver_sensor_model.models_cvae import VAE
from src.utils.data_generator import *
import time  # NOTE(review): duplicate of the 'import time' above.
import torch._utils
# Backward-compat shim: older torch versions lack _rebuild_tensor_v2, which
# newer checkpoints reference when unpickling; provide it if missing.
try:
    torch._utils._rebuild_tensor_v2
except AttributeError:
    def _rebuild_tensor_v2(storage, storage_offset, size, stride, requires_grad, backward_hooks):
        tensor = torch._utils._rebuild_tensor(storage, storage_offset, size, stride)
        tensor.requires_grad = requires_grad
        tensor._backward_hooks = backward_hooks
        return tensor
    torch._utils._rebuild_tensor_v2 = _rebuild_tensor_v2
def plot_bar(alpha_p, alpha_q, args):
    """Render side-by-side bar charts of the prior (alpha_p) and posterior
    (alpha_q) latent-class probabilities and return the plot as an image
    tensor (for TensorBoard logging)."""
    fig = plt.figure()
    positions = np.arange(args.latent_size)
    # alpha_p (green) plotted first, shifted left; alpha_q (blue) shifted right.
    for probs, shift, colour, tag in ((alpha_p, -0.1, 'g', 'alpha_p'),
                                      (alpha_q, 0.1, 'b', 'alpha_q')):
        plt.bar(positions + shift, probs.cpu().data.numpy(), width=0.2,
                align='center', color=colour, label=tag)
    plt.legend()
    buffer = io.BytesIO()
    plt.savefig(buffer, format='jpeg', dpi=200)
    plt.close(fig)
    buffer.seek(0)
    return transforms.ToTensor()(PIL.Image.open(buffer))
def plot_scatter(pos_x, pos_y, acc):
    """Scatter-plot positions coloured by *acc* (with a colourbar) and
    return the rendered figure as an image tensor."""
    fig = plt.figure()
    plt.scatter(pos_x, pos_y, c=acc)
    # Equal aspect so spatial distances are not distorted.
    plt.gca().set_aspect('equal', adjustable='datalim')
    plt.colorbar()
    stream = io.BytesIO()
    plt.savefig(stream, format='jpeg')
    plt.close(fig)
    stream.seek(0)
    pil_image = PIL.Image.open(stream)
    return transforms.ToTensor()(pil_image)
def kl_weight_schedule(iter, beta, crossover):
    """Sigmoid annealing schedule for the KL weight.

    Ramps from 0 to *beta*, centered at step *crossover* with a ramp width
    of crossover/10.  Returns a scalar tensor on CUDA when available.
    """
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    lo = torch.tensor(0.0, device=device)
    hi = torch.tensor(beta, device=device)
    center = torch.tensor(float(crossover), device=device)
    ramp = torch.tensor(float(crossover) / 10.0, device=device)
    step = torch.tensor(float(iter), device=device)
    return lo + (hi - lo) * torch.sigmoid((step - center) * (1. / ramp))
# Modified ELBO loss.
def loss_fn(recon_x, x, q_dist, p_dist, output_all_c, y, iter, args, mode='train'):
    """Class-balanced reconstruction + annealed KL + mutual-information loss.

    recon_x: decoder output; either per-latent-class (latent_size x grid) or
        batch-aligned with x.
    x: target occupancy grids with cell values in {0, 0.5, 1}.
    q_dist / p_dist: posterior / prior OneHotCategorical over latent classes.
    output_all_c, y: unused here (kept for signature compatibility).
    mode: 'train' returns the full objective; 'inf' returns the
        reconstruction terms only.

    Returns (loss, rec_loss, kl_disc_loss, kl_weight, rec_loss_occ,
    rec_loss_free, mutual_info).
    """
    # Load the arguments for the loss function.
    kl_min = args.kl_min
    beta = args.beta
    crossover = args.crossover
    alpha = args.alpha
    mut_info_mode = args.mut_info
    norm = args.norm
    if torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')
    # Class-balancing weights: the rarer class (free vs occupied) gets the
    # larger weight.  Cells equal to 0.5 (unknown) are excluded from counts.
    num_free = torch.sum(x == 0).double()
    num_occ = torch.sum(x == 1).double()
    num_batch = num_free + num_occ
    rec_weight_free = 1.0 - num_free/num_batch
    rec_weight_occ = 1.0 - num_occ/num_batch
    # Repeat from: latent_size x grid_flat to batch x latent_size x grid flat.
    if recon_x.shape[0] != x.shape[0]:
        recon_x = recon_x.flatten(start_dim=1).unsqueeze(0)
        recon_x = recon_x.repeat(x.shape[0], 1, 1)
        # Repeat from: batch_size x grid_flat to batch x latent_size x grid flat.
        x = x.flatten(start_dim=1).unsqueeze(1)
        x = x.repeat(1, recon_x.shape[1], 1)
    else:
        recon_x = recon_x.flatten(start_dim=1)
        x = x.flatten(start_dim=1)
    mask_free = (x == 0).float()
    mask_occ = (x == 1).float()
    bce = torch.nn.BCELoss(reduction='none')
    rec_loss = bce(recon_x, x)
    if norm:
        # Weighted sum using q probabilities along latent_size dimension.
        rec_loss_weighted = rec_loss*mask_occ*rec_weight_occ + rec_loss*mask_free*rec_weight_free
        rec_loss_weighted = rec_loss_weighted.sum(-1)
        # Check if we have all the latent classes or just one.
        if len(recon_x.shape) > 2:
            rec_loss_weighted = (rec_loss_weighted*q_dist.probs).sum(1).mean(0)
        else:
            rec_loss_weighted = rec_loss_weighted.mean(0)
    else:
        rec_loss_weighted = rec_loss.sum(-1)
        # Check if we have all the latent classes or just one.
        if len(recon_x.shape) > 2:
            rec_loss_weighted = (rec_loss_weighted*q_dist.probs).sum(1).mean(0)
        else:
            rec_loss_weighted = rec_loss_weighted.mean(0)
    # Check if we have all the latent classes or just one.
    if len(recon_x.shape) > 2:
        # Sum over grid dimension because the grid cells are independent.
        rec_loss_occ = ((rec_loss*mask_occ*rec_weight_occ).sum(-1)*q_dist.probs).sum(1).mean(0)
        rec_loss_free = ((rec_loss*mask_free*rec_weight_free).sum(-1)*q_dist.probs).sum(1).mean(0)
        rec_loss = (rec_loss.sum(-1)*q_dist.probs).sum(1).mean(0)
    else:
        # Sum over grid dimension because the grid cells are independent.
        rec_loss_occ = ((rec_loss*mask_occ*rec_weight_occ).sum(-1)).mean(0)
        rec_loss_free = ((rec_loss*mask_free*rec_weight_free).sum(-1)).mean(0)
        rec_loss = (rec_loss.sum(-1)).mean(0)
    if mode == 'train':
        # Sigmoid-annealed KL weight (beta warm-up).
        kl_weight = kl_weight_schedule(iter, beta, crossover)
        kl_disc_loss = torch.distributions.kl.kl_divergence(q_dist, p_dist)
        if torch.any(torch.isinf(kl_disc_loss)):
            pdb.set_trace()
        kl_disc_loss = torch.mean(kl_disc_loss, dim=0, keepdim=False)
        if kl_min > 0:
            # NOTE(review): kl_lower_bounded is computed but never used, so
            # kl_min currently has no effect on the objective -- confirm.
            kl_lower_bounded = torch.clamp(kl_disc_loss, min=kl_min)
        # Mutual information surrogate under the prior: entropy of the batch
        # marginal minus the mean per-sample entropy.
        dist = p_dist.__class__
        H_y = dist(probs=p_dist.probs.mean(dim=0)).entropy()
        mutual_info = (H_y - p_dist.entropy().mean(dim=0)).sum()
        if mut_info_mode == 'None':
            mi_weight = 0.0
        elif mut_info_mode == 'kl':
            # Anneal the MI weight with the same schedule as the KL weight.
            mi_weight = kl_weight_schedule(iter, alpha, crossover)
        elif mut_info_mode == 'const':
            mi_weight = alpha
        loss = rec_loss_weighted + kl_weight*kl_disc_loss - mi_weight*mutual_info
    elif mode == 'inf':
        # Inference mode: reconstruction objective only.
        loss = rec_loss_weighted
        kl_disc_loss = 0.0
        kl_weight = 0.0
        mutual_info = 0.0
        mse_c_loss = 0.0  # NOTE(review): assigned but never used/returned.
    return loss, rec_loss, kl_disc_loss, kl_weight, rec_loss_occ, rec_loss_free, mutual_info
def train(data_loader, vae, optimizer, args, writer, tracker_global_train, epoch, num_train):
    """Run one training epoch for the CVAE: optimize on each batch and log
    losses, metrics, and sample reconstructions to TensorBoard.

    NOTE(review): 'torchvision.utils' is referenced below but only
    'from torchvision import datasets, transforms' is visible at the top of
    the file -- confirm 'import torchvision' exists elsewhere.
    """
    vae.train()
    for iteration, (y, x, sources) in enumerate(data_loader):
        # Reshape features.
        optimizer.zero_grad()
        # NOTE(review): start/start_new below look like leftover timing probes.
        start = time.time()
        # Grids are 20x30 occupancy maps with a single channel.
        x = to_var(x).float().view(x.shape[0],1,20,30)
        y = to_var(y).float()
        batch_size = x.shape[0]
        start_new = time.time()
        recon_x, alpha_q, alpha_p, alpha_q_lin, alpha_p_lin, output_all_c, z = vae(x, y)
        recon_x_inf_p_1, _, _, _, _ = vae.inference(n=1, c=y, mode='most_likely')
        # Add epsilon to q and p if elements of q are close to zero (KL will be Inf otherwise).
        if torch.min(alpha_q) < 1e-15:
            alpha_q = alpha_q.clone() + 1e-15
        if torch.min(alpha_p) < 1e-15:
            alpha_p = alpha_p.clone() + 1e-15
        # Form distributions out of alpha_q and alpha_p.
        q_dist = torch.distributions.one_hot_categorical.OneHotCategorical(probs=alpha_q)
        p_dist = torch.distributions.one_hot_categorical.OneHotCategorical(probs=alpha_p)
        start_new = time.time()
        # NOTE(review): M_N is computed but never used.
        M_N = batch_size/num_train*1.0
        loss, rec, kl, kl_weight, rec_loss_occ, rec_loss_free, mutual_info = loss_fn(recon_x, x, q_dist, p_dist, output_all_c, y, epoch*len(data_loader)+iteration, args)
        loss_inf, rec_inf, _, _, rec_loss_occ_inf, rec_loss_free_inf, _ = loss_fn(recon_x_inf_p_1, x, q_dist, p_dist, output_all_c, y, epoch*len(data_loader)+iteration, args, mode='inf')
        # Log the training objective and its components.
        writer.add_scalar('train/Loss', (loss.data).unsqueeze(-1), torch.Tensor([epoch*len(data_loader)+iteration]).cuda())
        writer.add_scalar('train/Loss/KL', (kl.data).unsqueeze(-1), torch.Tensor([epoch*len(data_loader)+iteration]).cuda())
        writer.add_scalar('train/Loss/Mutual Information', (mutual_info.data).unsqueeze(-1), torch.Tensor([epoch*len(data_loader)+iteration]).cuda())
        writer.add_scalar('train/Loss/Reconstruction', (rec.data).unsqueeze(-1), torch.Tensor([epoch*len(data_loader)+iteration]).cuda())
        writer.add_scalar('train/Loss/Reconstruction Occ', (rec_loss_occ.data).unsqueeze(-1), torch.Tensor([epoch*len(data_loader)+iteration]).cuda())
        writer.add_scalar('train/Loss/Reconstruction Free', (rec_loss_free.data).unsqueeze(-1), torch.Tensor([epoch*len(data_loader)+iteration]).cuda())
        writer.add_scalar('train/Loss Inf', (loss_inf.data).unsqueeze(-1), torch.Tensor([epoch*len(data_loader)+iteration]).cuda())
        writer.add_scalar('train/Loss Inf/Reconstruction', (rec_inf.data).unsqueeze(-1), torch.Tensor([epoch*len(data_loader)+iteration]).cuda())
        writer.add_scalar('train/Loss Inf/Reconstruction Occ', (rec_loss_occ_inf.data).unsqueeze(-1), torch.Tensor([epoch*len(data_loader)+iteration]).cuda())
        writer.add_scalar('train/Loss Inf/Reconstruction Free', (rec_loss_free_inf.data).unsqueeze(-1), torch.Tensor([epoch*len(data_loader)+iteration]).cuda())
        writer.add_scalar('train/KL Weight', kl_weight, torch.Tensor([epoch*len(data_loader)+iteration]).cuda())
        tracker_global_train['loss'] = torch.cat((tracker_global_train['loss'], (loss.data).unsqueeze(-1)))
        tracker_global_train['it'] = torch.cat((tracker_global_train['it'], torch.Tensor([epoch*len(data_loader)+iteration]).cuda()))
        start_new = time.time()
        # Decode the most-likely posterior latent class, then ternarize:
        # >= 0.6 -> occupied (1), <= 0.4 -> free (0), in between -> unknown (0.5).
        recon_x_inf_p = recon_x[torch.argmax(q_dist.probs, dim=-1)]
        recon_x_inf_p = recon_x_inf_p.view(x.shape)
        recon_x_inf_pred = (recon_x_inf_p >= 0.6).float()
        recon_x_inf_pred[recon_x_inf_p <= 0.4] = 0.0
        recon_x_inf_pred[(recon_x_inf_p < 0.6)*(recon_x_inf_p > 0.4)] = 0.5
        acc = torch.mean((recon_x_inf_pred == x).float())
        mse = torch.mean(torch.pow(recon_x_inf_p - x, 2))
        # Per-class accuracy/MSE over occupied and free cells only.
        acc_occ = torch.mean((recon_x_inf_pred[x == 1] == 1).float())
        acc_free = torch.mean((recon_x_inf_pred[x == 0] == 0).float())
        mse_occ = torch.mean(torch.pow(recon_x_inf_p[x == 1] - 1, 2))
        mse_free = torch.mean(torch.pow(recon_x_inf_p[x == 0] - 0, 2))
        writer.add_scalar('train/Metrics/MSE', (mse.data).unsqueeze(-1), torch.Tensor([epoch*len(data_loader)+iteration]).cuda())
        writer.add_scalar('train/Metrics/Accuracy', (acc.data).unsqueeze(-1), torch.Tensor([epoch*len(data_loader)+iteration]).cuda())
        writer.add_scalar('train/Metrics/MSE Occ', (mse_occ.data).unsqueeze(-1), torch.Tensor([epoch*len(data_loader)+iteration]).cuda())
        writer.add_scalar('train/Metrics/MSE Free', (mse_free.data).unsqueeze(-1), torch.Tensor([epoch*len(data_loader)+iteration]).cuda())
        writer.add_scalar('train/Metrics/Accuracy Occ', (acc_occ.data).unsqueeze(-1), torch.Tensor([epoch*len(data_loader)+iteration]).cuda())
        writer.add_scalar('train/Metrics/Accuracy Free', (acc_free.data).unsqueeze(-1), torch.Tensor([epoch*len(data_loader)+iteration]).cuda())
        if iteration % args.print_every == 0 or iteration == len(data_loader)-1:
            print("Batch %04d/%i, Loss %9.4f"%(iteration, len(data_loader)-1, loss.data))
            print("recon_x", torch.max(recon_x_inf_p).data)
            print("recon", loss.data, "kl", kl.data, mutual_info.data)
            # Log 5 random ground-truth/reconstruction grid pairs as images.
            pred = []
            gt = []
            sample_numbers = np.random.choice(np.arange(y.shape[0]), size=5, replace=False)
            for i in sample_numbers:
                gt.append(torch.reshape(x[i], (-1,20,30)))
                pred.append(torch.reshape(recon_x_inf_p[i], (-1,20,30)))
            writer.add_image('Train/Occupancy Grid Ground Truth', torchvision.utils.make_grid(gt, nrow=len(gt)), torch.Tensor([epoch*len(data_loader)+iteration]).cuda())
            writer.add_image('Train/Occupancy Grid Reconstructed', torchvision.utils.make_grid(pred, nrow=len(pred)), torch.Tensor([epoch*len(data_loader)+iteration]).cuda())
        loss.backward()
        optimizer.step()
def test(data_loader, vae, optimizer, args, writer, epoch, num_val, nt):
    """Run one full evaluation pass of the CVAE over `data_loader`.

    Logs losses, occupancy-grid metrics, and reconstruction images to the
    TensorBoard `writer`, and returns the dataset-mean inference loss as a
    Python float.

    NOTE(review): `optimizer` and `num_val` are accepted but never used in
    this body. `nt` is only forwarded to `unnormalize`.
    """
    vae.eval()
    num = 0.0  # Running count of samples seen so far this pass.
    loss_inf_total = torch.zeros(1).cuda()
    with torch.no_grad():
        for iteration, (y, x, sources) in tqdm(enumerate(data_loader)):
            # Reshape features.
            num += y.shape[0]
            # x: occupancy grids flattened per sample; viewed as (B, 1, 20, 30).
            x = to_var(x).float().view(x.shape[0],1,20,30)
            y = to_var(y).float()
            # Un-normalize trajectories to recover plottable x/y positions.
            y_full = unnormalize(y.cpu().data.numpy(), nt)
            pos_x = y_full[:,:,0]
            pos_y = y_full[:,:,1]
            batch_size = x.shape[0]
            recon_x, alpha_q, alpha_p, alpha_q_lin, alpha_p_lin, output_all_c, z = vae(x, y)
            # Clamp away exact zeros so the categorical distributions below
            # are well defined.
            if torch.min(alpha_q) < 1e-15:
                alpha_q = alpha_q.clone() + 1e-15
            if torch.min(alpha_p) < 1e-15:
                alpha_p = alpha_p.clone() + 1e-15
            # Form distributions out of alpha_q and alpha_p.
            q_dist = torch.distributions.one_hot_categorical.OneHotCategorical(probs=alpha_q)
            p_dist = torch.distributions.one_hot_categorical.OneHotCategorical(probs=alpha_p)
            loss, rec, kl, kl_weight, rec_loss_occ, rec_loss_free, mutual_info = loss_fn(recon_x, x, q_dist, p_dist, output_all_c, y, epoch*len(data_loader)+iteration, args)
            # Get reconstruction from inference (all latent classes).
            sample_numbers = np.random.choice(np.arange(y.shape[0]), size=np.minimum(5, y.shape[0]), replace=False)
            # `num == y.shape[0]` holds only on the first batch, so the
            # expensive all-latent-classes decode runs once per pass.
            if ((y.shape[0] > 5) and (num == y.shape[0])):
                recon_x_inf_a, _, _, _, _ = vae.inference(n=args.latent_size, c=y[sample_numbers[0]].view(1, y.shape[1], y.shape[2]), mode='all')
            recon_x_inf_p, _, _, _, _ = vae.inference(n=1, c=y, mode='most_likely')
            loss_inf, rec_inf, _, _, rec_loss_occ_inf, rec_loss_free_inf, _= loss_fn(recon_x_inf_p, x, q_dist, p_dist, output_all_c, y, epoch*len(data_loader)+iteration, args, mode='inf')
            # Weight by batch size so the final mean is per-sample.
            loss_inf_total += loss_inf*y.shape[0]
            # Most likely class.
            recon_x = recon_x[torch.argmax(q_dist.probs, dim=-1)]
            recon_x = recon_x.view(x.shape)
            if ((y.shape[0] > 5) and (num == y.shape[0])):
                recon_x_inf_a = recon_x_inf_a.view(args.latent_size, 1, x.shape[-2], x.shape[-1])
            recon_x_inf_p = recon_x_inf_p.view(x.shape)
            # Threshold probabilities into occupied (1.0), free (0.0), and
            # uncertain (0.5) cells.
            recon_x_inf_pred = (recon_x_inf_p >= 0.6).float()
            recon_x_inf_pred[recon_x_inf_p <= 0.4] = 0.0
            recon_x_inf_pred[(recon_x_inf_p < 0.6)*(recon_x_inf_p > 0.4)] = 0.5
            acc = torch.mean((recon_x_inf_pred == x).float())
            mse = torch.mean(torch.pow(recon_x_inf_p - x, 2))
            acc_occ = torch.mean((recon_x_inf_pred[x == 1] == 1).float())
            acc_free = torch.mean((recon_x_inf_pred[x == 0] == 0).float())
            mse_occ = torch.mean(torch.pow(recon_x_inf_p[x == 1] - 1, 2))
            mse_free = torch.mean(torch.pow(recon_x_inf_p[x == 0] - 0, 2))
            # First-batch-only visualizations of sampled input trajectories
            # and the all-classes decode.
            if ((y.shape[0] > 5) and (num == y.shape[0])):
                y_plot = []
                for i in sample_numbers:
                    y_plot.append(plot_scatter(pos_x[i], pos_y[i], range(len(pos_x[i]))))
                writer.add_image('Decoded Latent Classes/Input States', torchvision.utils.make_grid(y_plot, nrow=5), torch.Tensor([epoch*len(data_loader)+iteration]).cuda())
                writer.add_image('Decoded Latent Classes/a', torchvision.utils.make_grid(recon_x_inf_a, nrow=int(np.sqrt(args.latent_size))), torch.Tensor([epoch*len(data_loader)+iteration]).cuda())
            writer.add_scalar('val/Loss', (loss.data).unsqueeze(-1), torch.Tensor([epoch*len(data_loader)+iteration]).cuda())
            writer.add_scalar('val/Loss/KL', (kl.data).unsqueeze(-1), torch.Tensor([epoch*len(data_loader)+iteration]).cuda())
            # NOTE(review): 'Mutual Information' logs `kl`, not `mutual_info`
            # -- looks like a copy/paste slip; confirm before relying on it.
            writer.add_scalar('val/Loss/Mutual Information', (kl.data).unsqueeze(-1), torch.Tensor([epoch*len(data_loader)+iteration]).cuda())
            writer.add_scalar('val/Loss/Reconstruction', (rec.data).unsqueeze(-1), torch.Tensor([epoch*len(data_loader)+iteration]).cuda())
            writer.add_scalar('val/Loss/Reconstruction Occ', (rec_loss_occ.data).unsqueeze(-1), torch.Tensor([epoch*len(data_loader)+iteration]).cuda())
            writer.add_scalar('val/Loss/Reconstruction Free', (rec_loss_free.data).unsqueeze(-1), torch.Tensor([epoch*len(data_loader)+iteration]).cuda())
            writer.add_scalar('val/Loss Inf', (loss_inf.data).unsqueeze(-1), torch.Tensor([epoch*len(data_loader)+iteration]).cuda())
            writer.add_scalar('val/Loss Inf/Reconstruction', (rec_inf.data).unsqueeze(-1), torch.Tensor([epoch*len(data_loader)+iteration]).cuda())
            writer.add_scalar('val/Loss Inf/Reconstruction Occ', (rec_loss_occ_inf.data).unsqueeze(-1), torch.Tensor([epoch*len(data_loader)+iteration]).cuda())
            writer.add_scalar('val/Loss Inf/Reconstruction Free', (rec_loss_free_inf.data).unsqueeze(-1), torch.Tensor([epoch*len(data_loader)+iteration]).cuda())
            writer.add_scalar('val/Metrics/MSE', (mse.data).unsqueeze(-1), torch.Tensor([epoch*len(data_loader)+iteration]).cuda())
            writer.add_scalar('val/Metrics/MSE Occ', (mse_occ.data).unsqueeze(-1), torch.Tensor([epoch*len(data_loader)+iteration]).cuda())
            writer.add_scalar('val/Metrics/MSE Free', (mse_free.data).unsqueeze(-1), torch.Tensor([epoch*len(data_loader)+iteration]).cuda())
            writer.add_scalar('val/Metrics/Accuracy', (acc.data).unsqueeze(-1), torch.Tensor([epoch*len(data_loader)+iteration]).cuda())
            writer.add_scalar('val/Metrics/Accuracy Occ', (acc_occ.data).unsqueeze(-1), torch.Tensor([epoch*len(data_loader)+iteration]).cuda())
            writer.add_scalar('val/Metrics/Accuracy Free', (acc_free.data).unsqueeze(-1), torch.Tensor([epoch*len(data_loader)+iteration]).cuda())
            # Side-by-side image grids: ground truth vs. inference (p) vs.
            # posterior (q) reconstructions for the sampled batch elements.
            pred_p = []
            pred_q = []
            gt = []
            alpha_plot = []
            for i in sample_numbers:
                gt.append(torch.reshape(x[i], (-1,20,30)))
                pred_q.append(torch.reshape(recon_x[i], (-1,20,30)))
                pred_p.append(torch.reshape(recon_x_inf_p[i], (-1,20,30)))
                if (num == y.shape[0]):
                    alpha_plot.append(plot_bar(alpha_p[i], alpha_q[i], args))
            writer.add_image('Val/Occupancy Grid Ground Truth', torchvision.utils.make_grid(gt, nrow=len(gt)), torch.Tensor([epoch*len(data_loader)+iteration]).cuda())
            writer.add_image('Val/p Occupancy Grid Reconstructed', torchvision.utils.make_grid(pred_p, nrow=len(pred_q)), torch.Tensor([epoch*len(data_loader)+iteration]).cuda())
            writer.add_image('Val/q Occupancy Grid Reconstructed', torchvision.utils.make_grid(pred_q, nrow=len(pred_p)), torch.Tensor([epoch*len(data_loader)+iteration]).cuda())
            if (num == y.shape[0]):
                writer.add_image('Val/alpha', torchvision.utils.make_grid(alpha_plot, nrow=len(alpha_plot)), torch.Tensor([epoch*len(data_loader)+iteration]).cuda())
    # Mean inference loss across every sample seen this pass.
    loss_inf_total = loss_inf_total/float(num)
    print("Batch %04d/%i, Loss %9.4f"%(iteration, len(data_loader)-1, loss.data))
    print("recon_x", torch.max(recon_x).data)
    print("recon", rec.data, "kl", kl.data)
    return loss_inf_total.item()
def gradient_vis(parameters, writer, num_epoch):
    """Log per-parameter gradient magnitude summaries to TensorBoard.

    parameters: iterable of (name, param) pairs, e.g. model.named_parameters().
    writer: SummaryWriter-like object exposing add_scalar(tag, value, step).
    num_epoch: step index used for the scalar plots' x-axis.
    """
    for name, param in parameters:
        # BUG FIX (idiom): the original used `type(param.grad) != type(None)`
        # and `param.grad == None`.  `is (not) None` is the correct identity
        # test and avoids torch's elementwise `==` semantics on tensors.
        if param.requires_grad and param.grad is not None:
            grad_abs = param.grad.abs()
            writer.add_scalar('Gradients/'+str(name)+'_avg', grad_abs.mean(), num_epoch)
            writer.add_scalar('Gradients/'+str(name)+'_max', grad_abs.max(), num_epoch)
        elif param.grad is None:
            # Flag parameters that never received a gradient.
            print(name)
def main(args):
    """Train and validate the CVAE driver-sensor model.

    Builds the train/val datasets, constructs the VAE and optimizer, runs
    the epoch loop (train + validation per epoch), checkpoints the model,
    and saves a loss plot.  `args` is the argparse namespace from __main__.
    """
    # `seed` is a module-level global (defined above this view); copied so
    # it can be embedded in the run name below.
    random = deepcopy(seed)
    ts = time.time()  # timestamp used to uniquify run/figure directories
    # Load data.
    nt = 10  # sequence length passed to SequenceGenerator/unnormalize
    dir = '/data/INTERACTION-Dataset-DR-v1_1/processed_data/driver_sensor_dataset/'
    models = 'cvae'
    dir_models = os.path.join('/models/', models)
    args.fig_root = dir_models
    if not os.path.isdir(dir_models):
        os.mkdir(dir_models)
        # NOTE(review): this prints right after *creating* the directory;
        # the message looks inverted -- confirm intent.
        print('Directory exists.')
    data_file_states = os.path.join(dir, 'states_shuffled_train.hkl')
    data_file_grids = os.path.join(dir, 'label_grids_shuffled_train.hkl')
    data_file_sources = os.path.join(dir, 'sources_shuffled_train.hkl')
    datasets = OrderedDict()
    datasets['train'] = SequenceGenerator(data_file_state=data_file_states, data_file_grid=data_file_grids, source_file=data_file_sources, nt=nt,
        batch_size=None, shuffle=True, sequence_start_mode='all', norm=True)
    print(len(datasets['train']))
    num_train = len(datasets['train'])
    # Test on validation data.
    data_file_states = os.path.join(dir, 'states_val.hkl')
    data_file_grids = os.path.join(dir, 'label_grids_val.hkl')
    data_file_sources = os.path.join(dir, 'sources_val.hkl')
    datasets['val'] = SequenceGenerator(data_file_state=data_file_states, data_file_grid=data_file_grids, source_file=data_file_sources, nt=nt,
        batch_size=None, shuffle=False, sequence_start_mode='unique', norm=True)
    print(len(datasets['val']))
    num_val = len(datasets['val'])
    tracker_global_train = defaultdict(torch.cuda.FloatTensor)
    tracker_global_test = defaultdict(torch.cuda.FloatTensor)
    # Run name encodes every hyperparameter for TensorBoard/checkpoints.
    name = 'lstm_' + str(args.n_lstms) + '_Adam_z_' + str(args.latent_size) + '_lr_' + str(args.learning_rate) + '_rand_' + str(random) + '_norm_' + str(args.norm) + '_kl_start_0_finish_' + str(args.beta) + '_center_' + str(args.crossover) + '_mutual_info_' + args.mut_info + '_alpha_' + str(args.alpha) +'_epochs_' + str(args.epochs) + '_batch_' + str(args.batch_size)
    folder_name = str(ts) + "_" + name
    if not os.path.exists(os.path.join(args.fig_root, folder_name)):
        if not(os.path.exists(os.path.join(args.fig_root))):
            os.mkdir(os.path.join(args.fig_root))
        os.mkdir(os.path.join(args.fig_root, folder_name))
    writer = SummaryWriter(os.path.join(dir_models, 'runs/' + str(ts) + '_' + name))
    vae = VAE(
        encoder_layer_sizes_p=args.encoder_layer_sizes_p,
        n_lstms=args.n_lstms,
        latent_size=args.latent_size,
        dim=args.dim
    )
    vae = vae.cuda()
    optimizer = torch.optim.Adam(vae.parameters(), lr=args.learning_rate)
    # NOTE(review): best_loss starts negative; the `epoch == 0` clause below
    # seeds it with the first real validation loss.
    best_loss = -1.
    save_filename = name + 'best_p.pt'
    for epoch in tqdm(range(args.epochs)):
        tracker_epoch = defaultdict(lambda: defaultdict(dict))
        for split, dataset in datasets.items():
            print("split", split, epoch)
            if split == 'train':
                batch_size = args.batch_size
            else:
                # Validation runs in ten large batches.
                batch_size = int(len(dataset)/10)
            data_loader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=split=='train', drop_last=True)
            if split == 'train':
                train(data_loader, vae, optimizer, args, writer, tracker_global_train, epoch, num_train)
                gradient_vis(vae.named_parameters(), writer, epoch)
            else:
                # NOTE(review): `loss` is only assigned on the 'val' split;
                # the checkpointing below relies on 'val' being present.
                loss = test(data_loader, vae, optimizer, args, writer, epoch, num_val, nt)
        # Always save a final-epoch checkpoint.
        if epoch == args.epochs-1:
            with open(os.path.join(dir_models, name + '_epoch_' + str(args.epochs) + '_vae.pt'), 'wb') as f:
                torch.save(vae.state_dict(), f)
        # Save the best-so-far model; epochs 0 and 5 force a save.
        if ((epoch == 0) or (epoch == 5) or (loss < best_loss)):
            best_loss = loss
            with open(os.path.join(dir_models, save_filename), 'wb') as f:
                torch.save(vae.state_dict(), f)
    # Plot losses.
    plt.plot(tracker_global_train['it'].data.cpu().numpy(), tracker_global_train['loss'].data.cpu().numpy())
    plt.xlabel('Iteration')
    plt.ylabel('Loss')
    plt.savefig(os.path.join(dir_models, folder_name, "loss.png"))
    plt.clf()
    plt.close()
if __name__ == '__main__':
    # Command-line entry point: parse hyperparameters and launch training.
    parser = argparse.ArgumentParser()
    parser.add_argument("--epochs", type=int, default=20)
    parser.add_argument("--batch_size", type=int, default=64)
    parser.add_argument("--learning_rate", type=float, default=0.001)
    parser.add_argument("--kl_min", type=float, default=0.2)
    # BUG FIX: `type=list` applied to a command-line string splits it into
    # individual characters (e.g. "7,5" -> ['7', ',', '5']), making the flag
    # unusable.  Parse a comma-separated list of ints instead; the default
    # value is unchanged.
    parser.add_argument("--encoder_layer_sizes_p",
                        type=lambda s: [int(v) for v in s.split(',')],
                        default=[7, 5])
    parser.add_argument("--n_lstms", type=int, default=1)
    parser.add_argument("--alpha", type=float, default=1.0)
    parser.add_argument("--beta", type=float, default=1.0)
    parser.add_argument("--crossover", type=float, default=10000.0)
    parser.add_argument("--mut_info", type=str, default='None')
    parser.add_argument("--norm", action='store_true')
    parser.add_argument("--dim", type=int, default=4)
    parser.add_argument("--latent_size", type=int, default=100)
    parser.add_argument("--print_every", type=int, default=1000)
    parser.add_argument("--fig_root", type=str, default='figs')
    args = parser.parse_args()
    main(args)
| StarcoderdataPython |
281317 | """Elements which wrap the sqlfluff core library for public use."""
# flake8: noqa: F401
# Expose the simple api
from sqlfluff.api.simple import lint, fix, parse
from sqlfluff.api.info import list_rules, list_dialects
| StarcoderdataPython |
# Sample data: a mix of positive, zero, and negative integers.
val = [-1, 13, -29, 5, 41, 76, -33, 72, -45, -39, 51, 82, -55, 0, 68, 79, 63, 94, -90, 89, 99]
# Keep only the strictly negative values (0 is excluded), preserving order.
neg = [n for n in val if n < 0]
print(neg)
1669185 | <reponame>Vino2001-hub/the-creative-vipers
# function for subtraction
def doSubtraction():
    """Return the result of subtracting 10 from 68.

    BUG FIX: the original `def doSubtraction:` was a syntax error -- a
    function definition requires a (possibly empty) parameter list.
    """
    a = 68
    b = 10
    return (a-b)

result = doSubtraction() # subtraction function is invoked
print("Subtracte 10 from 68 is =", result)
| StarcoderdataPython |
9709417 | <gh_stars>0
import subprocess
# import asyncio
def secs_to_hhmm(secs):
    """Convert a duration in seconds to an (hours, minutes) pair,
    rounded to the nearest whole minute."""
    minutes = int((secs + 30) / 60)
    return int(minutes / 60), int(minutes % 60)
def ping(ip):
    """Ping `ip` four times and return the command's stdout as text."""
    completed = subprocess.run(['ping', '-c', '4', ip], stdout=subprocess.PIPE)
    return completed.stdout.decode()
"""
async def ping(ip):
proc = await asyncio.create_subprocess_exec('ping', '-c', '4', ip, stdout=subprocess.PIPE)
await proc.wait()
# cp = subprocess.run(['ping', '-c', '4', ip], stdout=subprocess.PIPE)
res = proc.stdout.decode()
return res
"""
def nmcli_c():
    """Return the output of `nmcli c` (NetworkManager connections) as text."""
    completed = subprocess.run(['nmcli', 'c'], stdout=subprocess.PIPE)
    return completed.stdout.decode()
| StarcoderdataPython |
8020432 | # Generated by Django 2.0.8 on 2018-09-11 15:00
from django.db import migrations
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.documents.blocks
class Migration(migrations.Migration):
    # Auto-generated by Django 2.0.8 (see file header).  Alters the
    # OutletPage.menu StreamField to a single 'document_link' block type
    # (required document chooser + optional title).  Schema changes should
    # normally be made by generating a new migration, not editing this one.

    dependencies = [
        ('content', '0039_auto_20180911_1536'),
    ]

    operations = [
        migrations.AlterField(
            model_name='outletpage',
            name='menu',
            field=wagtail.core.fields.StreamField([('document_link', wagtail.core.blocks.StructBlock([('link', wagtail.documents.blocks.DocumentChooserBlock(required=True)), ('title', wagtail.core.blocks.CharBlock(required=False))]))]),
        ),
    ]
| StarcoderdataPython |
5003086 | from flask import Flask, make_response, render_template
import yaha_analyzer
import plotly.plotly as py
import plotly
import plotly.graph_objs as go
import json
import sys
import collectobot
CSV_HEADER = 'Content-Disposition'
app = Flask(__name__)
@app.route('/')
def index():
    """Render the landing page listing every known deck and card."""
    analyzer = yaha_analyzer.yaha_analyzer()
    deck_data, card_data = analyzer.get_name_list()
    context = dict(
        title='Yaha',
        active=generate_active_status('index'),
        deck_data=deck_data,
        card_data=card_data,
        game_count=20,
    )
    return render_template('front.html', **context)
@app.route('/card/<card_name>')
def card(card_name):
    """Render the matchup graphs page for a single card."""
    analyzer = yaha_analyzer.yaha_analyzer()
    graph_json = analyzer.get_graph_data(card_name)
    graph_ids = [
        'Heatmap',
        'Win Counts With {} in Decks'.format(card_name),
        'Win Counts With {} Against Decks'.format(card_name),
        'Lose Counts With',
        'Lose Counts Against',
    ]
    return render_template(
        'matchups.html',
        title=card_name,
        page_name=card_name,
        active=generate_active_status('card'),
        graphJSON=graph_json,
        game_count=20,
        ids=graph_ids,
    )
@app.route('/deck/<deck>')
def return_deck(deck):
    """Render the matchup graphs page for a single deck."""
    # Deck names arrive with spaces; stored data uses underscores.
    deck = deck.replace(' ', '_')
    analyzer = yaha_analyzer.yaha_analyzer()
    graph_json = analyzer.get_graph_data(deck)
    return render_template(
        'matchups.html',
        title=deck,
        page_name=deck,
        active=generate_active_status('deck'),
        graphJSON=graph_json,
        game_count=20,
        ids=['Heatmap'],
    )
@app.route('/decks')
def return_decks():
    """Render the front page restricted to deck data only."""
    analyzer = yaha_analyzer.yaha_analyzer()
    deck_data, _unused_card_data = analyzer.get_name_list()
    return render_template(
        'front.html',
        title='Decks',
        active=generate_active_status('deck'),
        deck_data=deck_data,
        card_data=[],
        game_count=20,
    )
@app.route('/cards')
def return_cards():
    """Render the front page restricted to card data only."""
    analyzer = yaha_analyzer.yaha_analyzer()
    _unused_deck_data, card_data = analyzer.get_name_list()
    return render_template(
        'front.html',
        title='Cards',
        active=generate_active_status('card'),
        deck_data=[],
        card_data=card_data,
        game_count=20,
    )
@app.route('/rebuild')
def rebuild():
    """Rebuild the scraped data set from scratch and update caches."""
    scrape = yaha_analyzer.yaha_analyzer()
    scrape.rebuild_and_update()
    # BUG FIX: Flask view functions must return a response; the original
    # returned None, which makes Flask raise a 500 after the rebuild ran.
    return 'rebuild complete'
@app.route('/remake')
def remake():
    """Regenerate the plotted graphs from existing data."""
    scrape = yaha_analyzer.yaha_analyzer()
    scrape.remake_graphs()
    # BUG FIX: Flask view functions must return a response; the original
    # returned None, which makes Flask raise a 500 after the remake ran.
    return 'remake complete'
def generate_active_status(active_element):
    """Map a page kind ('index', 'deck', or 'card') to the three-slot list
    of nav-bar CSS classes, with 'active' in the matching position.

    Returns None (implicitly) for any unrecognized page kind, matching the
    original fall-through behavior.
    """
    nav_order = ('index', 'deck', 'card')
    if active_element in nav_order:
        flags = ['', '', '']
        flags[nav_order.index(active_element)] = 'active'
        return flags
def remove_underscore(names):
    """Return a new list with every underscore replaced by a space."""
    return [name.replace('_', ' ') for name in names]
4853905 | <filename>pyserver/planner/routed_p2/route_finder.py
# Copyright (c) 2006-2013 Regents of the University of Minnesota.
# For licensing terms, see the file LICENSE.
# BUG nnnn: 2012.09.26: Search for a route from DT Mpls to DT St Paul (i.e.,
# the 94) at 9 AM and you may have to bike to a slow bus -- why
# isn't the express bus suggested?
import os
# FIXME: Bug NNNN: Replace sqlite3 table with postgres table, and just keep a
# copy of the transit db alongside the other cyclopath tables, but in a
# different schema.
# ^^^ Maybe this doesn't matter. Is the sqlite3 db closed once the route finder
# is loaded? Is the load time so slow that we should bother importing into
# Postgres? Is there an easy way to import a sqlite3 file into Postgres?
import sqlite3
import time
import traceback
# 2013.11.18: This is new:
# /ccp/dev/cp/pyserver/planner/routed_p2/route_finder.py:24: UserWarning:
# "Module osgeo was already imported from
# /ccp/opt/usr/lib/python2.7/site-packages/GDAL-1.10.1-py2.7-linux-x86_64.egg
# /osgeo/__init__.py, but /usr/lib64/python2.7/site-packages is being added
# to sys.path"
from pkg_resources import require
require("Graphserver>=1.0.0")
from graphserver.core import Crossing
from graphserver.core import GenericPyPayload
from graphserver.core import Link
from graphserver.core import State
from graphserver.core import TripAlight
from graphserver.core import TripBoard
from graphserver.core import WalkOptions
from graphserver.ext.gtfs.gtfsdb import GTFSDatabase
import conf
import g
from gwis.exception.gwis_error import GWIS_Error
from item.feat import byway
from item.feat import node_endpoint
from item.feat import route_step
from item.feat import route_stop
from item.util.item_query_builder import Item_Query_Builder
from planner.problem_base import Problem_Base
from planner.travel_mode import Travel_Mode
from planner.routed_p2.payload_byway import Payload_Byway
from util_ import db_glue
from util_ import geometry
from util_ import gml
from util_ import misc
__all__ = ['Problem']
log = g.log.getLogger('route_finder/p2')
class Problem(Problem_Base):
__slots__ = (
'req',
'gserver',
'beg_addr',
'fin_addr',
'beg_vertex_id',
'fin_vertex_id',
'beg_xy',
'fin_xy',
'rating_func',
'rating_min',
'p1_priority',
'xy_crow_flies',
'p2_depart_at',
'p2_transit_pref',
'is_reverse', # FIXME: Not implemented.
'depart_time',
'walk_opts',
'phase_change_grade',
'phase_change_velocity_factor',
'spt_vertex_id',
'db_gtfs',
'qb',
)
# *** Constructor
#
def __init__(self, req, graph, rt,
             beg_vertex_id, fin_vertex_id, rating_func, rating_min,
             beg_xy, fin_xy, is_reverse=False):
    '''Finds a bike/transit route between the start and end vertices using
    the given graph. Uses the departure time and transit preference to
    determine when to route using transit, and when to route using
    bicycling.

    Raises GWIS_Error if the route request lacks a departure time.'''
    self.req = req # FIXME: Use to check branch, rev, and username?
    self.gserver = graph.gserver
    # Graphserver vertex labels are strings, so coerce the ids.
    self.beg_vertex_id = str(beg_vertex_id)
    self.fin_vertex_id = str(fin_vertex_id)
    self.rating_func = rating_func
    self.rating_min = rating_min # FIXME: Not used?
    self.p1_priority = rt.p1_priority
    # Endpoint coordinates and human-readable addresses (used for the
    # first/last route stops).
    self.beg_xy = beg_xy
    self.beg_addr = rt.beg_addr
    self.fin_xy = fin_xy
    self.fin_addr = rt.fin_addr
    self.xy_crow_flies = 0.0
    self.p2_depart_at = rt.p2_depart_at
    if not self.p2_depart_at:
        raise GWIS_Error('Please specify p2_depart_at')
    self.p2_transit_pref = rt.p2_transit_pref
    # Populated during solve()/get_walk_options().
    self.depart_time = None
    self.walk_opts = None
    self.phase_change_grade = None
    self.phase_change_velocity_factor = None
    # FIXME: Carol has this coded but it's never True
    g.assurt(not is_reverse) # FIXME: Not implemented
    self.is_reverse = is_reverse
    # Internal members
    self.spt_vertex_id = None
    self.db_gtfs = None
    #
    self.qb = None
# *** Public interface
#
def solve(self, qb):
    '''Solves the problem of getting from point A to point B.

    Returns (rsteps, rstops, path_cost, path_len); path_cost is always
    -1.0 here (computed later by the caller).  Raises GWIS_Error when no
    route can be found.'''
    time_0 = time.time()
    log.debug('solve: solving...')
    # FIXME_2013_06_14:
    # FIXME/EXPLAIN: Why is qb passed to fcns. when also set as instance var?
    # oh, also: up until 2013.06.14 no one noticed solve(qb) for p2 but
    # solve() for p1.
    self.qb = qb
    # Get a handle to the transit (GTFS/sqlite) database.
    self.db_gtfs = GTFSDatabase(conf.transitdb_filename)
    # Get the walk options used to calculate costs.
    # FIXME: Most walk options are currently hard-coded.
    self.walk_opts = self.get_walk_options()
    # The walk opts members are immutable, so save a ref to us for the Edge
    # (payload callbacks look the problem up by walk_opts.soul).
    Payload_Byway.outstanding_problems[self.walk_opts.soul] = self
    # Convert the departure time string into seconds-since-epoch.
    depart_time = Problem.date_flashclient_mktime(self.p2_depart_at)
    # FIXME: Make this adjustment settable.
    slack = 5 * 60 # Number of seconds to wait at the first transit stop.
    self.depart_time = depart_time + slack
    # Ask Graphserver for the shortest path tree.
    spt = self.graphserver_get_spt()
    # Ask Graphserver for the lists of vertices and edges.
    (vertices, edges) = self.graphserver_get_path(spt)
    rsteps = route_step.Many()
    rstops = route_stop.Many()
    path_len = 0.0
    if len(vertices) > 0:
        # Convert the path into route steps we can send back to the client.
        (rsteps, rstops, path_len,) = self.path_convert(qb, vertices, edges)
        # Adjust bicycle route steps' times according to transit edges.
        # (That is, don't have the user get to the transit stop 30 mins.
        # early.)
        self.steps_adjust_jit_arrival(rsteps, slack)
    # else, we'll raise in a moment, after cleaning up.
    del Payload_Byway.outstanding_problems[self.walk_opts.soul]
    # Destroy Graphserver C-objects (not managed by Python GC).
    spt.destroy()
    self.walk_opts.destroy()
    self.walk_opts = None
    log.debug('solve: route of %d steps found in %s'
        % (len(rsteps), misc.time_format_elapsed(time_0),))
    if not rsteps:
        log.error('solve: route not found?: %s' % (self,))
        # FIXME: This error message is not really "Help"ful. Why did the
        # request fail? What specifically can the user do to fix the
        # problem?
        # SYNC_ME: This error message shared with routed_p1/route_finder.py.
        #raise GWIS_Error(
        #   'No route exists. Click "Help" for ideas on what to do next.')
        raise GWIS_Error(Problem_Base.error_msg_basic)
    # The path cost returned here is just for debugging. We'll compute it
    # later, anyway, so just toss back a negative.
    path_cost = -1.0
    return (rsteps, rstops, path_cost, path_len,)
# *** First tier solve() helpers
#
def get_walk_options(self):
    """Build and return the Graphserver WalkOptions used to cost the search.

    Most option values are hard-coded; walking_reluctance / max_walk /
    walking_overage are chosen from self.p2_transit_pref (-4 .. 6, where
    negative prefers biking and positive prefers transit).  Also computes
    and caches self.xy_crow_flies, self.phase_change_grade, and
    self.phase_change_velocity_factor as side effects.

    NOTE: the caller owns the returned WalkOptions (a C object) and must
    call .destroy() on it -- see solve().
    """
    # FIXME: WalkOptions is immutable and not completely wired into Python.
    # We have to set some options that Graphserver uses (FIXME: Enumerate
    # those options), but some of the options are only used by
    # core/edgetypes/street.c, which we don't use. So I [lb] think we
    # should probably make our own object and not worry about this one.
    # See graphserver/core.py, where all these options are defined.
    # See also graphserver/core/walkoption.c for the defaults:
    # FIXME: Magic Numbers. These should be user-choosable. See Cycloplan 2.
    walkoptions = WalkOptions()
    # The transfer_penalty is the no. of seconds penalty for each boarding.
    # Increase if routes contain frivolous transfers, or decrease if routes
    # avoid all buses/trains/transit.
    walkoptions.transfer_penalty = 5000
    # NOTE: We don't care about turn_penalty; we have our own alg.
    walkoptions.turn_penalty = 120
    walkoptions.walking_speed = 4.5 # in meters per sec; approx 10 mph
    walkoptions.uphill_slowness = 0.05
    walkoptions.downhill_fastness = -12.1
    #walkoptions.phase_change_grade = 0.045;
    walkoptions.hill_reluctance = 0.0
    walkoptions.max_walk = 10000 # meters
    walkoptions.walking_overage = 0.1
    log.debug('get_walk_options: walk_ops.soul: %s' % (walkoptions.soul))
    # Graphserver defines these in walkoptions.c, but not in core.py. Hrm.
    # And we can't attach them to walkoptions because that's a C object.
    # And I [lb] quote: "Grade. An interesting thing thing happens at a
    # particular grade, when they settle in for a long slog."
    self.phase_change_grade = 0.045;
    # From graphserver: "velocity between 0 grade and the phase change grade
    # is Ax^2+Bx+C, where A is the phase_change_velocity_factor, B is the
    # downhill fastness, and C is the average speed"
    # FIXME: See speed_from_grade: this is almost the same calculation,
    # except speed_from_grade uses whatever the grade really is, and this
    # uses a static value for the grade....
    phase_change_speed = ((walkoptions.uphill_slowness
                           * walkoptions.walking_speed)
                          / (walkoptions.uphill_slowness
                             + self.phase_change_grade))
    self.phase_change_velocity_factor = (
        (phase_change_speed
         - (walkoptions.downhill_fastness * self.phase_change_grade)
         - walkoptions.walking_speed)
        / (self.phase_change_grade * self.phase_change_grade))
    log.debug('get_walk_options: phase_change_grade: %s'
        % (self.phase_change_grade,))
    log.debug('get_walk_options: phase_change_velocity_factor: %s'
        % (self.phase_change_velocity_factor,))
    # FIXME: Why waste time with SQL? If crow_flies_sql and crow_flies_raw
    # return same results, use latter (_raw) (or maybe timeit first).
    crow_flies_sql = self.get_straightline_geom_len_sql(self.beg_xy,
                                                        self.fin_xy)
    crow_flies_raw = self.get_straightline_geom_len_raw(self.beg_xy,
                                                        self.fin_xy)
    if abs(crow_flies_raw - crow_flies_sql) > 0.01:
        log.warning(
            'Unexpectd diffr: xy: beg: %s / fin: %s // crow: sql: %s / raw: %s'
            % (self.beg_xy, self.fin_xy, crow_flies_sql, crow_flies_raw,))
    self.xy_crow_flies = crow_flies_raw
    log.debug('get_walk_options: xy_crow_flies: %s' % (self.xy_crow_flies,))
    # Map the client's transit preference onto biking reluctance and the
    # maximum allowed biking distance (scaled by the crow-flies distance).
    if self.p2_transit_pref == -4:
        walkoptions.walking_reluctance = 0.5
        walkoptions.max_walk = Payload_Byway.ABSOLUTE_MAX_WALK
        # Don't go negative, unless you want a century spaghetti ride. It
        # means the further from the start you are, the more favored the
        # edge.
        #walkoptions.walking_overage = -0.1 # favors walking
        walkoptions.walking_overage = 0.0
    elif self.p2_transit_pref == -2:
        walkoptions.walking_reluctance = 0.75
        walkoptions.max_walk = Payload_Byway.ABSOLUTE_MAX_WALK
        walkoptions.walking_overage = 0.0
    elif self.p2_transit_pref == 0:
        # Default value. Don't pref. either transit or biking.
        walkoptions.walking_reluctance = 1.0
        walkoptions.max_walk = Payload_Byway.ABSOLUTE_MAX_WALK # 1,000 km
        walkoptions.walking_overage = 0.0
    elif self.p2_transit_pref == 2:
        walkoptions.walking_reluctance = 1.0
        walkoptions.max_walk = int(self.xy_crow_flies * 1.33)
        walkoptions.walking_overage = 0.1
    elif self.p2_transit_pref == 4:
        walkoptions.walking_reluctance = 2.0
        walkoptions.max_walk = int(self.xy_crow_flies * 0.66)
        walkoptions.walking_overage = 0.1
    elif self.p2_transit_pref == 6:
        walkoptions.walking_reluctance = 2.0
        walkoptions.max_walk = 0
        walkoptions.walking_overage = 0.1
    else:
        g.assurt(False)
    log.debug('get_walk_options: relucance: %s / max_bike: %s / overage: %s'
        % (walkoptions.walking_reluctance, walkoptions.max_walk,
           walkoptions.walking_overage,))
    return walkoptions
#
def graphserver_get_spt(self):
    """Compute and return the Graphserver shortest-path tree.

    Chooses forward or retro search based on self.is_reverse and records
    which vertex the path should later be extracted toward in
    self.spt_vertex_id.  The caller must .destroy() the returned tree.
    """
    time_0 = time.time()
    if not self.is_reverse:
        log.debug('graphserver_get_spt: forward / shortest_path_tree')
        spt_fcn = self.gserver.shortest_path_tree
        self.spt_vertex_id = self.fin_vertex_id
    else:
        log.debug('graphserver_get_spt: reverse / shortest_path_tree_retro')
        spt_fcn = self.gserver.shortest_path_tree_retro
        self.spt_vertex_id = self.beg_vertex_id
    # State(1, depart_time): initial search state at the departure time.
    spt = spt_fcn(self.beg_vertex_id, self.fin_vertex_id,
                  State(1, self.depart_time), self.walk_opts)
    log.debug('graphserver_get_spt: %s / spt: %s'
        % (misc.time_format_elapsed(time_0),
           spt,))
    return spt
#
def graphserver_get_path(self, spt):
    """Extract the (vertices, edges) path from the shortest-path tree.

    Raises GWIS_Error if Graphserver cannot produce a path to
    self.spt_vertex_id.  (This module is Python 2 -- note the
    `except Exception, e` syntax.)
    """
    time_0 = time.time()
    log.debug('graphserver_get_path: calling spt.path...')
    vertices = []
    edges = []
    try:
        (vertices, edges) = spt.path(self.spt_vertex_id)
        #for vertex in vertices:
        #   log.debug('graphserver_get_path: vertex.label: %s' % vertex.label)
        #for edge in edges:
        #   log.debug('graphserver_get_path: edge: %s' % (edge,))
    except Exception, e:
        # BUG 2286: If Graphserver cannot find a path, e.g., to "Ridgedale
        # Mall", it raises, e.g., "Exception: A path to 1302090 could not be
        # found".
        log.error('Unable to find a route: "%s" / %s'
            % (str(e), traceback.format_exc(),))
        #raise GWIS_Error('Unable to find a route: %s' % (str(e),))
        raise GWIS_Error(Problem_Base.error_msg_basic)
    finally:
        log.debug('graphserver_get_path: %s / v. cnt: %d / e. cnt: %d'
            % (misc.time_format_elapsed(time_0),
               len(vertices), len(edges),))
    return (vertices, edges,)
#
# FIXME: This fcn. is obnoxiously long: split into into multiple fcns.
def path_convert(self, qb, vertices, edges):
    """Convert a Graphserver (vertices, edges) path into Cyclopath route
    steps and route stops.

    Edge payloads are dispatched by type: Payload_Byway (bicycling),
    Link (byway<->transit transition), and the transit trio TripBoard /
    Crossing / TripAlight.  Board/alight events are buffered in
    last_board/last_alight until a following edge tells us where their
    route stop belongs.

    Returns (route_steps, route_stops, path_len); path_len accumulates
    only the bicycle edge lengths.  Assumes a non-empty path (the caller,
    solve(), only calls this when len(vertices) > 0).
    """
    # FIXME: should these be route_step.Many() and route_stop.Many()?
    route_steps = []
    route_stops = []
    last_alight = None
    last_board = None
    path_len = 0.0
    time_0 = time.time()
    log.debug('path_convert: making route steps...')
    for i in xrange(0, len(edges)):
        # NOTE: See class Edge in graphserver/core.py.
        edge = edges[i]
        beg_node = vertices[i]
        fin_node = vertices[i+1]
        # Handle byway.
        if isinstance(edge.payload, Payload_Byway):
            # FIXME: 2012.09.26: Is this still true?
            # Should not get a byway after a board edge without another
            # edge.
            g.assurt(last_board is None)
            byway_step = self.make_route_step_bicycle(
                edge.payload, beg_node, fin_node)
            log.debug('path_convert: Byway step: beg_nid: %d / fin_nid: %d'
                % (byway_step.beg_node_id, byway_step.fin_node_id,))
            if i == 0:
                # For the first step, create a beginning stop.
                log.debug('path_convert: Adding first stop for byway step.')
                stop = route_stop.One(
                    qb, row={'name': self.beg_addr,
                             'is_pass_through': False,
                             'is_transit_stop': False,})
                stop.fit_route_step(byway_step, True)
                route_stops.append(stop)
            else:
                g.assurt(len(route_stops) > 0) # must have at least 1 by now
                if last_alight is not None:
                    log.debug('Adding stop between TripAlight and Payload_Byway.')
                    # Handle the case where we have a TripAlight and then
                    # BywayPayload: we have to create the missing link
                    # (route stop).
                    stop = route_stop.One(
                        qb, row={'name': last_alight.fin_sta_name,
                                 'is_pass_through': False,
                                 'is_transit_stop': True,})
                    stop.fit_route_step(byway_step, True)
                    route_stops.append(stop)
                    last_alight = None
                # else: the last step was not an alight, so no stop missing.
                # Make any previous transit step's node id match up with
                # this step and repair the node_id of the last transit stop.
                if ((len(route_steps) > 0)
                    and (route_steps[-1].travel_mode == Travel_Mode.transit)):
                    node_id = (byway_step.beg_node_id if byway_step.forward else
                               byway_step.fin_node_id)
                    if route_steps[-1].forward:
                        route_steps[-1].fin_node_id = node_id
                    else:
                        route_steps[-1].beg_node_id = node_id
                    if ((route_stops[-1].is_transit_stop)
                        and (route_stops[-1].node_id is None)):
                        route_stops[-1].node_id = node_id
            path_len += byway_step.edge_length
            # Push the byway step onto the list of steps.
            route_steps.append(byway_step)
        # Handle link (which links a transit edge and a Cyclopath edge,
        # i.e., byway->transit and transit->byway transitions).
        elif isinstance(edge.payload, Link):
            # FIXME: 2012.09.26: Is this still true?
            # We should not get a link after a board edge without a transit
            # edge in between.
            g.assurt(last_board is None)
            link_step = self.make_route_step_link(qb, edge, beg_node, fin_node)
            log.debug('Encountered link step')
            if last_alight is not None:
                # We are a link after the alight, so steal its metadata.
                # EXPLAIN: When does Link follow TripAlight
                # vs. when does Payload_Byway follow TripAlight?
                link_step.step_name = last_alight.step_name
                # Create a stop for the previous alight.
                stop = route_stop.One(
                    qb, row={'name': last_alight.step_name,
                             'is_pass_through': False,
                             'is_transit_stop': True,})
                log.debug('Adding stop after last TripAlight.')
                if ((i < (len(edges) - 1))
                    and (isinstance(edges[i + 1].payload, TripBoard))):
                    # Put stop at the start of the link.
                    stop.fit_route_step(link_step, True)
                else:
                    # Put stop at the end of the link.
                    stop.fit_route_step(link_step, False)
                route_stops.append(stop)
                last_alight = None
            # repair node_ids of the step if possible
            if ((len(route_steps) > 0)
                and (route_steps[-1].travel_mode == Travel_Mode.bicycle)):
                # grab a node id from the previous bike step
                if route_steps[-1].forward:
                    node_id = route_steps[-1].fin_node_id
                else:
                    node_id = route_steps[-1].beg_node_id
                if link_step.forward:
                    link_step.beg_node_id = node_id
                else:
                    link_step.fin_node_id = node_id
            # Push the link step onto the list of steps.
            route_steps.append(link_step)
        # Handle non-Link transit: TripAlight, TripBoard, and Crossing.
        else:
            tstep = self.make_route_step_transit(qb, edge, beg_node, fin_node)
            # Handle TripAlights.
            if isinstance(edge.payload, TripAlight):
                log.debug('Encountered TripAlight, storing for later')
                # Just store the alight in last_alight. A stop will be
                # created at the end of the loop or at the next byway/link
                # encountered.  (Note: the alight step itself is never
                # appended to route_steps.)
                last_alight = tstep
            # Handle TripBoards.
            elif isinstance(edge.payload, TripBoard):
                log.debug('Encountered TripBoard step')
                # Create a stop at the end of the previous step's geometry,
                # or at the start of the next if this is the first edge.
                stop = route_stop.One(
                    qb, row={'name': tstep.step_name,
                             'is_pass_through': False,
                             'is_transit_stop': True,})
                if len(route_steps) > 0:
                    log.debug('Creating stop for TripBoard.')
                    # If the previous step is a bicycle step, place at the
                    # end.  If the previous step is a link, it's a weird
                    # situation where (Alight - Link - Board) so place at
                    # the end.  And node_id is only set if the last step
                    # was a bike step.
                    # FIXME: Are we showing transit boardings at the
                    # Cyclopath node_endpoint or at the transit stop's
                    # actual x,y coordinates?
                    stop.fit_route_step(route_steps[-1], False)
                    route_stops.append(stop)
                    # Push metadata onto previous link.
                    if ((route_steps[-1].step_name is None)
                        and (route_steps[-1].travel_mode
                             == Travel_Mode.transit)):
                        route_steps[-1].step_name = tstep.step_name
                else:
                    # Store the TripBoard to be processed by the next
                    # Crossing step.
                    # FIXME: Make this verbose...
                    log.debug('Storing tripBoard for later use.')
                    last_board = tstep
            # Handle Crossings.
            elif isinstance(edge.payload, Crossing):
                # FIXME: log.verbose
                log.debug('Encountered Crossing edge')
                if last_board is not None:
                    # Add the stop.
                    # FIXME: log.verbose
                    log.debug('Adding stop from previously stored TripBoard.')
                    stop = route_stop.One(
                        qb, row={'name': last_board.step_name,
                                 'is_pass_through': False,
                                 'is_transit_stop': True,})
                    stop.fit_route_step(tstep, True)
                    route_stops.append(stop)
                    last_board = None
                # Repair the node_ids of the step if possible.
                # NOTE(review): this branch assigns to `link_step` (leftover
                # from a previous Link iteration), not to `tstep` -- looks
                # like a copy/paste slip from the Link branch; confirm.
                if ((len(route_steps) > 0)
                    and (route_steps[-1].travel_mode == Travel_Mode.bicycle)):
                    # Grab a node id from the previous bike step.
                    if route_steps[-1].forward:
                        node_id = route_steps[-1].fin_node_id
                    else:
                        node_id = route_steps[-1].beg_node_id
                    if link_step.forward:
                        link_step.beg_node_id = node_id
                    else:
                        link_step.fin_node_id = node_id
                route_steps.append(tstep)
            # No other edge types.
            else:
                # This code should be unreachable.
                g.assurt(False)
    # We're done processing edges. See if we're missing the finishing stop.
    if route_steps[-1].travel_mode == Travel_Mode.bicycle:
        log.debug('Adding last stop: after a bicycle step.')
        # Add a last stop for the path.
        stop = route_stop.One(
            qb, row={'name': self.fin_addr,
                     'is_pass_through': False,
                     'is_transit_stop': False,})
        stop.fit_route_step(route_steps[-1], False)
        route_stops.append(stop)
    else:
        g.assurt(route_steps[-1].travel_mode == Travel_Mode.transit)
        if last_alight is not None:
            log.debug('Adding last stop: after a transit stop.')
            # We had a transit stop at the very end but no ending link to
            # process it, so add a transit stop.
            stop = route_stop.One(
                qb, row={'name': last_alight.step_name,
                         'is_pass_through': False,
                         'is_transit_stop': True,})
            stop.fit_route_step(route_steps[-1], False)
            route_stops.append(stop)
        else:
            # EXPLAIN: We're all good?
            log.warning('Not adding last stop for after a transit stop?')
    # Fix route steps with null node IDs. I.e., transit stops should be
    # aligned with their byway neighbors so that we have a connected path.
    self.repair_node_ids(route_steps, route_stops)
    log.debug('Path conversion completed')
    # DEVS: Uncomment this if you want a lot of output...
    #for rs in route_steps:
    #   log.debug(' >> step: %s %s to %s %s'
    #      % (rs.transit_type,
    #         str(rs.beg_time),
    #         str(rs.fin_time),
    #         str(rs.transit_name)))
    log.debug('path_convert: %.2f secs / rs. cnt: %d'
        % ((time.time() - time_0), len(route_steps),))
    return (route_steps, route_stops, path_len,)
   # Replace null node_ids and beg_node_id/fin_node_id with unique negative
   # IDs, so that it's still possible to identify where a stop exists in a
   # route, even if the steps aren't part of the byway graph (since we don't
   # support editing multimodal routes... yet).
   # BUG nnnn: Routed p2: Editable routes.
   def repair_node_ids(self, route_steps, route_stops):
      """Repairs missing node IDs on route steps and stops, in place.

      First delegates to route_step.Many.repair_node_ids to fix null step
      node IDs, then walks the stops: each stop without a node_id is matched
      by geometry against the steps' first/last coordinates and inherits the
      matching step's beg_node_id or fin_node_id.

      Args:
         route_steps: ordered list of route steps for the path.
         route_stops: list of route stops to repair in place.
      """
      # Repair any null step node ids.
      route_step.Many.repair_node_ids(self.qb, route_steps)
      # Iterate through the stops.
      # Stops occur in path order, so resume scanning steps just past the
      # previous match instead of restarting from step 0 each time.
      last_found_step = -1
      for stop in route_stops:
         if not stop.node_id:
            # must find a route_step that fits this geometry and assign
            # the appropriate node id
            step_i = last_found_step + 1
            while step_i < len(route_steps):
               step = route_steps[step_i]
               # FIXED?: Was: geom = gml.flat_to_xys(step.geometry[2:])
               geom = gml.flat_to_xys(step.geometry)
               # FIXME: MAGIC NUMBERS. FIXME: Use Ccp precision of 0.01?
               #fixme: do something like this?
               #existing_xy = geometry.wkt_point_to_xy(rows[0]['existing_xy_wkt'],
               #                                       precision=conf.node_precision)
               #proposed_xy = geometry.wkt_point_to_xy(self.endpoint_wkt,
               #                                       precision=conf.node_precision)
               #
               # FIXME: How does these node IDs relate to node_endpoint, etc.?
               # NOTE: 0.0001 map units is the "same point" tolerance here.
               if ((abs(stop.x - geom[0][0]) < .0001)
                   and (abs(stop.y - geom[0][1]) < .0001)):
                  # matches start of the step
                  stop.node_id = step.beg_node_id
                  last_found_step = step_i
                  break
               elif ((abs(stop.x - geom[-1][0]) < .0001)
                     and (abs(stop.y - geom[-1][1]) < .0001)):
                  # matches end of the step
                  stop.node_id = step.fin_node_id
                  last_found_step = step_i
                  break
               step_i = step_i + 1
            # end while.
            # Since we moved transit edges' endpoints to match Cyclopath
            # node_endpoints, we should have found a matching step by now.
            g.assurt(stop.node_id)
         # else, stop.node_id is nonzero, so nothing to do.
   # Graphserver returns a route that leaves at the start time and will wait
   # at the transit stop for however long is necessary. Working back from the
   # first transit stop's departure, adjust start time of cycling steps to
   # minimize wait.
   def steps_adjust_jit_arrival(self, route_steps, slack):
      """Shifts leading bicycle steps so the rider arrives 'just in time'.

      Graphserver departs at the requested time and simply waits at the
      first transit stop. This finds the first transit edge, then rewrites
      the beg/fin times of the cycling steps that precede it (walking
      backward from the transit departure) so only `slack` seconds of wait
      remain. All-cycling routes are left untouched.

      Args:
         route_steps: ordered route steps; their times are mutated in place.
         slack: seconds of buffer to leave at the first transit stop.
      """
      time_0 = time.time()
      log.debug('steps_adjust_jit_arrival: len(route_steps): %d'
                % (len(route_steps),))
      first_transit_step = None
      prev_transit_step = None
      steps_to_adjust = []
      # Look for the first transit edge.
      for step in route_steps:
         # NOTE: In Pre-Route Sharing, travel_mode was called transit_type
         #       and in this fcn., we checked that it was 'board_bus' or
         #       'board_train'. Now, Travel_Mode.transit also includes the
         #       Crossing and Link types (see also TripAlight and TripBoard),
         #       but Crossing not Link should be the first step...
         if step.travel_mode == Travel_Mode.transit:
            # FIXME: g.assurt this is TripBoard?
            if first_transit_step is None:
               first_transit_step = step
            prev_transit_step = step
         else:
            # We trace back from the first transit stop, so assemble a list of
            # bicycle edges in reverse order.
            # FIXME: Only applies to init. bike edges? I.e., not after alight?
            #        What about transfers?
            if first_transit_step is None:
               steps_to_adjust.insert(0, step)
            if prev_transit_step is not None:
               # BUG 2296 Correct the board edges' end times: set to the
               # Crossing edge's start time and subtract a minute.
               prev_transit_step.fin_time = step.beg_time - 60
               prev_transit_step = None
      log.debug('steps_adjust_jit_arrival: found: %s (%d edges)'
                % (first_transit_step, len(steps_to_adjust),))
      # Bug 2293: If the first step is a transit edge, it's a Board edge, and
      # it's start time is the time for which the user requested the route
      # and for which we configured the State() object when we submitted the
      # problem to Graphserver. Correct the Board edge here, otherwise you get
      # wonky results -- e.g., if you request a route at 4 AM but the first bus
      # isn't until 6 AM, you'll get told to board the bus at 6 AM.
      # If there are no transit edges, this is an all-cycling route, so the
      # route steps do not need adjustment. Otherwise, go through route steps
      # and adjust the cycling edges' start and end times.
      if first_transit_step is not None:
         transit_departs = first_transit_step.fin_time
         total_duration = slack # No. seconds to wait at first transit stop
         first_transit_step.beg_time = transit_departs - total_duration
         # Walk backward through the preceding cycling steps, preserving
         # each step's duration while packing them against the departure.
         for step in steps_to_adjust:
            duration = step.fin_time - step.beg_time
            step.fin_time = transit_departs - total_duration
            total_duration += duration
            step.beg_time = transit_departs - total_duration
      log.debug('steps_adjust_jit_arrival: %s / adjust cnt: %d'
                % (misc.time_format_elapsed(time_0),
                   len(steps_to_adjust),))
# *** Route Step support routines
#
   def make_route_step_bicycle(self, payload, beg_node, fin_node):
      """Builds a bicycle route step from a Cyclopath byway edge payload.

      Args:
         payload: graphserver edge payload wrapping a Cyclopath byway
            (provides .byway and .forward).
         beg_node: graphserver vertex at the edge start; its state.time
            becomes the step's beg_time.
         fin_node: graphserver vertex at the edge end; its state.time
            becomes the step's fin_time.

      Returns:
         A new route_step.One with travel_mode bicycle.
      """
      # FIXME: Does route_step.forward and payload.reverse match up?
      #        Does it affect beg_node_id and fin_node_id?
      rs = route_step.One()
      rs.travel_mode = Travel_Mode.bicycle
      rs.init_from_byway(payload.byway)
      rs.forward = payload.forward
      # The rating is the generic rating; the caller, route.py, will
      # overwrite this with the logged-in user's rating, if the user is
      # logged in. Note that we can't use byway.user_rating, since the
      # Transit_Graph's byways is a collection on anon. user byways.
      rs.rating = payload.byway.generic_rating
      # Transit attrs.
      # Not applicable to bicycle edges: beg_sta_name, fin_sta_name,
      # duration, transit_name
      # FIXME: Return 'duration'? It wouldn't be that hard to calculate
      # (we already do so in Payload_Cyclopath.cost_graphserver_passable)
      # (And I think flashclient caculates this value, too, but it's
      # probably not the same as what we calculate... and then you have
      # to maintain twice as much code, too. =)
      # Shared attrs.
      rs.beg_time = beg_node.state.time
      rs.fin_time = fin_node.state.time
      return rs
#
   def make_route_step_link(self, qb, edge, beg_node, fin_node):
      """Builds a transit 'Link' step joining a station and the street network.

      Exactly one endpoint is a station vertex (label starts with 'sta');
      the step gets a straight-line SVG geometry between the two points.

      Args:
         qb: query builder used for route_step construction and SQL.
         edge: the graphserver Link edge (not otherwise consulted here).
         beg_node: graphserver vertex at the edge start.
         fin_node: graphserver vertex at the edge end.

      Returns:
         A new route_step.One with travel_mode transit.
      """
      # In old CcpV1, there was no travel_mode but instead here we used:
      #   'transit_type': 'link',
      rs = route_step.One(
         qb, row={'travel_mode': Travel_Mode.transit,
                  'forward': True,
                  'beg_time': beg_node.state.time,
                  'fin_time': fin_node.state.time,})
      # Get start and end points
      if beg_node.label.startswith('sta'):
         sql_beg_pt_wkt = self.get_xy_wkt_from_station_node(beg_node)
         sql_fin_pt_wkt = self.get_xy_wkt_from_network_node(fin_node)
      else:
         g.assurt(fin_node.label.startswith('sta')) # Is this right?
         sql_beg_pt_wkt = self.get_xy_wkt_from_network_node(beg_node)
         sql_fin_pt_wkt = self.get_xy_wkt_from_station_node(fin_node)
      # Get straightline geometry
      # FIXME: Can graph calculate this when it loads? (a) So we don't waste
      # time calculating it now, and (b) so we don't waste time re-calculating
      # every time someone makes a route request.
      # EXPLAIN: Why are we using SVG here? We usually use E/WKT and xy...
      rs.geometry_svg = self.get_straightline_geom(sql_beg_pt_wkt,
                                                   sql_fin_pt_wkt)
      return rs
# A note about the difference between PSV and STA nodes. I'm still not quite
# sure I get it. =)
#
# From <NAME> (Graphserver Hero Extraordinaire):
#
# "sta-" vertices are "station" vertices and correspond to physical
# transit stops. "psv-" are Pattern-Stop Vertices, or PSVs. These
# vertices represent the state of being on a transit vehicle traveling
# on a particular pattern at a particular stop. For example, a class of
# edges called "TripBoard" edges model moving between station vertices
# which model being at a station and _off_ a vehicle to pattern-stop
# vertices which model being at a station _on_ a vehicle. Then a class
   # of edges called "Crossing" edges model going between two PSVs. It's
# a little contrived, but it's necessary to work around the dreaded
# Pro-Active Transferring Bug.
#
# Reference:
#
# https://groups.google.com/group/graphserver/msg/7a18e62fdccf0722
# https://github.com/bmander/graphserver/wiki/Board---Alight---Crossing-Transit-Graphs
#
# Definition:
#
# alighting: Descend from a train, bus, or other form of transportation.
#
   def make_route_step_transit(self, qb, edge, beg_node, fin_node):
      """Builds a transit route step for a Board, Alight, or Crossing edge.

      Resolves the station name(s) and transit route name from the GTFS
      database (via the 'sta-'/'psv-' vertex labels), names the step
      accordingly, and for Crossing edges attaches a straight-line SVG
      geometry between the two stations.

      Args:
         qb: query builder used for route_step construction.
         edge: graphserver edge whose payload is TripBoard, TripAlight,
            or Crossing.
         beg_node: graphserver vertex at the edge start.
         fin_node: graphserver vertex at the edge end.

      Returns:
         A new route_step.One with travel_mode transit.
      """
      rs = route_step.One(
         qb, row={'travel_mode': Travel_Mode.transit,
                  'forward': True,
                  'beg_time': beg_node.state.time,
                  'fin_time': fin_node.state.time,})
      transit_name = None
      beg_sta_name = None
      fin_sta_name = None
      beg_sta_node = None
      fin_sta_node = None
      # NOTE: Graphserver hard-codes the node labels with 'sta-' and 'psv-'
      #       prefixes. There's no other mechanism to determine the type of
      #       vertex other than doing a string compare on its label.
      if beg_node.label.startswith('psv'):
         # Get beg_sta_name and beg_sta_node.
         transit_name = self.get_transit_route_name(beg_node)
         beg_sta_node = self.transit_get_station_vertex(beg_node.label,
                                                        True)
         # BUG 2287: See below
         if beg_sta_node is None:
            log.info(
               'make_route_step_transit: no beg_sta_node: %s (%s / %s)'
               % (edge.payload, beg_node, fin_node,))
            # Well, if we go the other way, we should find a transit station.
            beg_sta_node = self.transit_get_station_vertex(beg_node.label,
                                                           False)
         if beg_sta_node is not None:
            beg_sta_name = self.get_stop_name(beg_sta_node)
         else:
            # BUG nnnn: l10n
            beg_sta_name = 'Start Here'
            log.warning('...step_transit: no beg_sta_node: beg_node: %s'
                        % (beg_node,))
      # MAGIC_NUMBER: 'psv': ...
      if fin_node.label.startswith('psv'):
         transit_name = self.get_transit_route_name(fin_node)
         fin_sta_node = self.transit_get_station_vertex(fin_node.label,
                                                        False)
         # BUG 2287: Some transit stops' PSV nodes only have incoming or
         #           outgoing Crossing edges, but do not have incoming or
         #           outgoing Alight or Board edges.
         #
         #           E.g. search for a multimodal route from 'cs building' to
         #           'moa' with Minimize Biking and More Busing, 6/29/2011 at
         #           8:45 AM. The last edge, before the Alight, is a Crossing
         #           whose vertices are both PSVs, but one of the PSVs is only
         #           attached to a Crossing edge, and not to an Alight or
         #           Board.
         #
         #           For now, this seems to do the trick: just look at the
         #           other direction (so, if the incoming edges is just a
         #           Crossing edge, look at the outgoing edges).
         #
         #           I'm not [lb isn't] 100% sure this is the proper solution,
         #           but it works for now....
         if fin_sta_node is None:
            log.info(
               'make_route_step_transit: no fin_sta_node: %s (%s / %s)'
               % (edge.payload, beg_node, fin_node,))
            fin_sta_node = self.transit_get_station_vertex(fin_node.label,
                                                           True)
         if fin_sta_node is not None:
            fin_sta_name = self.get_stop_name(fin_sta_node)
         else:
            # BUG nnnn: l10n
            fin_sta_name = 'End Here'
            log.warning('...step_transit: no fin_sta_node: fin_node: %s'
                        % (fin_node,))
      # FIXME: MAGIC_NUMBER: hack to identify light rail...
      # FIXME: What's the long term solution here? Is this data indicated in
      #        the GTFSDB?
      transit_type_name = 'Bus'
      if transit_name == '55':
         transit_type_name = 'Train'
      # MAYBE: Do we (in route_step) or does flashclient care about board v.
      #        alight?
      if isinstance(edge.payload, TripBoard):
         # TripBoard only needs station name and route name
         rs.step_name = '%s %s at %s' % (transit_type_name,
                                         transit_name,
                                         beg_sta_name or fin_sta_name,)
      elif isinstance(edge.payload, TripAlight):
         # TripAlight only needs station name and route name
         rs.step_name = '%s %s at %s' % (transit_type_name,
                                         transit_name,
                                         fin_sta_name or beg_sta_name,)
      elif isinstance(edge.payload, Crossing):
         # Crossing only needs route name
         rs.step_name = '%s %s' % (transit_type_name, transit_name,)
         # If Crossing, get straightline geometry from start to end station.
         if (beg_sta_node is not None) and (fin_sta_node is not None):
            sql_beg_pt_wkt = self.get_xy_wkt_from_station_node(beg_sta_node)
            sql_fin_pt_wkt = self.get_xy_wkt_from_station_node(fin_sta_node)
            rs.geometry_svg = self.get_straightline_geom(sql_beg_pt_wkt,
                                                         sql_fin_pt_wkt)
         else:
            # Crossing should have PSV endpoints, but not always...
            log.warning('...step_transit: messed up Crossing?: %s / %s / %s'
                        % (edge, beg_sta_node, fin_sta_node,))
      else:
         log.warning('EXPLAIN: Why no geometry for this transit step?')
         rs.geometry_svg = None
      return rs
# ***
# FIXME: This works for now, but it needs cleaning up
#
def transit_get_station_vertex(self, psv_label, is_outgoing):
'''Search for a station vertex (i.e. its label starts with sta-)
so that we can use its id to look up stop information. PSV vertices
do not contain the stop id unfortunately, so we can't use them.
NOTE: It appears that some stops cannot be found when using certain
values for is_outgoing, so if one value doesn't work, the other
should be used as a fallback.'''
if is_outgoing:
edges = self.gserver.get_vertex(psv_label).outgoing
else:
edges = self.gserver.get_vertex(psv_label).incoming
for edge in edges:
if (isinstance(edge.payload, TripAlight)):
# This is the last route step when routing to a transit stop as the
# final destination.
g.assurt(is_outgoing)
return edge.to_v
elif (isinstance(edge.payload, TripBoard)):
# If you route from a transit stop, look at incoming edges... I
# guess.
g.assurt(not is_outgoing)
return edge.from_v
# else, it's a Crossing; if there's a next edge in the list, it's an
# Alight or Board edge.
return None
# *** Static Support Routines
   # FIXME: BUG 2291: This fcn. does not respect daylight savings time
# FIXME: If date outside (before or after) GTFSDB calendar, warn user
#
@staticmethod
def date_flashclient_mktime(date_str):
log.debug('date_flashclient_mktime: date_str (1): %s' % (date_str,))
# If daylight savings, remove Mpls' GMT
if (date_str.find('GMT-0500') != -1):
date_str = date_str.replace('GMT-0500', '')
log.debug('date_flashclient_mktime: Stripped GMT-0500')
elif (date_str.find('GMT-0600') != -1):
# If winter, remove Mpls' non-CDT GMT
date_str = date_str.replace('GMT-0600', '')
log.debug('date_flashclient_mktime: Stripped GMT-0600')
else:
g.assurt(False)
log.debug('date_flashclient_mktime: date_str: %s' % (date_str,))
secs_since_epoch = time.mktime(time.strptime(date_str,
'%a %b %d %H:%M:%S %Y'))
log.debug('date_flashclient_mktime: secs_since_epoch: %s'
% (secs_since_epoch,))
return secs_since_epoch
# *** SQL Support Routines
#
def get_xy_wkt_from_network_node(self, node_endpoint):
node_id = node_endpoint.label
ndpt = node_endpoint.Many.node_endpoint_get(self.qb, node_id, pt_xy=None)
g.assurt(ndpt is not None)
try:
if ndpt.endpoint_wkt:
geom = ndpt.endpoint_wkt
else:
g.assurt(False) # 2012.08.02: Deprecated. See ndpt.endpoint_wkt.
# E.g., "ST_GeomFromEWKT('SRID=%d;POINT(%.6f %.6f)')"
point_sql = geometry.xy_to_raw_point_lossless(ndpt.endpoint_xy)
sql_points = self.qb.db.sql("SELECT %s" % (point_sql,))
log.debug('get_xy_wkt_from_network_node: rows: %s'
% (sql_points,))
geom = sql_points[0]['st_asewkt']
except IndexError:
log.warning(
'get_xy_wkt_from_network_node: missing geom: node_endpoint: %s'
% (node_endpoint, node_id,))
# I'm [lb's] not sure how best to propagate this error, so let's
# just assume it'll never happen, cool beans?
g.assurt(False)
geom = None
return geom
#
   def get_straightline_geom(self, beg_pt_wkt, fin_pt_wkt):
      """Returns SVG geometry for the straight line between two WKT points.

      The Y axis is flipped (ST_Scale ..., 1, -1, 1) -- presumably because
      SVG's Y axis points down; confirm against the client's rendering.
      """
      rows = self.qb.db.sql(
         """
         SELECT
            ST_AsSVG(ST_Scale(ST_MakeLine(('%s'), ('%s')), 1, -1, 1), 0, %d)
         AS geometry
         """ % (beg_pt_wkt, fin_pt_wkt, conf.db_fetch_precision,))
      return rows[0]['geometry']
#
# MAYBE: Rename from_ and to_ to beg_ and fin_.
   # MAYBE: Rename from_ and to_ to beg_ and fin_.
   def get_straightline_geom_len_sql(self, beg_xy, fin_xy):
      """Returns the straight-line length between two x,y points, via SQL.

      NOTE(review): ST_MakeLine is passed ST_AsEWKT(...) -- i.e. text, not
      geometry -- which looks suspicious; confirm this SQL actually runs.
      See get_straightline_geom_len_raw for the pure-Python equivalent.
      """
      # FIXME: Why not use wkt_point_to_xy, et al.?
      # E.g., "ST_GeomFromEWKT('SRID=%d;POINT(%.6f %.6f)')"
      point_lhs = geometry.xy_to_raw_point_lossless(beg_xy)
      point_rhs = geometry.xy_to_raw_point_lossless(fin_xy)
      rows = self.qb.db.sql(
         "SELECT ST_Length(ST_MakeLine(ST_AsEWKT(%s), ST_AsEWKT(%s)))"
         % (point_lhs, point_rhs,))
      return rows[0]['st_length']
#
   def get_straightline_geom_len_raw(self, beg_xy, fin_xy):
      """Returns the straight-line distance between two x,y points in Python
      (no SQL round-trip; cf. get_straightline_geom_len_sql)."""
      return geometry.distance(beg_xy, fin_xy)
# TRANSITDB
#
def get_stop_xy(self, station_node):
cursor = self.db_gtfs.conn.cursor()
station_id = station_node.label[4:]
cursor.execute(
"SELECT stop_lat, stop_lon FROM stops WHERE stop_id = %s"
% (station_id,))
row = cursor.fetchone()
return row
#
   def get_stop_map_xy(self, station_node):
      """Returns the station's stop location in map coordinates, as [x, y].

      Looks up the GTFS lat/lon, then transforms from the lat/lon SRID to
      the map's default SRID via PostGIS ST_Transform.
      """
      stop_xy = self.get_stop_xy(station_node)
      log.debug('get_stop_map_xy: stop_xy: %s' % (stop_xy,))
      # Convert to map coordinates.
      # NOTE: stop_xy is (lat, lon); point coords are (x, y) == (lon, lat).
      # E.g., "ST_GeomFromEWKT('SRID=%d;POINT(%.6f %.6f)')"
      point_sql = geometry.xy_to_raw_point_lossless((stop_xy[1], stop_xy[0],),
                                                    srid=conf.srid_latlon)
      rows = self.qb.db.sql(
         """
         SELECT
            ST_X(ST_Transform(%s, %d))
             AS xcoord,
            ST_Y(ST_Transform(%s, %d))
             AS ycoord
         """ % (point_sql, conf.default_srid,
                point_sql, conf.default_srid,))
      log.debug('get_stop_map_xy: %s' % (rows[0],))
      return [rows[0]['xcoord'], rows[0]['ycoord']]
#
   def get_xy_wkt_from_station_node(self, station_node):
      """Returns EWKT for a station's stop location, in map coordinates.

      Like get_stop_map_xy, but returns the transformed point as EWKT text
      rather than separate x/y values.
      """
      stop_xy = self.get_stop_xy(station_node)
      # NOTE: stop_xy is (lat, lon); point coords are (x, y) == (lon, lat).
      # E.g., "ST_GeomFromEWKT('SRID=%d;POINT(%.6f %.6f)')"
      point_sql = geometry.xy_to_raw_point_lossless((stop_xy[1], stop_xy[0],),
                                                    srid=conf.srid_latlon)
      rows = self.qb.db.sql("SELECT ST_AsEWKT(ST_Transform(%s, %d))"
                            % (point_sql, conf.default_srid,))
      log.debug('get_xy_wkt_from_station_node: rows: %s' % (rows,))
      return rows[0]['st_asewkt']
#
def get_stop_name(self, station_node):
cursor = self.db_gtfs.conn.cursor()
station_id = station_node.label[4:]
cursor.execute(
"SELECT stop_name FROM stops WHERE stop_id = %s"
% (station_id,))
row = cursor.fetchone()
# EXPLAIN: When is the value not a string?
stop_name = str(row[0])
return stop_name
#
def get_transit_route_name(self, psv_node):
# Fetch route_short_name from the transit database.
cursor = self.db_gtfs.conn.cursor()
cursor.execute(
"SELECT route_id FROM trips WHERE trip_id = '%s'"
% (psv_node.state.trip_id,))
row = cursor.fetchone()
rte_id = row[0]
# NOTE: This is a transit route, not a Cyclopath route.
cursor.execute(
"SELECT route_short_name FROM routes WHERE route_id = '%s'"
% (rte_id,))
row = cursor.fetchone()
# EXPLAIN: When is the value not a string?
route_name = str(row[0])
return route_name
# ***
# *** Unit tests
def unit_test_01():
   """Smoke-tests flashclient date parsing with a CDT timestamp."""
   date_str = "Fri Mar 25 03:30:00 GMT-0500 2011"
   Problem.date_flashclient_mktime(date_str)

if __name__ == '__main__':
   unit_test_01()
| StarcoderdataPython |
3401188 | import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import numpy as np
from torch.utils.data import Dataset, DataLoader
import os, sys
import argparse
import pickle
import math
from efficientnet_pytorch import EfficientNet
import torch.optim as optim
from torch.optim import lr_scheduler
import time
import copy
# Train/evaluate on the first GPU when CUDA is available, else on the CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class VariantDataset(Dataset):
    """PyTorch dataset over a 2-D table of (npy_filepath, label) rows."""

    def __init__(self, data_table):
        """data_table: array-like whose column 0 holds .npy paths and
        column 1 holds integer-parsable labels."""
        self.list_npys = data_table[:, 0]
        self.labels = data_table[:, 1]

    def __len__(self):
        """Total number of samples in the table."""
        return len(self.labels)

    def __getitem__(self, index):
        """Loads sample `index`.

        Returns:
            (tensor, label): the npy image transposed HWC -> CHW and scaled
            by 1/255 as a float64 tensor, plus the label as an int.
        """
        path = self.list_npys[index]
        image = np.load(path).transpose(2, 0, 1) / 255
        return torch.from_numpy(image), int(self.labels[index])
def model_train(model, criterion, optimizer, scheduler, num_epochs, model_type):
    """Trains `model` and returns it loaded with the best-validation weights.

    Relies on the module-level globals `dataloaders`, `dataset_sizes`, and
    `device` (set up by main()).

    Args:
        model: torch.nn.Module to train (modified in place).
        criterion: loss function.
        optimizer: torch optimizer over model.parameters().
        scheduler: LR scheduler, stepped once per training epoch.
        num_epochs: number of epochs to run.
        model_type: model name; "inception*" models also return aux logits.

    Returns:
        (model, running_losses): the model with best-val-accuracy weights
        loaded, and the smoothed training losses (one entry per 100 batches).
    """
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    running_losses = []
    for epoch in range(num_epochs):
        sys.stdout.write('Epoch {}/{}'.format(epoch, num_epochs - 1) + "\n")
        sys.stdout.write('-' * 10 + "\n")
        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()  # Set model to evaluate mode
            running_loss = 0.0      # 100-batch window, for the loss curve
            all_running_loss = 0.0  # whole-epoch loss accumulator
            running_corrects = 0
            # Iterate over data.
            i = 0
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device, dtype=torch.float)
                labels = labels.to(device)
                optimizer.zero_grad()
                # forward; track gradients only while training
                with torch.set_grad_enabled(phase == 'train'):
                    if phase == "train":
                        if model_type.startswith("inception"):
                            # Inception returns (logits, aux_logits) in train mode.
                            outputs, aux_outputs = model(inputs)
                            loss = criterion(outputs, labels) + criterion(aux_outputs, labels)
                        else:
                            outputs = model(inputs)
                            loss = criterion(outputs, labels)
                        _, preds = torch.max(outputs, 1)
                        loss.backward()
                        optimizer.step()
                        # Smoothed loss curve: record the mean of each
                        # 100-batch window.
                        running_loss += loss.item() * inputs.size(0)
                        if i % 100 == 99:
                            running_losses.append(running_loss / 100)
                            running_loss = 0.0
                        i += 1
                    else:
                        outputs = model(inputs)
                        _, preds = torch.max(outputs, 1)
                        loss = criterion(outputs, labels)
                # statistics
                # BUGFIX: epoch loss/accuracy were previously accumulated only
                # in the validation branch, so train accuracy was always 0 and
                # the epoch loss was computed from the (reset) 100-batch
                # window counter; accumulate every batch in both phases.
                all_running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            if phase == 'train':
                scheduler.step()
            epoch_loss = all_running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]
            sys.stdout.write('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc) + "\n")
            # deep copy the model whenever validation accuracy improves
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
    time_elapsed = time.time() - since
    sys.stdout.write('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60) + "\n")
    sys.stdout.write('Best val Acc: {:4f}'.format(best_acc) + "\n")
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model, running_losses
def getOptions(args=None):
    """Parses command-line options for training/prediction.

    Args:
        args: list of argument strings; defaults to sys.argv[1:] resolved at
            call time. (BUGFIX: the default was previously evaluated at
            import time, so later changes to sys.argv were ignored.)

    Returns:
        argparse.Namespace with input_file, train_epoch, model, batch_size,
        and output_file attributes.
    """
    if args is None:
        args = sys.argv[1:]
    parser = argparse.ArgumentParser(description="Parses command.")
    parser.add_argument("-i", "--input_file", required=True,
                        help="the labeled input file; required field: "
                             "npy_filepath, label")
    parser.add_argument("-e", "--train_epoch", required=False, type=int,
                        default=0,
                        help="conduct training on user-provided labeled "
                             "sample from your own data set with provided "
                             "number of epochs to train.")
    parser.add_argument("-m", "--model", required=False,
                        default="efficientnet-b0",
                        help="the convolutional neural network model "
                             "transfer learning is based on.")
    # BUGFIX: help text said "traing".
    parser.add_argument("-b", "--batch_size", required=False, type=int,
                        default=10, help="training or testing batch size.")
    parser.add_argument("-o", "--output_file", required=True,
                        help="prediction output file")
    options = parser.parse_args(args)
    return options
def main(argv):
    """Loads a pretrained CNN, optionally fine-tunes it on labeled npy data,
    and saves the resulting weights plus a training-loss curve.

    Args:
        argv: unused; options are re-parsed from sys.argv[1:].
    """
    # pkg_resources is used below to locate the bundled model weights but was
    # never imported at module level; import it here so this function runs.
    import pkg_resources
    options = getOptions(sys.argv[1:])
    input_file = options.input_file
    # BUGFIX: was `opions.train_epoch`, a NameError.
    epoch = options.train_epoch
    model_name = options.model
    batch_size = options.batch_size
    output_file = os.path.abspath(options.output_file)
    if not os.path.exists(input_file):
        sys.stderr.write("Please provide a valid input file.")
        sys.exit(2)
    # user provided input data has two columns: npy_filepath, label
    # (note: npy_filepath could be obtained from deepmosaic-draw)
    data = pd.read_csv(input_file, sep="\t")
    # BUGFIX: VariantDataset and the 80/20 split below index the table
    # numpy-style (table[rows, cols]), which a DataFrame does not support;
    # convert to a plain ndarray once up front.
    data = data.values
    output_dir = "/".join(output_file.split("/")[:-1])
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    params = {'batch_size': batch_size,
              'shuffle': True,
              'num_workers': 6}
    model_type = model_name.split("_")[0]
    model_path = pkg_resources.resource_filename('deepmosaic', 'models/' + model_name)
    #model_name = os.path.abspath(model_path).split("/")[-1]
    # Build the backbone and replace its classifier head with a 3-class
    # linear layer, then load the bundled fine-tuned weights.
    if model_name.startswith("efficientnet"):
        model = EfficientNet.from_pretrained(model_type)
        num_ftrs = model._fc.in_features
        model._fc = nn.Linear(num_ftrs, 3)
    elif model_name.startswith("densenet"):
        model = torch.hub.load('pytorch/vision:v0.5.0', 'densenet121', pretrained=True)
        num_ftrs = model.classifier.in_features
        model.classifier = nn.Linear(num_ftrs, 3)
    elif model_name.startswith("inception"):
        # NOTE(review): `models` (torchvision.models?) is never imported in
        # this file, so this branch and the resnet branch raise NameError;
        # an upstream import is needed before these can work.
        model = models.inception_v3(pretrained=True)
        num_ftrs = model.fc.in_features
        model.fc = nn.Linear(num_ftrs, 3)
    elif model_name.startswith("resnet"):
        model = models.resnet18(pretrained=True)
        num_ftrs = model.fc.in_features
        model.fc = nn.Linear(num_ftrs, 3)
    # Shared tail (was duplicated per branch): load weights, move to device.
    model.load_state_dict(torch.load(model_path, map_location=device))
    model = model.to(device)
    #start training-----------------------------------------
    global dataloaders
    global dataset_sizes
    for i in range(epoch):
        # Deterministic 80/20 split (recomputed each pass, identical result).
        train_data = data[:int(len(data) * 0.8), :]
        validation_data = data[int(len(data) * 0.8):, :]
        training_generator = DataLoader(VariantDataset(train_data), **params)
        validation_generator = DataLoader(VariantDataset(validation_data), **params)
        dataloaders = {"train": training_generator, "val": validation_generator}
        dataset_sizes = {'train': len(train_data), "val": len(validation_data)}
        #initialize criterion etc.
        criterion = nn.CrossEntropyLoss()
        optimizer_ft = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
        exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
        # One training/validation pass per outer iteration.
        model, loss_list = model_train(model, criterion, optimizer_ft,
                                       exp_lr_scheduler, 1, model_name)
        #save your model
        torch.save(model.state_dict(), output_file)
        np.save(output_dir + "/training_loss.npy", np.array(loss_list))
    sys.stdout.write("complete\n")
| StarcoderdataPython |
6630062 | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""gcloud dns managed-zone create command."""
from googlecloudsdk.api_lib.dns import util
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.dns import flags
from googlecloudsdk.command_lib.dns import util as command_util
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Create(base.CreateCommand):
  """Create a Cloud DNS managed-zone.

  This command creates a Cloud DNS managed-zone.

  ## EXAMPLES

  To create a managed-zone, run:

    $ {command} my_zone --dns-name my.zone.com. --description "My zone!"
  """

  @staticmethod
  def Args(parser):
    """Registers this command's flags on the given parser."""
    flags.GetDnsZoneArg(
        'The name of the managed-zone to be created.').AddToParser(parser)
    flags.GetManagedZonesDnsNameArg().AddToParser(parser)
    flags.GetManagedZonesDescriptionArg(required=True).AddToParser(parser)
    parser.display_info.AddCacheUpdater(flags.ManagedZoneCompleter)

  def Collection(self):
    """Returns the resource collection name for this command."""
    return 'dns.managedZones'

  def Run(self, args):
    """Creates the managed zone via the v1 DNS API.

    Args:
      args: parsed argument namespace (dns_zone, dns_name, description).

    Returns:
      A single-element list containing the API's create response.
    """
    dns = apis.GetClientInstance('dns', 'v1')
    messages = apis.GetMessagesModule('dns', 'v1')

    zone_ref = resources.REGISTRY.Parse(
        args.dns_zone,
        params={
            'project': properties.VALUES.core.project.GetOrFail,
        },
        collection='dns.managedZones')

    # AppendTrailingDot: Cloud DNS expects absolute (trailing-dot) names.
    zone = messages.ManagedZone(name=zone_ref.managedZone,
                                dnsName=util.AppendTrailingDot(args.dns_name),
                                description=args.description)

    result = dns.managedZones.Create(
        messages.DnsManagedZonesCreateRequest(managedZone=zone,
                                              project=zone_ref.project))
    log.CreatedResource(zone_ref)
    return [result]
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class CreateBeta(base.CreateCommand):
  r"""Create a Cloud DNS managed-zone.

  This command creates a Cloud DNS managed-zone.

  ## EXAMPLES

  To create a managed-zone, run:

    $ {command} my_zone --dns-name my.zone.com. --description "My zone!"
  """

  # NOTE(review): kept out of the class docstring (hence "UNUSED") --
  # presumably so the DNSSEC example isn't surfaced in help yet; confirm
  # before inlining it into the docstring above.
  UNUSED_DNSSEC_EXAMPLE = """
  To create a managed-zone with DNSSEC, run:

    $ {command} my_zone_2 --description "Signed Zone" \
        --dns-name myzone.example \
        --dnssec-state=on
  """

  @staticmethod
  def Args(parser):
    """Registers this command's flags, including the DNSSEC flag set."""
    flags.GetDnsZoneArg(
        'The name of the managed-zone to be created.').AddToParser(parser)
    flags.GetManagedZonesDnsNameArg().AddToParser(parser)
    flags.GetManagedZonesDescriptionArg(required=True).AddToParser(parser)
    flags.AddCommonManagedZonesDnssecArgs(parser)
    parser.display_info.AddCacheUpdater(flags.ManagedZoneCompleter)

  def Collection(self):
    """Returns the resource collection name for this command."""
    return 'dns.managedZones'

  def Run(self, args):
    """Creates the managed zone via the v2beta1 DNS API.

    Args:
      args: parsed argument namespace.

    Returns:
      A single-element list containing the API's create response.

    Raises:
      exceptions.InvalidArgumentException: if DNSSEC tuning flags are passed
          without --dnssec-state.
    """
    dns = apis.GetClientInstance('dns', 'v2beta1')
    messages = apis.GetMessagesModule('dns', 'v2beta1')

    zone_ref = util.GetRegistry('v2beta1').Parse(
        args.dns_zone,
        params={
            'project': properties.VALUES.core.project.GetOrFail,
        },
        collection='dns.managedZones')

    dnssec_config = None
    if args.dnssec_state is not None:
      dnssec_config = command_util.ParseDnssecConfigArgs(args, messages)
    else:
      # The remaining DNSSEC flags only make sense together with
      # --dnssec-state; reject any that were supplied without it.
      bad_args = ['denial_of_existence', 'ksk_algorithm', 'zsk_algorithm',
                  'ksk_key_length', 'zsk_key_length']
      for bad_arg in bad_args:
        if getattr(args, bad_arg, None) is not None:
          raise exceptions.InvalidArgumentException(
              bad_arg,
              'DNSSEC must be enabled in order to use other DNSSEC arguments. '
              'Please set --dnssec-state to "on" or "transfer".')

    zone = messages.ManagedZone(name=zone_ref.managedZone,
                                dnsName=util.AppendTrailingDot(args.dns_name),
                                description=args.description,
                                dnssecConfig=dnssec_config)

    result = dns.managedZones.Create(
        messages.DnsManagedZonesCreateRequest(managedZone=zone,
                                              project=zone_ref.project))
    log.CreatedResource(zone_ref)
    return [result]
| StarcoderdataPython |
4904631 | <reponame>StepFunctionLLC/mlutils
from setuptools import setup
setup(
    name='mlutils',
    version='0.1.1',
    description='Public ML Utilities',
    author='Dave',
    author_email='<EMAIL>',
    # Package directory to ship (matches the distribution name).
    packages=['mlutils'],
    # Third-party runtime dependencies.
    install_requires=['matplotlib'],
)
| StarcoderdataPython |
4907754 | # copy of https://github.com/dbrgn/result
# MIT licensed
from __future__ import print_function, division, absolute_import, unicode_literals
class Result(object):
    """
    A simple `Result` type inspired by Rust.
    Not all methods (https://doc.rust-lang.org/std/result/enum.Result.html)
    have been implemented, only the ones that make sense in the Python context.
    You still don't get any type checking done.
    """
    def __init__(self, force=False):
        """Guard against direct instantiation; use Ok(value) / Err(error)."""
        if force is not True:
            raise RuntimeError("Don't instantiate a Result directly. "
                               "Use the Ok(value) and Err(error) class methods instead.")

    def __eq__(self, other):
        # BUGFIX: comparing a Result with a non-Result used to raise
        # AttributeError (no _type/_val on `other`); return NotImplemented
        # so Python falls back to its default comparison instead.
        if not isinstance(other, Result):
            return NotImplemented
        return self._type == other._type and self._val == other._val

    def __ne__(self, other):
        # BUGFIX: needed for Python 2 (this module still targets it -- see
        # the __future__ import and the __nonzero__ alias below), where
        # __ne__ is not derived from __eq__.
        equal = self.__eq__(other)
        if equal is NotImplemented:
            return equal
        return not equal

    def is_ok(self):
        """Return True if this is an `Ok` result."""
        return self._type == 'ok'

    def is_err(self):
        """Return True if this is an `Err` result."""
        return self._type == 'error'

    def __bool__(self):
        # Truthiness mirrors is_ok(): `if result:` reads as "if success".
        return self.is_ok()
    __nonzero__ = __bool__  # Python 2 alias.

    def ok(self):
        """
        Return the value if it is an `Ok` type. Return `None` if it is an `Err`.
        """
        return self._val if self.is_ok() else None

    def err(self):
        """
        Return the error if this is an `Err` type. Return `None` otherwise.
        """
        return self._val if self.is_err() else None

    @property
    def value(self):
        """
        Return the inner value. This might be either the ok or the error type.
        """
        return self._val

    # TODO: Implement __iter__ for destructuring

    def __repr__(self):
        return str(self)

    def __str__(self):
        return "{}({})".format(self._type.title(), repr(self._val))
class Ok(Result):
    """Success variant of `Result`, wrapping an optional value."""

    def __init__(self, value=True):
        # Bypass the direct-instantiation guard on the base class.
        super(Ok, self).__init__(force=True)
        self._type = 'ok'
        self._val = value
class Skip(Ok):
    """An `Ok` subclass marking a skipped/no-op success.

    Behaves exactly like `Ok` (is_ok() is true); callers can distinguish it
    with isinstance(result, Skip).
    """

    def __init__(self, value=True):
        super(Skip, self).__init__(value)
class Err(Result):
    """Failure variant of `Result`, wrapping an error value."""

    def __init__(self, value=True):
        # Bypass the direct-instantiation guard on the base class.
        super(Err, self).__init__(force=True)
        self._type = 'error'
        self._val = value
| StarcoderdataPython |
6632871 | #!/usr/bin/env python
"""Instant output plugins used by the API for on-the-fly conversion."""
import itertools
import re
from grr.lib import rdfvalue
from grr.lib import registry
from grr.lib import utils
from grr.server import aff4
from grr.server import export
class InstantOutputPlugin(object):
  """The base class for instant output plugins.

  Instant output plugins do on-the-fly data conversion and are used in
  GetExportedFlowResults/GetExportedHuntResults methods.
  """

  __metaclass__ = registry.MetaclassRegistry
  __abstract = True  # pylint: disable=g-bad-name

  # Unique name the plugin is looked up by (see GetPluginClassByPluginName).
  plugin_name = None
  # Short human-readable plugin name.
  friendly_name = None
  # Longer human-readable description of what the plugin produces.
  description = None
  # Extension (e.g. ".csv") appended to the generated output file name.
  output_file_extension = ""

  @classmethod
  def GetPluginClassByPluginName(cls, name):
    """Returns the registered plugin class whose plugin_name matches `name`.

    Raises:
      KeyError: if no registered plugin class has the given name.
    """
    for plugin_cls in cls.classes.values():
      if plugin_cls.plugin_name == name:
        return plugin_cls

    raise KeyError("No plugin with name attribute '%s'." % name)

  def __init__(self, source_urn=None, token=None):
    """OutputPlugin constructor.

    Args:
      source_urn: URN identifying source of the data (hunt or flow).
      token: Security token.

    Raises:
      ValueError: If one of the keyword arguments is empty.
    """
    super(InstantOutputPlugin, self).__init__()

    if not source_urn:
      raise ValueError("source_urn can't be empty.")

    if not token:
      raise ValueError("token can't be empty.")

    self.source_urn = source_urn
    self.token = token

  @property
  def output_file_name(self):
    """Name of the file where plugin's output should be written to."""

    # Make the URN path filesystem-safe: drop the leading slash and replace
    # ':' and '/' with '_'.
    safe_path = re.sub(r":|/", "_", self.source_urn.Path().lstrip("/"))

    return "results_%s%s" % (safe_path, self.output_file_extension)

  def Start(self):
    """Start method is called in the beginning of the export.

    Yields:
      Chunks of bytes.
    """

  def ProcessValues(self, value_cls, values_generator_fn):
    """Processes a batch of values with the same type.

    ProcessValues is called *once per value type* for each value type in
    the flow/hunt results collection.

    Args:
      value_cls: Class identifying type of the values to be processed.
      values_generator_fn: Function returning an iterable with values. Each
          value is a GRRMessage wrapping a value of a value_cls type.
          values_generator_fn may be called multiple times within
          1 ProcessValues() call - for example, when multiple passes over
          the data are required.
    """
    raise NotImplementedError()

  def Finish(self):
    """Finish method is called at the very end of the export.

    Yields:
      Chunks of bytes.
    """
class InstantOutputPluginWithExportConversion(InstantOutputPlugin):
    """Instant output plugin that flattens data before exporting."""
    __abstract = True  # pylint: disable=g-bad-name
    # Number of GRRMessage values handed to the export converter per call.
    BATCH_SIZE = 5000
    def __init__(self, *args, **kwargs):
        super(InstantOutputPluginWithExportConversion, self).__init__(
            *args, **kwargs)
        # Lazily-filled cache: client_urn -> ExportedMetadata.
        self._cached_metadata = {}
    def _GetMetadataForClients(self, client_urns):
        """Fetches metadata for a given list of clients."""
        result = {}
        metadata_to_fetch = set()
        # Serve from the cache where possible; collect the misses.
        for urn in client_urns:
            try:
                result[urn] = self._cached_metadata[urn]
            except KeyError:
                metadata_to_fetch.add(urn)
        if metadata_to_fetch:
            client_fds = aff4.FACTORY.MultiOpen(
                metadata_to_fetch, mode="r", token=self.token)
            fetched_metadata = [
                export.GetMetadata(client_fd, token=self.token)
                for client_fd in client_fds
            ]
            for metadata in fetched_metadata:
                metadata.source_urn = self.source_urn
                self._cached_metadata[metadata.client_urn] = metadata
                result[metadata.client_urn] = metadata
                metadata_to_fetch.remove(metadata.client_urn)
            # Clients that could not be opened get (and cache) an empty
            # metadata stub so they are not re-fetched on every batch.
            for urn in metadata_to_fetch:
                default_mdata = export.ExportedMetadata(source_urn=self.source_urn)
                result[urn] = default_mdata
                self._cached_metadata[urn] = default_mdata
        # Return metadata in the same order as the requested urns.
        return [result[urn] for urn in client_urns]
    def GetExportOptions(self):
        """Returns export options to be used by export converter."""
        return export.ExportOptions()
    def ProcessSingleTypeExportedValues(self, original_type, exported_values):
        """Processes exported values of the same type.

        Exported_values are guaranteed to have the same type. Consequently, this
        function may be called multiple times with the same original_type
        argument. Typical example: when export converters generate multiple
        kinds of exported values for a given source value (for example,
        Process is converted to ExportedProcess and ExportedNetworkConnection
        values).

        Args:
          original_type: Class of the original set of values that were converted
            to exported_values.
          exported_values: An iterator with exported values. All values are
            guaranteed to have the same class.
        Yields:
          Chunks of bytes.
        """
        raise NotImplementedError()
    def _GenerateSingleTypeIteration(self, next_types, processed_types,
                                     converted_responses):
        """Yields responses of a given type only.

        _GenerateSingleTypeIteration iterates through converted_responses and
        only yields responses of the same type. The type is either popped from
        next_types or inferred from the first item of converted_responses.
        The type is added to a set of processed_types.

        Along the way _GenerateSingleTypeIteration updates next_types set.
        All newly encountered and not previously processed types are added to
        next_types set.

        Calling _GenerateSingleTypeIteration multiple times allows doing
        multiple passes on converted responses and emitting converted responses
        of the same type continuously (so that they can be written into
        the same file by the plugin).

        Args:
          next_types: Set of value type classes that will be used in further
            iterations.
          processed_types: Set of value type classes that have been used
            already.
          converted_responses: Iterable with values to iterate over.

        Yields:
          Values from converted_responses with the same type. Type is either
          popped from the next_types set or inferred from the first
          converted_responses value.
        """
        if not next_types:
            # First pass: lock onto the type of the first response we see.
            current_type = None
        else:
            current_type = next_types.pop()
            processed_types.add(current_type)
        for converted_response in converted_responses:
            if not current_type:
                current_type = converted_response.__class__
                processed_types.add(current_type)
            if converted_response.__class__ != current_type:
                # Remember unseen types so a later pass can emit them.
                if converted_response.__class__ not in processed_types:
                    next_types.add(converted_response.__class__)
                continue
            yield converted_response
    def _GenerateConvertedValues(self, converter, grr_messages):
        """Generates converted values using given converter from given messages.

        Groups values in batches of BATCH_SIZE size and applies the converter
        to each batch.

        Args:
          converter: ExportConverter instance.
          grr_messages: An iterable (a generator is assumed) with GRRMessage values.

        Yields:
          Values generated by the converter.

        Raises:
          ValueError: if any of the GrrMessage objects doesn't have "source" set.
        """
        for batch in utils.Grouper(grr_messages, self.BATCH_SIZE):
            # One metadata lookup per batch; cached across batches.
            metadata_items = self._GetMetadataForClients([gm.source for gm in batch])
            batch_with_metadata = zip(metadata_items, [gm.payload for gm in batch])
            for result in converter.BatchConvert(
                    batch_with_metadata, token=self.token):
                yield result
    def ProcessValues(self, value_type, values_generator_fn):
        converter_classes = export.ExportConverter.GetConvertersByClass(value_type)
        if not converter_classes:
            return
        converters = [cls(self.GetExportOptions()) for cls in converter_classes]
        next_types = set()
        processed_types = set()
        # Multi-pass loop: each pass re-reads the source values (via
        # values_generator_fn) and emits exactly one exported type; it ends
        # when no unprocessed types remain.
        while True:
            converted_responses = itertools.chain.from_iterable(
                self._GenerateConvertedValues(converter, values_generator_fn())
                for converter in converters)
            generator = self._GenerateSingleTypeIteration(next_types, processed_types,
                                                          converted_responses)
            for chunk in self.ProcessSingleTypeExportedValues(value_type, generator):
                yield chunk
            if not next_types:
                break
def ApplyPluginToMultiTypeCollection(plugin, output_collection,
                                     source_urn=None):
    """Applies instant output plugin to a multi-type collection.

    Args:
      plugin: InstantOutputPlugin instance.
      output_collection: MultiTypeCollection instance.
      source_urn: If not None, override source_urn for collection items. This has
        to be used when exporting flow results - their GrrMessages don't have
        "source" attribute set.

    Yields:
      Bytes chunks, as generated by the plugin.
    """
    for chunk in plugin.Start():
        yield chunk
    # Sorted for a deterministic per-type output order.
    for stored_type_name in sorted(output_collection.ListStoredTypes()):
        stored_cls = rdfvalue.RDFValue.classes[stored_type_name]
        # GetValues deliberately closes over the current stored_type_name;
        # it is fully consumed before the loop advances, so the late-binding
        # closure is safe here.
        # pylint: disable=cell-var-from-loop
        def GetValues():
            for timestamp, value in output_collection.ScanByType(stored_type_name):
                _ = timestamp
                if source_urn:
                    value.source = source_urn
                yield value
        # pylint: enable=cell-var-from-loop
        for chunk in plugin.ProcessValues(stored_cls, GetValues):
            yield chunk
    for chunk in plugin.Finish():
        yield chunk
| StarcoderdataPython |
3598282 | <gh_stars>0
"""Discretize the analog filter H(s) = 1 / (3s + 1) with the bilinear
(Tustin) transform and print the resulting digital coefficients.

Fixes: converted the Python 2 ``print`` statement to a ``print()`` call,
removed the unused ``numpy.exp`` import, the unused ``pylab`` wildcard
import, and the dead commented-out debug print.
"""
import numpy as np
from scipy import signal

# Analog transfer-function coefficients: numerator b, denominator a.
b = np.array([1.0])
a = np.array([3.0, 1.0])
# Bilinear transform with the default sample rate fs = 1.0.
B, A = signal.bilinear(b, a)
print(B, A)
| StarcoderdataPython |
4982105 | import pandas as pd
import nltk
from textblob.classifiers import NaiveBayesClassifier
from textblob import TextBlob
from pandas_ods_reader import read_ods
from pandas_ods_reader import read_ods
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from string import punctuation
def shareCheck(files, dir):
    """Classify text files as 'compartilhamento' (sharing) or
    'sem compartilhamento' with a Naive Bayes classifier trained on
    labelled .ods spreadsheets.

    Fixes: the loop variable ``f`` was reused for both the file *name* and
    the open file *handle*, and ``f.close()`` was called on a handle the
    ``with`` block had already closed; distinct names are used now and the
    redundant close is gone.

    Args:
        files: Iterable of file names (relative to ``dir``) to classify.
        dir: Directory prefix prepended to each file name.  (Parameter name
            kept for backward compatibility although it shadows the builtin.)

    Returns:
        Tuple ``(labels, accuracy, probabilities)``: the predicted label for
        each file, the classifier accuracy on the held-out test set, and the
        rounded probability of the predicted class for each file.
    """
    train_file = "./80/aleatorio/treino/compartilhamento/train_gen_comp.ods"
    test_file = "./80/aleatorio/teste/compartilhamento/test_gen_comp.ods"
    sheet_id = 1
    # Build the training set: (sentence, sentiment) records.
    train_df = pd.DataFrame(data=read_ods(train_file, sheet_id))
    train_df.columns = ['Frase', 'Sentimento']
    clas = NaiveBayesClassifier(train_df.to_records(index=False))
    # Measure accuracy on the held-out test set.
    test_df = pd.DataFrame(data=read_ods(test_file, sheet_id))
    accuracy = clas.accuracy(test_df.to_records(index=False))
    list_share = []
    prob_vec = []
    for name in files:
        with open(dir + name, 'r') as handle:
            lines = handle.read()
        blob = TextBlob(lines, classifier=clas)
        predicted = blob.classify()
        list_share.append(predicted)
        # Probability of the class that was actually predicted.
        prob_dist = clas.prob_classify(lines)
        prob_vec.append(round(prob_dist.prob(predicted), 2))
    return list_share, accuracy, prob_vec
def removeStopWords(text, stopwords):
    """Return *text* with every whitespace-delimited token removed whose
    lowercased form appears in *stopwords*.

    The previous implementation was an unfinished stub that ignored
    *stopwords* entirely and returned the input unchanged.

    Note: tokens are re-joined with single spaces, so runs of whitespace
    collapse to one space.
    """
    lowered = {w.lower() for w in stopwords}
    return ' '.join(w for w in text.split() if w.lower() not in lowered)
| StarcoderdataPython |
3396832 | import pytest
import inspect
import tubular.testing.helpers as h
import numpy as np
def test_arguments():
    """Test arguments for arguments of tubular.testing.helpers.assert_np_nan_eqal_msg."""
    expected_arguments = ["actual", "expected", "msg"]
    spec = inspect.getfullargspec(h.assert_np_nan_eqal_msg)
    found = spec.args
    # The helper must take exactly the expected parameters...
    assert len(expected_arguments) == len(
        found
    ), f"Incorrect number of arguments -\n Expected: {len(expected_arguments)}\n Actual: {len(found)}"
    # ...with the expected names, in the expected order...
    for i, (e, a) in enumerate(zip(expected_arguments, found)):
        assert e == a, f"Incorrect arg at index {i} -\n Expected: {e}\n Actual: {a}"
    # ...and with no default values on any of them.
    default_values = spec.defaults
    assert (
        default_values is None
    ), f"Unexpected default values -\n Expected: None\n Actual: {default_values}"
@pytest.mark.parametrize("expected, actual", [(1, np.NaN), (np.NaN, 1), (1, 1)])
def test_error_raised_unequal(expected, actual):
    """Test an assertion error is raised if both values are not None."""
    # The helper must raise with a message naming both offending values.
    message = (
        f"Both values are not equal to np.NaN -\n Expected: {expected}\n Actual: {actual}"
    )
    with pytest.raises(AssertionError, match=message):
        h.assert_np_nan_eqal_msg(expected=expected, actual=actual, msg="bbb")
| StarcoderdataPython |
1735572 | <filename>src/network.py
"""
Package for interacting on the network at a high level.
"""
import binascii
import pickle
from twisted.internet.task import LoopingCall
from twisted.internet import defer, reactor, task
from log import Logger
from protocol import KademliaProtocol
from utils import deferred_dict, generate_node_id
from storage import ForgetfulStorage
from node import Node
from crawling import ValueSpiderCrawl
from crawling import NodeSpiderCrawl
class Server(object):
    """
    High level view of a node instance. This is the object that should be created
    to start listening as an active node on the network.
    """
    def __init__(self, ksize=20, alpha=3, id=None, storage=None):
        """
        Create a server instance. This will start listening on the given port.

        Args:
            ksize (int): The k parameter from the paper
            alpha (int): The alpha parameter from the paper
            id: The id for this node on the network.
            storage: An instance that implements :interface:`~kademlia.storage.IStorage`
        """
        self.ksize = ksize
        self.alpha = alpha
        self.log = Logger(system=self)
        # Default storage expires entries after 30 minutes.
        self.storage = storage or ForgetfulStorage(ttl=30 * 60)
        self.node = Node(id or generate_node_id())
        self.protocol = KademliaProtocol(self.node, self.storage, ksize)
        # Refresh stale buckets once per hour.
        # NOTE(review): LoopingCall(...).start() returns a Deferred, so this
        # attribute holds the Deferred, not the LoopingCall -- the loop
        # cannot be stopped via self.refreshLoop.  Confirm that is intended.
        self.refreshLoop = LoopingCall(self.refresh_table).start(3600)
    def listen(self, port, interface=""):
        """
        Start listening on the given port.

        This is the same as calling::
            reactor.listenUDP(port, server.protocol)

        Provide interface="::" to accept ipv6 address
        """
        return reactor.listenUDP(port, self.protocol, interface)
    def refresh_table(self):
        """
        Refresh buckets that haven't had any lookups in the last hour
        (per section 2.3 of the paper).
        """
        ds = []
        for id in self.protocol.getRefreshIDs():
            node = Node(id)
            nearest = self.protocol.router.findNeighbors(node, self.alpha)
            spider = NodeSpiderCrawl(self.protocol, node, nearest, self.ksize, self.alpha)
            ds.append(spider.find())
        def republishKeys(_):
            ds = []
            # Republish keys older than one hour
            for dkey, value in self.storage.iteritemsOlderThan(3600):
                # NOTE(review): digest_set is not defined on Server in this
                # file -- confirm it exists elsewhere, otherwise republishing
                # will raise AttributeError.
                ds.append(self.digest_set(dkey, value))
            return defer.gatherResults(ds)
        # Republish only after all bucket refreshes have completed.
        return defer.gatherResults(ds).addCallback(republishKeys)
    def bootstrappable_neighbors(self):
        """
        Get a :class:`list` of (ip, port) :class:`tuple` pairs suitable for use as an argument
        to the bootstrap method.

        The server should have been bootstrapped
        already - this is just a utility for getting some neighbors and then
        storing them if this server is going down for a while. When it comes
        back up, the list of nodes can be used to bootstrap.
        """
        neighbors = self.protocol.router.findNeighbors(self.node)
        # Each neighbor tuple ends with (ip, port).
        return [tuple(n)[-2:] for n in neighbors]
    def bootstrap(self, addrs):
        """
        Bootstrap the server by connecting to other known nodes in the network.

        Args:
            addrs: A `list` of (ip, port) `tuple` pairs. Note that only IP addresses
                   are acceptable - hostnames will cause an error.
        """
        # if the transport hasn't been initialized yet, wait a second
        if self.protocol.transport is None:
            return task.deferLater(reactor, 1, self.bootstrap, addrs)
        def init_table(results):
            # Keep only the peers that actually answered the ping.
            nodes = []
            for addr, result in results.items():
                if result[0]:
                    nodes.append(Node(result[1]["id"], addr[0], addr[1]))
            spider = NodeSpiderCrawl(self.protocol, self.node, nodes, self.ksize, self.alpha)
            return spider.find()
        ds = {}
        for addr in addrs:
            ds[addr] = self.protocol.ping(addr, self.node.id)
        return deferred_dict(ds).addCallback(init_table)
    def inet_visible_ip(self):
        """
        Get the internet visible IP's of this node as other nodes see it.

        Returns:
            A `list` of IP's. If no one can be contacted, then the `list` will be empty.
        """
        def handle(results):
            ips = [result[1][0] for result in results if result[0]]
            self.log.debug("other nodes think our ip is %s" % str(ips))
            return ips
        ds = []
        for neighbor in self.bootstrappable_neighbors():
            ds.append(self.protocol.stun(neighbor))
        return defer.gatherResults(ds).addCallback(handle)
    def get_peers(self, info_hash):
        """
        Get a key if the network has it.

        Returns:
            :class:`None` if not found, the value otherwise.
        """
        # if this node has it, return it
        if self.storage.get(info_hash) is not None:
            return defer.succeed(self.storage.get(info_hash))
        node = Node(info_hash)
        nearest = self.protocol.router.findNeighbors(node)
        if len(nearest) == 0:
            self.log.warning("There are no known neighbors to get key %s" % info_hash)
            return defer.succeed(None)
        spider = ValueSpiderCrawl(self.protocol, node, nearest, self.ksize, self.alpha)
        return spider.find()
    def announce_peer(self, info_hash, port):
        """
        Set the given key to the given value in the network.

        Returns a Deferred firing True if at least one remote peer accepted
        the announce, False otherwise.
        """
        self.log.debug("setting '%s' = '%s' on network" % (info_hash, port))
        key = Node(info_hash)
        # this is useful for debugging messages
        hkey = binascii.hexlify(info_hash)
        def _any_announce_respond_success(responses):
            # True as soon as one announce round-trip succeeded.
            for defer_success, result in responses:
                peer_reached, peer_response, node = result
                if defer_success and peer_reached and peer_response:
                    return True
            return False
        def _any_get_peers_respond_success(responses):
            # Announce to every peer that handed us a write token.
            ds = []
            for defer_success, result in responses:
                peer_reached, peer_response, node = result
                if defer_success and peer_reached and "token" in peer_response:
                    ds.append(self.protocol.callAnnouncePeer(node, key, port, peer_response["token"]))
            if ds:
                return defer.DeferredList(ds).addCallback(_any_announce_respond_success)
            else:
                return False
        def _store(nodes):
            self.log.info("setting '%s' on %s" % (hkey, map(str, nodes)))
            # if this node is close too, then store here as well
            if self.node.distanceTo(key) < max([n.distanceTo(key) for n in nodes]):
                self.storage[info_hash] = port
            ds = [self.protocol.callGetPeers(n, key) for n in nodes]
            return defer.DeferredList(ds).addCallback(_any_get_peers_respond_success)
        nearest = self.protocol.router.findNeighbors(key)
        if len(nearest) == 0:
            self.log.warning("There are no known neighbors to set key %s" % hkey)
            return defer.succeed(False)
        spider = NodeSpiderCrawl(self.protocol, key, nearest, self.ksize, self.alpha)
        return spider.find().addCallback(_store)
    def save_state(self, fname):
        """
        Save the state of this node (the alpha/ksize/id/immediate neighbors)
        to a cache file with the given fname.
        """
        data = {'ksize': self.ksize,
                'alpha': self.alpha,
                'id': self.node.id,
                'neighbors': self.bootstrappable_neighbors()}
        if len(data['neighbors']) == 0:
            self.log.warning("No known neighbors, so not writing to cache.")
            return
        # NOTE(review): file opened in text mode ('w'); fine under Python 2
        # (this module's vintage) but pickle needs 'wb' under Python 3.
        with open(fname, 'w') as f:
            pickle.dump(data, f)
    @staticmethod
    def load_state(fname):
        """
        Load the state of this node (the alpha/ksize/id/immediate neighbors)
        from a cache file with the given fname.
        """
        with open(fname, 'r') as f:
            data = pickle.load(f)
        s = Server(data['ksize'], data['alpha'], data['id'])
        if len(data['neighbors']) > 0:
            s.bootstrap(data['neighbors'])
        return s
    def save_state_regularly(self, fname, frequency=600):
        """
        Save the state of node with a given regularity to the given
        filename.

        Args:
            fname: File name to save retularly to
            frequency: Frequency in seconds that the state should be saved.
                       By default, 10 minutes.
        """
        loop = LoopingCall(self.save_state, fname)
        loop.start(frequency)
        return loop
4971564 | <reponame>Lcvette/qtpyvcp
import os
from qtpy import uic
from qtpy.QtCore import Qt
from qtpy.QtWidgets import QDialog
from qtpyvcp.utilities.logger import getLogger
LOG = getLogger(__name__)
class BaseDialog(QDialog):
    """Base Dialog

    Base QtPyVCP dialog class.

    This is intended to be used as a base class for custom dialogs, as well
    as a provider for use in YAML config files. This allows loading custom
    dialogs from .ui files without needing to write any python code.

    You can launch dialogs using a
    :doc:`Dialog Button </widgets/buttons/index>` or
    from a window menu item.

    Example:
        YAML config for loading a custom dialog called `my_dialog` from a .ui
        file named ``my_diloag.ui`` located in the same dir as the .yml file::

            dialogs:
              my_dialog:
                provider: qtpyvcp.widgets.dialogs.base_dialog:BaseDialog
                kwargs:
                  ui_file: {{ file.dir }}/my_dialog.ui
                  title: My Dialog Title  # optional, set the dialog title
                  modal: false            # optional, whether the dialog is modal
                  popup: false            # optional, whether the dialog is a popup
                  frameless: false        # optional, whether the dialog is frameless
                  stay_on_top: true       # optional, whether the dialog stays on top

    Args:
        parent (QWidget, optional) : The dialog's parent window, or None.
        ui_file (str, optional) : The path of a .ui file to load the dialog
            from. The ui base widget should be a QDialog.
        title (str, optional) : The title to use for the dialog. This will
            override any title property set in QtDesigner.
        modal (bool, optional) : Whether the dialog should be application modal.
            This will override any modality hints set in QtDesigner.
        frameless (bool, optional) : Whether the window has a frame or not.
            If the window does not have a frame you will need some way to
            close it, like an Ok or Cancel button.
        popup: (bool, optional) : Makes the dialog use a frame less window
            that automatically hides when it looses focus.
        stay_on_top (bool, optional) : Sets the stay on top hint window flag.
            This overrides any window flags set in QtDesiger.
    """
    def __init__(self, parent=None, ui_file=None, title=None, modal=None,
                 popup=None, frameless=None, stay_on_top=None):
        super(BaseDialog, self).__init__(parent)
        if ui_file is not None:
            self.loadUiFile(ui_file)
        if title is not None:
            self.setWindowTitle(title)
        if modal is not None:
            if modal:
                self.setWindowModality(Qt.ApplicationModal)
            else:
                self.setWindowModality(Qt.NonModal)
        # BUG FIX: previously this tested `popup is not None`, so passing
        # popup=False still turned the dialog into a Qt.Popup window.  Only
        # a truthy value should trigger popup behavior.
        if popup:
            self.setWindowFlags(Qt.Popup)
        if frameless is not None:
            self.setWindowFlag(Qt.FramelessWindowHint, frameless)
        if stay_on_top is not None:
            self.setWindowFlag(Qt.WindowStaysOnTopHint, stay_on_top)
    def loadUiFile(self, ui_file):
        """Load dialog from a .ui file.

        The .ui file base class should be a QDialog.

        Args:
            ui_file (str) : path to the .ui file to load.
        """
        ui_file = os.path.realpath(ui_file)
        if not os.path.isfile(ui_file):
            LOG.error("Specified UI for dialog does not exist: %s", ui_file)
            return
        LOG.debug("Loading dialog from ui_file: %s", ui_file)
        uic.loadUi(ui_file, self)
    def setWindowFlag(self, flag, on):
        """BackPort QWidget.setWindowFlag() implementation from Qt 5.9

        This method was introduced in Qt 5.9 so is not present
        in Qt 5.7.1 which is standard on Debian 9 (stretch), so
        add our own implementation.
        """
        if on:
            # add flag
            self.setWindowFlags(self.windowFlags() | flag)
        else:
            # remove flag
            self.setWindowFlags(self.windowFlags() ^ flag)
| StarcoderdataPython |
267734 | #!/usr/bin/python
#
# Copyright (C) 2008 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Allow Google Apps domain administrators to set users' email settings.
EmailSettingsService: Set various email settings.
"""
__author__ = '<EMAIL>'
import gdata.apps
import gdata.apps.service
import gdata.service
API_VER='2.0'
# Forwarding and POP3 options
KEEP='KEEP'
ARCHIVE='ARCHIVE'
DELETE='DELETE'
ALL_MAIL='ALL_MAIL'
MAIL_FROM_NOW_ON='MAIL_FROM_NOW_ON'
class EmailSettingsService(gdata.apps.service.PropertyService):
    """Client for the Google Apps Email Settings service."""
    def _serviceUrl(self, setting_id, username, domain=None):
        # Builds the per-user settings endpoint; falls back to the service's
        # default domain when none is given.
        if domain is None:
            domain = self.domain
        return '/a/feeds/emailsettings/%s/%s/%s/%s' % (API_VER, domain, username,
                                                       setting_id)
    def _bool2str(self, b):
        # The API wants lowercase 'true'/'false' strings; None passes
        # through unchanged (meaning "not specified").
        if b is None:
            return None
        return str(b is True).lower()
    def CreateLabel(self, username, label):
        """Create a label.

        Args:
          username: User to create label for.
          label: Label to create.

        Returns:
          A dict containing the result of the create operation.
        """
        uri = self._serviceUrl('label', username)
        properties = {'label': label}
        return self._PostProperties(uri, properties)
    def CreateFilter(self, username, from_=None, to=None, subject=None,
                     has_the_word=None, does_not_have_the_word=None,
                     has_attachment=None, label=None, should_mark_as_read=None,
                     should_archive=None):
        """Create a filter.

        Args:
          username: User to create filter for.
          from_: Filter from string.
          to: Filter to string.
          subject: Filter subject.
          has_the_word: Words to filter in.
          does_not_have_the_word: Words to filter out.
          has_attachment: Boolean for message having attachment.
          label: Label to apply.
          should_mark_as_read: Boolean for marking message as read.
          should_archive: Boolean for archiving message.

        Returns:
          A dict containing the result of the create operation.
        """
        uri = self._serviceUrl('filter', username)
        properties = {}
        properties['from'] = from_
        properties['to'] = to
        properties['subject'] = subject
        properties['hasTheWord'] = has_the_word
        properties['doesNotHaveTheWord'] = does_not_have_the_word
        properties['hasAttachment'] = self._bool2str(has_attachment)
        properties['label'] = label
        properties['shouldMarkAsRead'] = self._bool2str(should_mark_as_read)
        properties['shouldArchive'] = self._bool2str(should_archive)
        return self._PostProperties(uri, properties)
    def CreateSendAsAlias(self, username, name, address, reply_to=None,
                          make_default=None):
        """Create alias to send mail as.

        Args:
          username: User to create alias for.
          name: Name of alias.
          address: Email address to send from.
          reply_to: Email address to reply to.
          make_default: Boolean for whether this is the new default sending alias.

        Returns:
          A dict containing the result of the create operation.
        """
        uri = self._serviceUrl('sendas', username)
        properties = {}
        properties['name'] = name
        properties['address'] = address
        properties['replyTo'] = reply_to
        properties['makeDefault'] = self._bool2str(make_default)
        return self._PostProperties(uri, properties)
    def UpdateForwarding(self, username, enable, forward_to=None, action=None):
        """Update forwarding settings.

        Args:
          username: User to update forwarding for.
          enable: Boolean whether to enable this forwarding rule.
          forward_to: Email address to forward to.
          action: Action to take after forwarding (KEEP, ARCHIVE or DELETE).

        Returns:
          A dict containing the result of the update operation.
        """
        uri = self._serviceUrl('forwarding', username)
        properties = {}
        properties['enable'] = self._bool2str(enable)
        # Target address/action only make sense when forwarding is enabled.
        if enable is True:
            properties['forwardTo'] = forward_to
            properties['action'] = action
        return self._PutProperties(uri, properties)
    def UpdatePop(self, username, enable, enable_for=None, action=None):
        """Update POP3 settings.

        Args:
          username: User to update POP3 settings for.
          enable: Boolean whether to enable POP3.
          enable_for: Which messages to make available via POP3
            (ALL_MAIL or MAIL_FROM_NOW_ON).
          action: Action to take after user retrieves email via POP3.

        Returns:
          A dict containing the result of the update operation.
        """
        uri = self._serviceUrl('pop', username)
        properties = {}
        properties['enable'] = self._bool2str(enable)
        if enable is True:
            properties['enableFor'] = enable_for
            properties['action'] = action
        return self._PutProperties(uri, properties)
    def UpdateImap(self, username, enable):
        """Update IMAP settings.

        Args:
          username: User to update IMAP settings for.
          enable: Boolean whether to enable IMAP.

        Returns:
          A dict containing the result of the update operation.
        """
        uri = self._serviceUrl('imap', username)
        properties = {'enable': self._bool2str(enable)}
        return self._PutProperties(uri, properties)
    def UpdateVacation(self, username, enable, subject=None, message=None,
                       contacts_only=None):
        """Update vacation settings.

        Args:
          username: User to update vacation settings for.
          enable: Boolean whether to enable vacation responses.
          subject: Vacation message subject.
          message: Vacation message body.
          contacts_only: Boolean whether to send message only to contacts.

        Returns:
          A dict containing the result of the update operation.
        """
        uri = self._serviceUrl('vacation', username)
        properties = {}
        properties['enable'] = self._bool2str(enable)
        if enable is True:
            properties['subject'] = subject
            properties['message'] = message
            properties['contactsOnly'] = self._bool2str(contacts_only)
        return self._PutProperties(uri, properties)
    def UpdateSignature(self, username, signature):
        """Update signature.

        Args:
          username: User to update signature for.
          signature: Signature string.

        Returns:
          A dict containing the result of the update operation.
        """
        uri = self._serviceUrl('signature', username)
        properties = {'signature': signature}
        return self._PutProperties(uri, properties)
    def UpdateLanguage(self, username, language):
        """Update user interface language.

        Args:
          username: User to update language for.
          language: Language code.

        Returns:
          A dict containing the result of the update operation.
        """
        uri = self._serviceUrl('language', username)
        properties = {'language': language}
        return self._PutProperties(uri, properties)
    def UpdateGeneral(self, username, page_size=None, shortcuts=None, arrows=None,
                      snippets=None, unicode=None):
        """Update general settings.

        Args:
          username: User to update general settings for.
          page_size: Number of messages to show.
          shortcuts: Boolean whether shortcuts are enabled.
          arrows: Boolean whether arrows are enabled.
          snippets: Boolean whether snippets are enabled.
          unicode: Whether unicode is enabled.  (Parameter name shadows the
            Python 2 builtin; kept for backward compatibility.)

        Returns:
          A dict containing the result of the update operation.
        """
        uri = self._serviceUrl('general', username)
        properties = {}
        # NOTE(review): unlike the _bool2str fields below, str(page_size)
        # turns an unspecified page_size into the literal string "None" --
        # confirm _PutProperties filters that out, otherwise it is sent.
        properties['pageSize'] = str(page_size)
        properties['shortcuts'] = self._bool2str(shortcuts)
        properties['arrows'] = self._bool2str(arrows)
        properties['snippets'] = self._bool2str(snippets)
        properties['unicode'] = self._bool2str(unicode)
        return self._PutProperties(uri, properties)
| StarcoderdataPython |
9629206 | <filename>src/VideoSeparation/VideoSeparator.py<gh_stars>0
from os import write
from Utilities import Utilities as util
import cv2
import numpy as np
from tqdm import tqdm
from FaceDetection import FaceDetector as detector
class VideoSeparator:
    """Cut a source video into clips according to time intervals read from
    a CSV file (one ``start,end`` pair per row, in seconds).

    Fixes over the previous version:
      * a frame is no longer written after a failed read (``ret`` was only
        checked at the top of the loop, so a ``None`` frame could reach
        ``VideoWriter.write``);
      * the unused ``frames`` list no longer buffers every decoded frame
        in memory.
    """
    def __init__(self, videoFileName):
        self.videoFileName = videoFileName
        self.videoFilePath = util.getVideoPath(videoFileName)
        self.capture = cv2.VideoCapture(self.videoFilePath)
        self.fps = self.capture.get(cv2.CAP_PROP_FPS)
        self.size = (int(self.capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
                     int(self.capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        # mp4v codec for the output clips.
        self.fourcc = int(cv2.VideoWriter_fourcc('m', 'p', '4', 'v'))
    def getFrameId(self, t):
        """Convert a timestamp in seconds to the nearest frame index."""
        return round(t * self.fps)
    def apply(self):
        """Write one output clip per interval row of the interval CSV."""
        intervalFileName = util.getIntervalPath(self.videoFileName)
        videoFilePath = util.getSeparatedVideoFilePath(self.videoFileName)
        intervals = np.genfromtxt(intervalFileName, delimiter=',')
        for i in tqdm(range(len(intervals)), desc='Video separation progress'):
            start = intervals[i][0]
            end = intervals[i][1]
            beginFrameId = self.getFrameId(start)
            endFrameId = self.getFrameId(end)
            # Output path template is numbered per clip (1-based).
            writer = cv2.VideoWriter(
                videoFilePath % (i + 1), self.fourcc, self.fps, self.size)
            # Seek to the first frame of the interval.
            self.capture.set(cv2.CAP_PROP_POS_FRAMES, beginFrameId)
            while self.capture.isOpened() and writer.isOpened():
                ret, frame = self.capture.read()
                if not ret:
                    # End of stream / read failure: never write a None frame.
                    break
                frameId = self.capture.get(cv2.CAP_PROP_POS_FRAMES) - 1
                if frameId > endFrameId:
                    break
                writer.write(frame)
            writer.release()
| StarcoderdataPython |
3210694 | from invoke import Collection, task
# Default tasks are in core.tasks
import core.tasks.app as app
import core.tasks.notebooks as notebooks
import core.tasks.setup as setup
import core.tasks.destroy as destroy
# Build the root task namespace.  The previous version created a
# Collection(), registered `app`, and then immediately rebound `ns` to a
# new Collection(app), silently discarding the first two statements; that
# dead code is removed here (the final namespace is unchanged: it contains
# app, notebooks, setup and destroy).
ns = Collection(app)
ns.add_collection(notebooks)
ns.add_collection(setup)
ns.add_collection(destroy)
| StarcoderdataPython |
289732 | # Build a Boolean mask to filter out all the 'LAX' departure flights: mask
# Build a Boolean mask selecting the 'LAX' destination rows.
# NOTE(review): `df` is assumed to be a flight-data DataFrame defined
# earlier (outside this snippet) with 'Destination Airport',
# 'Date (MM/DD/YYYY)' and 'Wheels-off Time' columns -- confirm.
mask = df['Destination Airport'] == 'LAX'
# Use the mask to subset the data: la
la = df[mask]
# Combine two columns of data to create a datetime series: times_tz_none
times_tz_none = pd.to_datetime( la['Date (MM/DD/YYYY)'] + ' ' + la['Wheels-off Time'] )
# Localize the (naive) times to US/Central: times_tz_central
times_tz_central = times_tz_none.dt.tz_localize('US/Central')
# Convert the datetimes from US/Central to US/Pacific
times_tz_pacific = times_tz_central.dt.tz_convert('US/Pacific')
| StarcoderdataPython |
38277 | from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
from IPython.core.debugger import Tracer; debug_here = Tracer();
# Experiment: derive per-sequence lengths from per-step character
# probabilities by finding the first step whose argmax is class 0 (used
# here as the end-of-sequence marker).
# NOTE(review): written against a very old TensorFlow (tf.select and
# initialize_all_variables were removed/renamed in later 1.x releases) --
# keep that environment or port to tf.where / global_variables_initializer.
batch_size = 5
max_it = tf.constant(6)
# Five (batch=5, classes=6) probability matrices, one per time step.
char_mat_1 = [[0.0, 0.0, 0.0, 0.9, 0.0, 0.0],
              [0.0, 0.0, 0.0, 0.9, 0.0, 0.0],
              [0.0, 0.0, 0.0, 0.9, 0.0, 0.0],
              [0.0, 0.0, 0.0, 0.9, 0.0, 0.0],
              [0.0, 0.0, 0.0, 0.9, 0.0, 0.0]]
char_mat_2 = [[0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
              [1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
              [0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
              [0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
              [0.0, 0.0, 0.0, 1.0, 0.0, 0.0]]
char_mat_3 = [[0.0, 0.0, 0.0, 0.1, 0.0, 0.0],
              [1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
              [1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
              [0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
              [0.0, 0.0, 0.0, 1.0, 0.0, 0.0]]
char_mat_4 = [[0.0, 0.0, 0.0, 0.1, 0.0, 0.0],
              [0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
              [1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
              [0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
              [1.0, 0.0, 0.0, 0.0, 0.0, 0.0]]
char_mat_5 = [[1.0, 0.0, 0.0, 1.0, 0.0, 0.0],
              [1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
              [1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
              [1.0, 0.0, 0.0, 1.0, 0.0, 0.0],
              [1.0, 0.0, 0.0, 0.0, 0.0, 0.0]]
#expected output: [5, 2, 4, 5, 4]
char_lst = [char_mat_1, char_mat_2, char_mat_3,
            char_mat_4, char_mat_5]
np_char_tensor = np.array(char_lst)
char_prob = tf.constant(np.array(np_char_tensor), tf.float64)
# (time, batch, classes) -> (batch, time, classes).
char_prob = tf.transpose(char_prob, [1, 0, 2])
print(tf.Tensor.get_shape(char_prob))
sequence_length_lst = [1, 1, 1, 1, 1]
sequence_length = tf.constant(sequence_length_lst)
# done_mask[i] becomes True once sequence i has emitted the EOS class (0).
done_mask = tf.cast(tf.zeros(batch_size), tf.bool)
for time in range(0, 5):
    print(time)
    current_date = char_prob[:, time, :]
    max_vals = tf.argmax(current_date, 1)
    # EOS fires where the argmax is class 0 and the sequence is not done yet.
    mask = tf.equal(max_vals, tf.constant(0, tf.int64))
    current_mask = tf.logical_and(mask, tf.logical_not(done_mask))
    done_mask = tf.logical_or(mask, done_mask)
    # Sequences still running get their length bumped to time+2.
    time_vec = tf.ones(batch_size, tf.int32)*(time+2)
    sequence_length = tf.select(done_mask, sequence_length, time_vec, name=None)
    # Stop once every sequence is done or the iteration budget is spent.
    not_done_no = tf.reduce_sum(tf.cast(tf.logical_not(done_mask), tf.int32))
    all_eos = tf.equal(not_done_no, tf.constant(0))
    stop_loop = tf.logical_or(all_eos, tf.greater(time, max_it))
    keep_working = tf.logical_not(stop_loop)
sess = tf.Session()
with sess.as_default():
    tf.initialize_all_variables().run()
    #print(char_prob.eval())
    print(max_vals.eval())
    print(mask.eval())
    print(done_mask.eval())
    print(sequence_length.eval())
    print(keep_working.eval())
| StarcoderdataPython |
8110080 | import os
import numpy as np
import pandas as pd
from sklearn.model_selection import ShuffleSplit
import cv2
import matplotlib.pyplot as plt
import torch
import albumentations as alb
from albumentations.augmentations import transforms as albtr
from albumentations.pytorch import ToTensor as albToTensor
from iterstrat.ml_stratifiers import MultilabelStratifiedKFold
from data import data_utils as dt_utl
from image import image_proc as img_prc
from model import bengali_classifier
from model import senet
from model import modi_senet
from model import torch_data_utils as tdu
from model import train_utils as tru
from model import pred_utils as pru
pretrain_path_root = '../consideration/'
def get_checkpoint(path):
    """Deserialize a torch checkpoint from *path* onto host (CPU) memory.

    The ``map_location`` callable returns each storage unchanged after
    CPU-side deserialization, so checkpoints saved on GPU machines can be
    opened on hosts without CUDA.
    """
    return torch.load(path, map_location=lambda storage, _loc: storage)
def make_model_v0_4_1():
    """Train fold 0 of a Bengali grapheme classifier; save weights to './bengali_model'.

    Experiment config: calibrated mixup (alpha=0.2), mish, cutmix,
    MultilabelStratifiedKFold, train_model_v2_1, class balance,
    SENetEncoder_CalibMixup_Multiscale_v2 + ClassifierModule_v9.
    """
    ## data
    pp_pl = img_prc.PreprocPipeline_v1()
    imgs = pp_pl.load_imgs()
    labels = dt_utl.get_train_label()
    # transformers: train = shift/scale/rotate jitter + normalize; test = normalize only
    tr_transformer = tdu.transform_wrapper(
        [],
        alb.Compose([
            albtr.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.1, rotate_limit=15, border_mode=cv2.BORDER_CONSTANT, p=1),
            albtr.Normalize(0.5, 0.5),
            albToTensor()
        ])
    )
    ts_transformer = tdu.transform_wrapper(
        [],
        alb.Compose([
            albtr.Normalize(0.5, 0.5),
            albToTensor()
        ])
    )
    ## model
    get_senet = senet.se_resnext50_32x4d  # alternatives: se_resnet152, se_resnext101_32x4d, se_resnet50
    input3ch = False
    three_neck = False
    use_mish = True
    mixup_alpha = 0.2
    mix_cand_layers = [0, 1, 2]  # alternative: [0, 1, 2, 3]
    cutmix_alpha = 1.0
    cutmix_cand_layers = [0]
    output_layers = [2, 3, 4]
    encoder = modi_senet.SENetEncoder_CalibMixup_Multiscale_v2(
        get_senet(num_classes=1000, pretrained='imagenet'),
        input3ch=input3ch,
        three_neck=three_neck, mixup_alpha=mixup_alpha, mix_cand_layers=mix_cand_layers,
        use_mish=use_mish, cutmix_alpha=cutmix_alpha, cutmix_cand_layers=cutmix_cand_layers,
        output_layers=output_layers,
    )
    encoded_planes = [512, 1024, 2048]  # channel widths of output_layers 2/3/4
    encoder_is_separately = three_neck
    encoder_use_mixup = True
    dropout_p = 0.0
    gem_p = None
    classifier_module = bengali_classifier.ClassifierModule_v9
    model = bengali_classifier.BengaliClassifier_v1(
        encoder, encoded_planes,
        encoder_is_separately, encoder_use_mixup,
        dropout_p, gem_p,
        classifier_module=classifier_module,
    )
    ## training
    # NOTE(review): random_state without shuffle=True is ignored by
    # sklearn-style K-fold splitters (newer versions reject the combination)
    # -- confirm the intended fold assignment.
    fld = MultilabelStratifiedKFold(n_splits=5, random_state=2020)
    sample_ids = np.arange(len(labels))[:, None]  # renamed from `id`, which shadowed the builtin
    for ifld, (tr_idxs, vl_idxs) in enumerate(fld.split(sample_ids, labels)):
        if ifld == 0:  # only the first fold is trained in this experiment
            print('training fold ', ifld)
            tr_ds = tdu.ImgDataset(imgs[tr_idxs], labels[tr_idxs], tr_transformer)
            vl_ds = tdu.ImgDataset(imgs[vl_idxs], labels[vl_idxs], ts_transformer)
            # train
            tr_batch_size = 64
            ts_batch_size = 512
            tr_loader = tdu.get_dataloader(tr_ds, tr_batch_size)
            vl_loader = tdu.get_dataloader(vl_ds, ts_batch_size, shuffle=False)
            epochs = 110
            lr = 1e-3
            grad_accum_steps = 1
            warmup_epoch = 1
            patience = 5
            factor = 0.5
            opt = 'AdaBound'
            weight_decay = 1e-4
            loss_w = [0.5, 0.25, 0.25]  # grapheme-root / vowel / consonant loss weights
            reference_label = labels[tr_idxs]
            model = tru.train_model_v2_1(model, tr_loader, vl_loader,
                                         epochs, lr, grad_accum_steps,
                                         warmup_epoch, patience, factor, opt, weight_decay, loss_w, reference_label)
    # save
    torch.save(model.state_dict(), 'bengali_model')
    return
def make_model_v0_4_1_2():
    """Variant of make_model_v0_4_1 with mixup alpha=1.0; saves to './bengali_model'.

    Experiment config: calibrated mixup (alpha=1.0), mish, cutmix,
    MultilabelStratifiedKFold, train_model_v2_1, class balance,
    SENetEncoder_CalibMixup_Multiscale_v2 + ClassifierModule_v9.
    """
    ## data
    pp_pl = img_prc.PreprocPipeline_v1()
    imgs = pp_pl.load_imgs()
    labels = dt_utl.get_train_label()
    # transformers: train = shift/scale/rotate jitter + normalize; test = normalize only
    tr_transformer = tdu.transform_wrapper(
        [],
        alb.Compose([
            albtr.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.1, rotate_limit=15, border_mode=cv2.BORDER_CONSTANT, p=1),
            albtr.Normalize(0.5, 0.5),
            albToTensor()
        ])
    )
    ts_transformer = tdu.transform_wrapper(
        [],
        alb.Compose([
            albtr.Normalize(0.5, 0.5),
            albToTensor()
        ])
    )
    ## model
    get_senet = senet.se_resnext50_32x4d  # alternatives: se_resnet152, se_resnext101_32x4d, se_resnet50
    input3ch = False
    three_neck = False
    use_mish = True
    mixup_alpha = 1.0  # the only config difference from make_model_v0_4_1 (there: 0.2)
    mix_cand_layers = [0, 1, 2]  # alternative: [0, 1, 2, 3]
    cutmix_alpha = 1.0
    cutmix_cand_layers = [0]
    output_layers = [2, 3, 4]
    encoder = modi_senet.SENetEncoder_CalibMixup_Multiscale_v2(
        get_senet(num_classes=1000, pretrained='imagenet'),
        input3ch=input3ch,
        three_neck=three_neck, mixup_alpha=mixup_alpha, mix_cand_layers=mix_cand_layers,
        use_mish=use_mish, cutmix_alpha=cutmix_alpha, cutmix_cand_layers=cutmix_cand_layers,
        output_layers=output_layers,
    )
    encoded_planes = [512, 1024, 2048]  # channel widths of output_layers 2/3/4
    encoder_is_separately = three_neck
    encoder_use_mixup = True
    dropout_p = 0.0
    gem_p = None
    classifier_module = bengali_classifier.ClassifierModule_v9
    model = bengali_classifier.BengaliClassifier_v1(
        encoder, encoded_planes,
        encoder_is_separately, encoder_use_mixup,
        dropout_p, gem_p,
        classifier_module=classifier_module,
    )
    ## training
    # NOTE(review): random_state without shuffle=True is ignored by
    # sklearn-style K-fold splitters (newer versions reject the combination)
    # -- confirm the intended fold assignment.
    fld = MultilabelStratifiedKFold(n_splits=5, random_state=2020)
    sample_ids = np.arange(len(labels))[:, None]  # renamed from `id`, which shadowed the builtin
    for ifld, (tr_idxs, vl_idxs) in enumerate(fld.split(sample_ids, labels)):
        if ifld == 0:  # only the first fold is trained in this experiment
            print('training fold ', ifld)
            tr_ds = tdu.ImgDataset(imgs[tr_idxs], labels[tr_idxs], tr_transformer)
            vl_ds = tdu.ImgDataset(imgs[vl_idxs], labels[vl_idxs], ts_transformer)
            # train
            tr_batch_size = 64
            ts_batch_size = 512
            tr_loader = tdu.get_dataloader(tr_ds, tr_batch_size)
            vl_loader = tdu.get_dataloader(vl_ds, ts_batch_size, shuffle=False)
            epochs = 110
            lr = 1e-3
            grad_accum_steps = 1
            warmup_epoch = 1
            patience = 5
            factor = 0.5
            opt = 'AdaBound'
            weight_decay = 1e-4
            loss_w = [0.5, 0.25, 0.25]  # grapheme-root / vowel / consonant loss weights
            reference_label = labels[tr_idxs]
            model = tru.train_model_v2_1(model, tr_loader, vl_loader,
                                         epochs, lr, grad_accum_steps,
                                         warmup_epoch, patience, factor, opt, weight_decay, loss_w, reference_label)
    # save
    torch.save(model.state_dict(), 'bengali_model')
    return
def make_model_v0_4_2():
    """Two-phase training (frozen-encoder warmup, then full) on fold 0; saves to './bengali_model'.

    Experiment config: mixup, mish, cutmix, MultilabelStratifiedKFold,
    train_model_v2_1, class balance (cb_beta=0.999),
    SENetEncoder_Multiscale_v2 + ClassifierModule_v9.
    """
    ## data
    pp_pl = img_prc.PreprocPipeline_v1()
    imgs = pp_pl.load_imgs()
    labels = dt_utl.get_train_label()
    # transformers: train = shift/scale/rotate jitter + normalize; test = normalize only
    tr_transformer = tdu.transform_wrapper(
        [],
        alb.Compose([
            albtr.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.1, rotate_limit=15, border_mode=cv2.BORDER_CONSTANT, p=1),
            albtr.Normalize(0.5, 0.5),
            albToTensor()
        ])
    )
    ts_transformer = tdu.transform_wrapper(
        [],
        alb.Compose([
            albtr.Normalize(0.5, 0.5),
            albToTensor()
        ])
    )
    ## model
    get_senet = senet.se_resnext50_32x4d  # alternatives: se_resnet152, se_resnext101_32x4d, se_resnet50
    input3ch = False
    three_neck = False
    use_mish = True
    mixup_alpha = 0.2
    mix_cand_layers = [0, 1, 2]  # alternative: [0, 1, 2, 3]
    cutmix_alpha = 1.0
    cutmix_cand_layers = [0]
    output_layers = [2, 3, 4]
    encoder = modi_senet.SENetEncoder_Multiscale_v2(
        get_senet(num_classes=1000, pretrained='imagenet'),
        input3ch=input3ch,
        three_neck=three_neck, mixup_alpha=mixup_alpha, mix_cand_layers=mix_cand_layers,
        use_mish=use_mish, cutmix_alpha=cutmix_alpha, cutmix_cand_layers=cutmix_cand_layers,
        output_layers=output_layers,
    )
    encoded_planes = [512, 1024, 2048]  # channel widths of output_layers 2/3/4
    encoder_is_separately = three_neck
    encoder_use_mixup = True
    dropout_p = 0.0
    gem_p = None
    classifier_module = bengali_classifier.ClassifierModule_v9
    model = bengali_classifier.BengaliClassifier_v1(
        encoder, encoded_planes,
        encoder_is_separately, encoder_use_mixup,
        dropout_p, gem_p,
        classifier_module=classifier_module,
    )
    ## training
    # NOTE(review): random_state without shuffle=True is ignored by
    # sklearn-style K-fold splitters (newer versions reject the combination)
    # -- confirm the intended fold assignment.
    fld = MultilabelStratifiedKFold(n_splits=5, random_state=2020)
    sample_ids = np.arange(len(labels))[:, None]  # renamed from `id`, which shadowed the builtin
    for ifld, (tr_idxs, vl_idxs) in enumerate(fld.split(sample_ids, labels)):
        if ifld == 0:  # only the first fold is trained in this experiment
            print('training fold ', ifld)
            tr_ds = tdu.ImgDataset(imgs[tr_idxs], labels[tr_idxs], tr_transformer)
            vl_ds = tdu.ImgDataset(imgs[vl_idxs], labels[vl_idxs], ts_transformer)
            # phase 1: short warmup with the encoder frozen (classifier head only);
            # larger batch, learning rate scaled by batch-size ratio
            tr_batch_size = 512
            ts_batch_size = 512
            tr_loader = tdu.get_dataloader(tr_ds, tr_batch_size)
            vl_loader = tdu.get_dataloader(vl_ds, ts_batch_size, shuffle=False)
            epochs = 2
            lr = 1e-3 * tr_batch_size / 64
            grad_accum_steps = 1
            warmup_epoch = 1
            patience = 5
            factor = 0.5
            opt = 'AdaBound'
            weight_decay = 1e-4
            loss_w = [0.5, 0.25, 0.25]  # grapheme-root / vowel / consonant loss weights
            reference_label = labels[tr_idxs]
            cb_beta = 0.999
            model.freeze_encoder(freeze=True, target_layers=[0, 1, 2, 3, 4])
            model = tru.train_model_v2_1(model, tr_loader, vl_loader,
                                         epochs, lr, grad_accum_steps,
                                         warmup_epoch, patience, factor, opt, weight_decay, loss_w, reference_label, cb_beta)
            model.freeze_encoder(freeze=False)
            # phase 2: full training with the encoder unfrozen
            tr_batch_size = 64
            ts_batch_size = 512
            tr_loader = tdu.get_dataloader(tr_ds, tr_batch_size)
            vl_loader = tdu.get_dataloader(vl_ds, ts_batch_size, shuffle=False)
            epochs = 110
            lr = 1e-3
            grad_accum_steps = 1
            warmup_epoch = 1
            patience = 5
            factor = 0.5
            opt = 'AdaBound'
            weight_decay = 1e-4
            loss_w = [0.5, 0.25, 0.25]
            reference_label = labels[tr_idxs]
            cb_beta = 0.999
            model = tru.train_model_v2_1(model, tr_loader, vl_loader,
                                         epochs, lr, grad_accum_steps,
                                         warmup_epoch, patience, factor, opt, weight_decay, loss_w, reference_label, cb_beta)
    # save
    torch.save(model.state_dict(), 'bengali_model')
    return
def make_model_v0_4_2_1():
    """Evaluate the pretrained v0_4_2 checkpoint on fold 0 (no training, no saving).

    Loads weights from pretrain_path_root and runs tru.test_model_v1 on the
    fold-0 train/validation loaders. Model config mirrors make_model_v0_4_2:
    mixup, mish, cutmix, MultilabelStratifiedKFold, class balance (0.999),
    SENetEncoder_Multiscale_v2 + ClassifierModule_v9.
    """
    ## data
    pp_pl = img_prc.PreprocPipeline_v1()
    imgs = pp_pl.load_imgs()
    labels = dt_utl.get_train_label()
    # transformers: train = shift/scale/rotate jitter + normalize; test = normalize only
    tr_transformer = tdu.transform_wrapper(
        [],
        alb.Compose([
            albtr.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.1, rotate_limit=15, border_mode=cv2.BORDER_CONSTANT, p=1),
            albtr.Normalize(0.5, 0.5),
            albToTensor()
        ])
    )
    ts_transformer = tdu.transform_wrapper(
        [],
        alb.Compose([
            albtr.Normalize(0.5, 0.5),
            albToTensor()
        ])
    )
    ## model -- must match the architecture the checkpoint was trained with
    get_senet = senet.se_resnext50_32x4d  # alternatives: se_resnet152, se_resnext101_32x4d, se_resnet50
    input3ch = False
    three_neck = False
    use_mish = True
    mixup_alpha = 0.2
    mix_cand_layers = [0, 1, 2]  # alternative: [0, 1, 2, 3]
    cutmix_alpha = 1.0
    cutmix_cand_layers = [0]
    output_layers = [2, 3, 4]
    encoder = modi_senet.SENetEncoder_Multiscale_v2(
        get_senet(num_classes=1000, pretrained='imagenet'),
        input3ch=input3ch,
        three_neck=three_neck, mixup_alpha=mixup_alpha, mix_cand_layers=mix_cand_layers,
        use_mish=use_mish, cutmix_alpha=cutmix_alpha, cutmix_cand_layers=cutmix_cand_layers,
        output_layers=output_layers,
    )
    encoded_planes = [512, 1024, 2048]  # channel widths of output_layers 2/3/4
    encoder_is_separately = three_neck
    encoder_use_mixup = True
    dropout_p = 0.0
    gem_p = None
    classifier_module = bengali_classifier.ClassifierModule_v9
    model = bengali_classifier.BengaliClassifier_v1(
        encoder, encoded_planes,
        encoder_is_separately, encoder_use_mixup,
        dropout_p, gem_p,
        classifier_module=classifier_module,
    )
    model.load_state_dict(torch.load(os.path.join(pretrain_path_root, 'pretrained_200303_se_resnext50_32x4d_cb_multi2-3-4_v0', 'checkpoint'))['state_dict'])
    ## evaluation -- split must be identical to training so fold 0 matches
    # NOTE(review): random_state without shuffle=True is ignored by
    # sklearn-style K-fold splitters (newer versions reject the combination)
    # -- confirm the intended fold assignment.
    fld = MultilabelStratifiedKFold(n_splits=5, random_state=2020)
    sample_ids = np.arange(len(labels))[:, None]  # renamed from `id`, which shadowed the builtin
    for ifld, (tr_idxs, vl_idxs) in enumerate(fld.split(sample_ids, labels)):
        if ifld == 0:  # only the first fold is evaluated
            print('training fold ', ifld)
            tr_ds = tdu.ImgDataset(imgs[tr_idxs], labels[tr_idxs], tr_transformer)
            vl_ds = tdu.ImgDataset(imgs[vl_idxs], labels[vl_idxs], ts_transformer)
            tr_batch_size = 64
            ts_batch_size = 512
            tr_loader = tdu.get_dataloader(tr_ds, tr_batch_size)
            vl_loader = tdu.get_dataloader(vl_ds, ts_batch_size, shuffle=False)
            loss_w = [0.5, 0.25, 0.25]  # grapheme-root / vowel / consonant loss weights
            tru.test_model_v1(model, tr_loader, vl_loader, loss_w)
    return
def make_model_v0_4_2_tta1():
    """Evaluate the pretrained v0_4_2 checkpoint on fold 0 with test-time augmentation.

    Runs pru.TTA over rotations [0, -10, 10] x scales [0, 0.1, -0.1] and
    prints weighted macro-recall scores (0.5 grapheme / 0.25 vowel /
    0.25 consonant). No training, no saving.
    """
    ## data
    pp_pl = img_prc.PreprocPipeline_v1()
    imgs = pp_pl.load_imgs()
    labels = dt_utl.get_train_label()
    # test-time transformer (normalize only); TTA applies its own geometric
    # perturbations on top. (An unused train transformer was removed.)
    ts_transformer = alb.Compose([
        albtr.Normalize(0.5, 0.5),
        albToTensor()
    ])
    ## model -- must match the architecture the checkpoint was trained with
    get_senet = senet.se_resnext50_32x4d  # alternatives: se_resnet152, se_resnext101_32x4d, se_resnet50
    input3ch = False
    three_neck = False
    use_mish = True
    mixup_alpha = 0.2
    mix_cand_layers = [0, 1, 2]  # alternative: [0, 1, 2, 3]
    cutmix_alpha = 1.0
    cutmix_cand_layers = [0]
    output_layers = [2, 3, 4]
    encoder = modi_senet.SENetEncoder_Multiscale_v2(
        get_senet(num_classes=1000, pretrained='imagenet'),
        input3ch=input3ch,
        three_neck=three_neck, mixup_alpha=mixup_alpha, mix_cand_layers=mix_cand_layers,
        use_mish=use_mish, cutmix_alpha=cutmix_alpha, cutmix_cand_layers=cutmix_cand_layers,
        output_layers=output_layers,
    )
    encoded_planes = [512, 1024, 2048]  # channel widths of output_layers 2/3/4
    encoder_is_separately = three_neck
    encoder_use_mixup = True
    dropout_p = 0.0
    gem_p = None
    classifier_module = bengali_classifier.ClassifierModule_v9
    model = bengali_classifier.BengaliClassifier_v1(
        encoder, encoded_planes,
        encoder_is_separately, encoder_use_mixup,
        dropout_p, gem_p,
        classifier_module=classifier_module,
    )
    model.load_state_dict(torch.load(os.path.join(pretrain_path_root, 'pretrained_200303_se_resnext50_32x4d_cb_multi2-3-4_v0', 'checkpoint'))['state_dict'])
    ## evaluation -- split must be identical to training so fold 0 matches
    # NOTE(review): random_state without shuffle=True is ignored by
    # sklearn-style K-fold splitters (newer versions reject the combination)
    # -- confirm the intended fold assignment.
    fld = MultilabelStratifiedKFold(n_splits=5, random_state=2020)
    sample_ids = np.arange(len(labels))[:, None]  # renamed from `id`, which shadowed the builtin
    for ifld, (tr_idxs, vl_idxs) in enumerate(fld.split(sample_ids, labels)):
        if ifld == 0:  # only the first fold is evaluated
            print('training fold ', ifld)
            ts_batch_size = 512
            print('tta')
            tta = pru.TTA(model, ts_transformer, degrees=[0, -10, 10], scales=[0, 0.1, -0.1], shifts=[[0,0]])
            pred_label = pru.logit3_to_label(tta.predict(imgs[vl_idxs], ts_batch_size))
            gra_score = tru.macro_recall(labels[vl_idxs, 0], pred_label[0])
            vow_score = tru.macro_recall(labels[vl_idxs, 1], pred_label[1])
            con_score = tru.macro_recall(labels[vl_idxs, 2], pred_label[2])
            print('total_score ', 0.5 * gra_score + 0.25 * vow_score + 0.25 * con_score)
            print('gra_score ', gra_score)
            print('vow_score ', vow_score)
            print('con_score ', con_score)
            # CSV-ish single line for easy copy into a results sheet
            print(0.5 * gra_score + 0.25 * vow_score + 0.25 * con_score, ',', gra_score, ',', vow_score, ',', con_score)
    return
def make_model_v0_4_3():
    """Train fold 0 with dropblock regularization (cutmix disabled); saves to './bengali_model'.

    Experiment config: mixup, mish, dropblock (p=0.2),
    MultilabelStratifiedKFold, train_model_v2_1, class balance
    (cb_beta=0.999), SENetEncoder_Multiscale_v2 + ClassifierModule_v9.
    """
    ## data
    pp_pl = img_prc.PreprocPipeline_v1()
    imgs = pp_pl.load_imgs()
    labels = dt_utl.get_train_label()
    # transformers: train = shift/scale/rotate jitter + normalize; test = normalize only
    tr_transformer = tdu.transform_wrapper(
        [],
        alb.Compose([
            albtr.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.1, rotate_limit=15, border_mode=cv2.BORDER_CONSTANT, p=1),
            albtr.Normalize(0.5, 0.5),
            albToTensor()
        ])
    )
    ts_transformer = tdu.transform_wrapper(
        [],
        alb.Compose([
            albtr.Normalize(0.5, 0.5),
            albToTensor()
        ])
    )
    ## model
    get_senet = senet.se_resnext50_32x4d  # alternatives: se_resnet152, se_resnext101_32x4d, se_resnet50
    input3ch = False
    three_neck = False
    use_mish = True
    mixup_alpha = 0.2
    mix_cand_layers = [0, 1, 2]  # alternative: [0, 1, 2, 3]
    cutmix_alpha = None  # cutmix disabled in this experiment (was 1.0)
    cutmix_cand_layers = None  # (was [0])
    output_layers = [2, 3, 4]
    dropblock_p = 0.2
    encoder = modi_senet.SENetEncoder_Multiscale_v2(
        get_senet(num_classes=1000, pretrained='imagenet'),
        input3ch=input3ch,
        three_neck=three_neck, mixup_alpha=mixup_alpha, mix_cand_layers=mix_cand_layers,
        use_mish=use_mish, cutmix_alpha=cutmix_alpha, cutmix_cand_layers=cutmix_cand_layers,
        output_layers=output_layers, dropblock_p=dropblock_p,
    )
    encoded_planes = [512, 1024, 2048]  # channel widths of output_layers 2/3/4
    encoder_is_separately = three_neck
    encoder_use_mixup = True
    dropout_p = 0.0
    gem_p = None
    classifier_module = bengali_classifier.ClassifierModule_v9
    model = bengali_classifier.BengaliClassifier_v1(
        encoder, encoded_planes,
        encoder_is_separately, encoder_use_mixup,
        dropout_p, gem_p,
        classifier_module=classifier_module,
    )
    ## training
    # NOTE(review): random_state without shuffle=True is ignored by
    # sklearn-style K-fold splitters (newer versions reject the combination)
    # -- confirm the intended fold assignment.
    fld = MultilabelStratifiedKFold(n_splits=5, random_state=2020)
    sample_ids = np.arange(len(labels))[:, None]  # renamed from `id`, which shadowed the builtin
    for ifld, (tr_idxs, vl_idxs) in enumerate(fld.split(sample_ids, labels)):
        if ifld == 0:  # only the first fold is trained in this experiment
            print('training fold ', ifld)
            tr_ds = tdu.ImgDataset(imgs[tr_idxs], labels[tr_idxs], tr_transformer)
            vl_ds = tdu.ImgDataset(imgs[vl_idxs], labels[vl_idxs], ts_transformer)
            # train
            tr_batch_size = 64
            ts_batch_size = 512
            tr_loader = tdu.get_dataloader(tr_ds, tr_batch_size)
            vl_loader = tdu.get_dataloader(vl_ds, ts_batch_size, shuffle=False)
            epochs = 110
            lr = 1e-3
            grad_accum_steps = 1
            warmup_epoch = 1
            patience = 5
            factor = 0.5
            opt = 'AdaBound'
            weight_decay = 1e-4
            loss_w = [0.5, 0.25, 0.25]  # grapheme-root / vowel / consonant loss weights
            reference_label = labels[tr_idxs]
            cb_beta = 0.999
            model = tru.train_model_v2_1(model, tr_loader, vl_loader,
                                         epochs, lr, grad_accum_steps,
                                         warmup_epoch, patience, factor, opt, weight_decay, loss_w, reference_label, cb_beta)
    # save
    torch.save(model.state_dict(), 'bengali_model')
    return
def make_model_v1_0_0():
    """Train BengaliClassifier_v2 on the full training set, optionally resuming
    from a local 'checkpoint' file; saves weights to './bengali_model'.

    Experiment config:
    mish, mixup, cutmix,
    SENetEncoder_Multiscale_v2+ClassifierModule_v9,
    dropblock,
    MultilabelStratifiedKFold,
    class balance(0.999),
    """
    CHECKPOINT_PATH = 'checkpoint' # None
    # CP carries 'state_dict', 'epoch' and 'optimizer' when resuming.
    CP = get_checkpoint(CHECKPOINT_PATH) if CHECKPOINT_PATH is not None else None
    ## data
    pp_pl = img_prc.PreprocPipeline_v1()
    imgs = pp_pl.load_imgs()
    labels = dt_utl.get_train_label()
    # transformer: train = shift/scale/rotate jitter + normalize; test = normalize only
    tr_transformer = alb.Compose([
        albtr.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.1, rotate_limit=15, border_mode=cv2.BORDER_CONSTANT, p=1),
        albtr.Normalize(0.5, 0.5),
        albToTensor()
    ])
    ts_transformer = alb.Compose([
        albtr.Normalize(0.5, 0.5),
        albToTensor()
    ])
    ## model
    get_senet = senet.se_resnext50_32x4d #senet.se_resnet152, senet.se_resnext50_32x4d, senet.se_resnext101_32x4d, senet.se_resnet50
    input3ch = False
    three_neck = False
    use_mish = True
    mixup_alpha = 1.0
    mix_cand_layers=[0, 1, 2] #[0, 1, 2, 3]
    cutmix_alpha = None #1.0
    cutmix_cand_layers = None #[0]
    output_layers = [2, 3, 4]
    dropblock_p = 0.2
    encoder = modi_senet.SENetEncoder_CalibMixup_Multiscale_v2(get_senet(num_classes=1000, pretrained='imagenet'),
                                                              input3ch=input3ch,
                                                              three_neck=three_neck, mixup_alpha=mixup_alpha, mix_cand_layers=mix_cand_layers,
                                                              use_mish=use_mish, cutmix_alpha=cutmix_alpha, cutmix_cand_layers=cutmix_cand_layers,
                                                              output_layers=output_layers, dropblock_p=dropblock_p,
                                                              )
    encoded_planes = [512, 1024, 2048] #4 * 512
    dropout_p = 0.1
    classifier = bengali_classifier.ClassifierModule_v9(encoded_planes, dropout_p=dropout_p)
    encoder_use_mixup = True
    model = bengali_classifier.BengaliClassifier_v2(encoder, classifier,
                                                    encoder_use_mixup,
                                                    )
    if CP is not None:
        model.load_state_dict(CP['state_dict'])
    ## training
    #fld = ShuffleSplit(n_splits=5, test_size=.001, random_state=2020)
    #fld = MultilabelStratifiedKFold(n_splits=1000, random_state=2020)
    #id = np.arange(len(labels))[:,None]
    #for ifld, (tr_idxs, vl_idxs) in enumerate(fld.split(id, labels)):
    for ifld in range(1):
        if ifld == 0:
            print('training fold ', ifld)
            # NOTE(review): validation indices are drawn from the training
            # indices, so validation overlaps the training data -- this looks
            # like a deliberate smoke check for full-data training; confirm
            # before trusting the reported validation scores.
            tr_idxs = np.arange(len(labels))
            vl_idxs = np.random.choice(tr_idxs, 1000, replace=False)
            tr_ds = tdu.ImgDataset(imgs[tr_idxs], labels[tr_idxs], tr_transformer)
            vl_ds = tdu.ImgDataset(imgs[vl_idxs], labels[vl_idxs], ts_transformer)
            # train
            tr_batch_size = 64
            ts_batch_size = 512
            tr_loader = tdu.get_dataloader(tr_ds, tr_batch_size)
            vl_loader = tdu.get_dataloader(vl_ds, ts_batch_size, shuffle=False)
            epochs = 120
            lr = 5e-4
            grad_accum_steps = 1
            warmup_epoch=1
            patience=5
            factor=0.5
            opt = 'AdaBound'
            weight_decay=1e-4 #0.0
            loss_w = [0.5, 0.25, 0.25]  # grapheme-root / vowel / consonant loss weights
            reference_label = labels[tr_idxs]
            cb_beta = 0.999
            # Resume epoch counter and optimizer state when a checkpoint was loaded.
            start_epoch = 0 if CP is None else CP['epoch']
            opt_state_dict = None if CP is None else CP['optimizer']
            model = tru.train_model_v2_1(model, tr_loader, vl_loader,
                                         epochs, lr, grad_accum_steps,
                                         warmup_epoch, patience, factor, opt, weight_decay, loss_w, reference_label, cb_beta, start_epoch, opt_state_dict)
    # save
    torch.save(model.state_dict(), 'bengali_model')
    return
def test_model_v1_0_0():
    """Evaluate the trained v1_0_0 checkpoint on the full labelled set.

    Loads weights from '../trained_model/20200308_make_model_v1_0_0/checkpoint'
    and runs tru.test_model_v1; the TTA scoring path is kept commented below.
    """
    CHECKPOINT_PATH = '../trained_model/20200308_make_model_v1_0_0/checkpoint' # None
    CP = get_checkpoint(CHECKPOINT_PATH) if CHECKPOINT_PATH is not None else None
    ## data
    pp_pl = img_prc.PreprocPipeline_v1()
    imgs = pp_pl.load_imgs()
    labels = dt_utl.get_train_label()
    # transformer
    tr_transformer = alb.Compose([
        albtr.ShiftScaleRotate(shift_limit=0.05, scale_limit=0.05, rotate_limit=5, border_mode=cv2.BORDER_CONSTANT, p=1),
        albtr.Normalize(0.5, 0.5),
        albToTensor()
    ])
    # NOTE(review): the evaluation transformer applies random geometric and
    # blur augmentations -- presumably an intentional robustness / TTA-style
    # check rather than clean evaluation; confirm.
    ts_transformer = alb.Compose([
        albtr.ShiftScaleRotate(shift_limit=0.05, scale_limit=0.05, rotate_limit=5, border_mode=cv2.BORDER_CONSTANT, p=1),
        alb.OneOf([albtr.ElasticTransform(p=0.1, alpha=30, sigma=7, alpha_affine=0,border_mode=cv2.BORDER_CONSTANT),
                   albtr.GridDistortion(num_steps=20, distort_limit=0.3 ,border_mode=cv2.BORDER_CONSTANT,p=0.1),
                   albtr.OpticalDistortion(distort_limit=0.8, shift_limit=0.3,border_mode=cv2.BORDER_CONSTANT, p=0.1)
                   ], p=0.5),
        alb.OneOf([
            albtr.MedianBlur(blur_limit=3, p=0.2)
            ], p=0.3),
        albtr.Normalize(0.5, 0.5),
        albToTensor()
    ])
    ## model -- must match the architecture the checkpoint was trained with
    get_senet = senet.se_resnext50_32x4d #senet.se_resnet152, senet.se_resnext50_32x4d, senet.se_resnext101_32x4d, senet.se_resnet50
    input3ch = False
    three_neck = False
    use_mish = True
    mixup_alpha = 1.0
    mix_cand_layers=[0, 1, 2] #[0, 1, 2, 3]
    cutmix_alpha = None #1.0
    cutmix_cand_layers = None #[0]
    output_layers = [2, 3, 4]
    dropblock_p = 0.2
    encoder = modi_senet.SENetEncoder_CalibMixup_Multiscale_v2(get_senet(num_classes=1000, pretrained='imagenet'),
                                                              input3ch=input3ch,
                                                              three_neck=three_neck, mixup_alpha=mixup_alpha, mix_cand_layers=mix_cand_layers,
                                                              use_mish=use_mish, cutmix_alpha=cutmix_alpha, cutmix_cand_layers=cutmix_cand_layers,
                                                              output_layers=output_layers, dropblock_p=dropblock_p,
                                                              )
    encoded_planes = [512, 1024, 2048] #4 * 512
    dropout_p = 0.1
    classifier = bengali_classifier.ClassifierModule_v9(encoded_planes, dropout_p=dropout_p)
    encoder_use_mixup = True
    model = bengali_classifier.BengaliClassifier_v2(encoder, classifier,
                                                    encoder_use_mixup,
                                                    )
    if CP is not None:
        model.load_state_dict(CP['state_dict'])
    for ifld in range(1):
        if ifld == 0:
            print('training fold ', ifld)
            # Evaluate on every labelled sample (no held-out split here).
            vl_idxs = np.arange(len(labels))
            #vl_idxs = vl_idxs[:1000]
            # test
            ts_batch_size = 512
            vl_ds = tdu.ImgDataset(imgs[vl_idxs], labels[vl_idxs], ts_transformer)
            vl_loader = tdu.get_dataloader(vl_ds, ts_batch_size, shuffle=False)
            loss_w = [0.5, 0.25, 0.25]  # grapheme-root / vowel / consonant loss weights
            tru.test_model_v1(model, None, vl_loader, loss_w)
            # tta
            #print('tta')
            #ts_batch_size = 512
            #tta = pru.TTA(model, ts_transformer, degrees=[0, -10, 10], scales=[0, 0.1, -0.1], shifts=[[0,0], [-0.05,-0.05], [0.05, 0.05]])
            #tta = pru.TTA(model, ts_transformer, degrees=[0], scales=[0], shifts=[[0,0]])
            #pred_logit = tta.predict(imgs[vl_idxs], ts_batch_size)
            #pred_label = pru.logit3_to_label(pred_logit)
            #
            #gra_score = tru.macro_recall(labels[vl_idxs, 0], pred_label[0])
            #vow_score = tru.macro_recall(labels[vl_idxs, 1], pred_label[1])
            #con_score = tru.macro_recall(labels[vl_idxs, 2], pred_label[2])
            #print('total_score ', 0.5 * gra_score + 0.25 * vow_score + 0.25 * con_score)
            #print('gra_score ', gra_score)
            #print('vow_score ', vow_score)
            #print('con_score ', con_score)
            #print(0.5 * gra_score + 0.25 * vow_score + 0.25 * con_score, ',', gra_score, ',', vow_score, ',', con_score)
            #
            #tru.save_preds(labels[vl_idxs,0], labels[vl_idxs,1], labels[vl_idxs,2], pred_label[0], pred_label[1], pred_label[2], pred_logit[0], pred_logit[1], pred_logit[2], 'tta_vl_')
def make_model_v1_0_0_1():
    """
    Train the multiscale SE-ResNeXt50 classifier: mish activation,
    feature-space mixup on encoder layers 0-2, dropblock, class-balanced
    loss with beta=0.9999 (the code value; cutmix is disabled here).
    Trains on the full data and monitors a random 1000-sample subset.
    Note: even when a checkpoint is loaded, the epoch counter and
    optimizer state are deliberately restarted (start_epoch=0).
    """
    checkpoint_path = 'checkpoint'  # set to None to start from ImageNet weights only
    checkpoint = None if checkpoint_path is None else get_checkpoint(checkpoint_path)

    ## data
    imgs = img_prc.PreprocPipeline_v1().load_imgs()
    labels = dt_utl.get_train_label()

    # train-time augmentation / plain normalization for validation
    tr_transformer = alb.Compose([
        albtr.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.1, rotate_limit=15, border_mode=cv2.BORDER_CONSTANT, p=1),
        albtr.Normalize(0.5, 0.5),
        albToTensor(),
    ])
    ts_transformer = alb.Compose([albtr.Normalize(0.5, 0.5), albToTensor()])

    ## model
    backbone = senet.se_resnext50_32x4d(num_classes=1000, pretrained='imagenet')
    encoder = modi_senet.SENetEncoder_CalibMixup_Multiscale_v2(
        backbone,
        input3ch=False,
        three_neck=False, mixup_alpha=1.0, mix_cand_layers=[0, 1, 2],
        use_mish=True, cutmix_alpha=None, cutmix_cand_layers=None,
        output_layers=[2, 3, 4], dropblock_p=0.2,
    )
    classifier = bengali_classifier.ClassifierModule_v9([512, 1024, 2048], dropout_p=0.1)
    model = bengali_classifier.BengaliClassifier_v2(encoder, classifier, True)  # mixup applied inside the encoder
    if checkpoint is not None:
        model.load_state_dict(checkpoint['state_dict'])

    ## training: single pseudo-fold — train on everything, validate on a random subset
    ifld = 0
    print('training fold ', ifld)
    tr_idxs = np.arange(len(labels))
    vl_idxs = np.random.choice(tr_idxs, 1000, replace=False)
    tr_loader = tdu.get_dataloader(tdu.ImgDataset(imgs[tr_idxs], labels[tr_idxs], tr_transformer), 64)
    vl_loader = tdu.get_dataloader(tdu.ImgDataset(imgs[vl_idxs], labels[vl_idxs], ts_transformer), 512, shuffle=False)
    model = tru.train_model_v2_1(
        model, tr_loader, vl_loader,
        120,               # epochs
        5e-5,              # lr
        1,                 # grad_accum_steps
        1, 5, 0.5,         # warmup_epoch, patience, factor
        'AdaBound', 1e-4,  # optimizer, weight_decay
        [0.5, 0.25, 0.25],        # loss weights (presumably grapheme/vowel/consonant — confirm with tru)
        labels[tr_idxs], 0.9999,  # class-balance reference labels and beta
        0, None,           # restart epoch counter and optimizer state on purpose
    )

    # save
    torch.save(model.state_dict(), 'bengali_model')
    return
def make_model_v1_0_1():
    """
    Train the multiscale SE-ResNeXt50 classifier: mish activation,
    feature-space mixup on encoder layers 0-2, dropblock, class-balanced
    loss with beta=0.99.  Trains on the full data and monitors a random
    10000-sample subset; resumes epoch/optimizer state from 'checkpoint'
    when present.
    """
    checkpoint_path = 'checkpoint'  # set to None to start from ImageNet weights only
    checkpoint = None if checkpoint_path is None else get_checkpoint(checkpoint_path)

    ## data
    imgs = img_prc.PreprocPipeline_v1().load_imgs()
    labels = dt_utl.get_train_label()

    # train-time augmentation / plain normalization for validation
    tr_transformer = alb.Compose([
        albtr.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.1, rotate_limit=15, border_mode=cv2.BORDER_CONSTANT, p=1),
        albtr.Normalize(0.5, 0.5),
        albToTensor(),
    ])
    ts_transformer = alb.Compose([albtr.Normalize(0.5, 0.5), albToTensor()])

    ## model
    backbone = senet.se_resnext50_32x4d(num_classes=1000, pretrained='imagenet')
    encoder = modi_senet.SENetEncoder_CalibMixup_Multiscale_v2(
        backbone,
        input3ch=False,
        three_neck=False, mixup_alpha=1.0, mix_cand_layers=[0, 1, 2],
        use_mish=True, cutmix_alpha=None, cutmix_cand_layers=None,
        output_layers=[2, 3, 4], dropblock_p=0.2,
    )
    classifier = bengali_classifier.ClassifierModule_v9([512, 1024, 2048], dropout_p=0.1)
    model = bengali_classifier.BengaliClassifier_v2(encoder, classifier, True)  # mixup applied inside the encoder
    if checkpoint is not None:
        model.load_state_dict(checkpoint['state_dict'])

    ## training: single pseudo-fold — train on everything, validate on a random subset
    ifld = 0
    print('training fold ', ifld)
    tr_idxs = np.arange(len(labels))
    vl_idxs = np.random.choice(tr_idxs, 10000, replace=False)
    tr_loader = tdu.get_dataloader(tdu.ImgDataset(imgs[tr_idxs], labels[tr_idxs], tr_transformer), 60)
    vl_loader = tdu.get_dataloader(tdu.ImgDataset(imgs[vl_idxs], labels[vl_idxs], ts_transformer), 512, shuffle=False)
    model = tru.train_model_v2_1(
        model, tr_loader, vl_loader,
        140,               # epochs
        5e-4,              # lr
        1,                 # grad_accum_steps
        1, 5, 0.5,         # warmup_epoch, patience, factor
        'AdaBound', 1e-4,  # optimizer, weight_decay
        [0.5, 0.25, 0.25],      # loss weights (presumably grapheme/vowel/consonant — confirm with tru)
        labels[tr_idxs], 0.99,  # class-balance reference labels and beta
        0 if checkpoint is None else checkpoint['epoch'],
        None if checkpoint is None else checkpoint['optimizer'],
    )

    # save
    torch.save(model.state_dict(), 'bengali_model')
    return
def test_model_v1_0_1():
    """
    Evaluate the model trained by make_model_v1_0_1 over the whole training
    set, using an aggressively augmented evaluation transform
    (shift/scale/rotate + elastic/grid/optical distortion + median blur) —
    a robustness check rather than a clean-accuracy measurement.

    The architecture built here must mirror make_model_v1_0_1 exactly so
    the checkpoint's state_dict loads.

    Fixes vs. original: the train-side transform/dataset/loader were built
    but never used (test_model_v1 was called with None), wasting a full
    dataset construction; they are removed.  The single-iteration
    ``for ifld in range(1)`` scaffolding and the misleading
    'training fold' message are also gone.
    """
    CHECKPOINT_PATH = '../trained_model/20200308_make_model_v1_0_1/checkpoint'  # None
    CP = get_checkpoint(CHECKPOINT_PATH) if CHECKPOINT_PATH is not None else None
    ## data
    pp_pl = img_prc.PreprocPipeline_v1()
    imgs = pp_pl.load_imgs()
    labels = dt_utl.get_train_label()
    # augmented evaluation transform
    ts_transformer = alb.Compose([
        albtr.ShiftScaleRotate(shift_limit=0.05, scale_limit=0.05, rotate_limit=5, border_mode=cv2.BORDER_CONSTANT, p=1),
        alb.OneOf([albtr.ElasticTransform(p=0.1, alpha=30, sigma=7, alpha_affine=0, border_mode=cv2.BORDER_CONSTANT),
                   albtr.GridDistortion(num_steps=20, distort_limit=0.3, border_mode=cv2.BORDER_CONSTANT, p=0.1),
                   albtr.OpticalDistortion(distort_limit=0.8, shift_limit=0.3, border_mode=cv2.BORDER_CONSTANT, p=0.1)
                   ], p=0.5),
        alb.OneOf([
            albtr.MedianBlur(blur_limit=3, p=0.2)
            ], p=0.3),
        albtr.Normalize(0.5, 0.5),
        albToTensor()
    ])
    ## model — identical hyperparameters to make_model_v1_0_1
    get_senet = senet.se_resnext50_32x4d
    encoder = modi_senet.SENetEncoder_CalibMixup_Multiscale_v2(
        get_senet(num_classes=1000, pretrained='imagenet'),
        input3ch=False,
        three_neck=False, mixup_alpha=1.0, mix_cand_layers=[0, 1, 2],
        use_mish=True, cutmix_alpha=None, cutmix_cand_layers=None,
        output_layers=[2, 3, 4], dropblock_p=0.2,
    )
    classifier = bengali_classifier.ClassifierModule_v9([512, 1024, 2048], dropout_p=0.1)
    model = bengali_classifier.BengaliClassifier_v2(encoder, classifier, True)
    if CP is not None:
        model.load_state_dict(CP['state_dict'])
    # evaluate on every training sample
    print('testing fold ', 0)
    vl_idxs = np.arange(len(labels))
    ts_batch_size = 512
    vl_ds = tdu.ImgDataset(imgs[vl_idxs], labels[vl_idxs], ts_transformer)
    vl_loader = tdu.get_dataloader(vl_ds, ts_batch_size, shuffle=False)
    loss_w = [0.5, 0.25, 0.25]
    tru.test_model_v1(model, None, vl_loader, loss_w)
    return
def make_model_v1_0_3():
    """
    Train a two-scale SE-ResNeXt50 variant (encoder output layers 3 and 4
    only): mish activation, feature-space mixup on layers 0-2, dropblock,
    class-balanced loss with beta=0.99.  Trains on the full data and
    monitors a random 10000-sample subset.  Checkpoint resume is disabled
    (checkpoint_path is None).
    """
    checkpoint_path = None  # 'checkpoint' to resume
    checkpoint = None if checkpoint_path is None else get_checkpoint(checkpoint_path)

    ## data
    imgs = img_prc.PreprocPipeline_v1().load_imgs()
    labels = dt_utl.get_train_label()

    # train-time augmentation / plain normalization for validation
    tr_transformer = alb.Compose([
        albtr.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.1, rotate_limit=15, border_mode=cv2.BORDER_CONSTANT, p=1),
        albtr.Normalize(0.5, 0.5),
        albToTensor(),
    ])
    ts_transformer = alb.Compose([albtr.Normalize(0.5, 0.5), albToTensor()])

    ## model
    backbone = senet.se_resnext50_32x4d(num_classes=1000, pretrained='imagenet')
    encoder = modi_senet.SENetEncoder_CalibMixup_Multiscale_v2(
        backbone,
        input3ch=False,
        three_neck=False, mixup_alpha=1.0, mix_cand_layers=[0, 1, 2],
        use_mish=True, cutmix_alpha=None, cutmix_cand_layers=None,
        output_layers=[3, 4], dropblock_p=0.2,
    )
    classifier = bengali_classifier.ClassifierModule_v9([1024, 2048], dropout_p=0.1)
    model = bengali_classifier.BengaliClassifier_v2(encoder, classifier, True)  # mixup applied inside the encoder
    if checkpoint is not None:
        model.load_state_dict(checkpoint['state_dict'])

    ## training: single pseudo-fold — train on everything, validate on a random subset
    ifld = 0
    print('training fold ', ifld)
    tr_idxs = np.arange(len(labels))
    vl_idxs = np.random.choice(tr_idxs, 10000, replace=False)
    tr_loader = tdu.get_dataloader(tdu.ImgDataset(imgs[tr_idxs], labels[tr_idxs], tr_transformer), 60)
    vl_loader = tdu.get_dataloader(tdu.ImgDataset(imgs[vl_idxs], labels[vl_idxs], ts_transformer), 512, shuffle=False)
    model = tru.train_model_v2_1(
        model, tr_loader, vl_loader,
        140,               # epochs
        5e-4,              # lr
        1,                 # grad_accum_steps
        1, 5, 0.5,         # warmup_epoch, patience, factor
        'AdaBound', 1e-4,  # optimizer, weight_decay
        [0.5, 0.25, 0.25],      # loss weights (presumably grapheme/vowel/consonant — confirm with tru)
        labels[tr_idxs], 0.99,  # class-balance reference labels and beta
        0 if checkpoint is None else checkpoint['epoch'],
        None if checkpoint is None else checkpoint['optimizer'],
    )

    # save
    torch.save(model.state_dict(), 'bengali_model')
    return
def make_model_v1_0_4():
    """
    Train the single-scale SENetEncoder_Mixup variant: mish activation,
    both mixup and cutmix active on encoder layers 0-3, ClassifierModule_v1
    head, class-balanced loss with beta=0.99.  Trains on the full data and
    monitors a random 10000-sample subset.  Checkpoint resume is disabled
    (checkpoint_path is None).
    """
    checkpoint_path = None  # 'checkpoint' to resume
    checkpoint = None if checkpoint_path is None else get_checkpoint(checkpoint_path)

    ## data
    imgs = img_prc.PreprocPipeline_v1().load_imgs()
    labels = dt_utl.get_train_label()

    # train-time augmentation / plain normalization for validation
    tr_transformer = alb.Compose([
        albtr.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.1, rotate_limit=15, border_mode=cv2.BORDER_CONSTANT, p=1),
        albtr.Normalize(0.5, 0.5),
        albToTensor(),
    ])
    ts_transformer = alb.Compose([albtr.Normalize(0.5, 0.5), albToTensor()])

    ## model
    backbone = senet.se_resnext50_32x4d(num_classes=1000, pretrained='imagenet')
    encoder = modi_senet.SENetEncoder_Mixup(
        backbone,
        input3ch=False,
        three_neck=False, mixup_alpha=1.0, mix_cand_layers=[0, 1, 2, 3],
        use_mish=True, cutmix_alpha=1.0, cutmix_cand_layers=[0, 1, 2, 3],
    )
    classifier = bengali_classifier.ClassifierModule_v1(2048, dropout_p=0.1)
    model = bengali_classifier.BengaliClassifier_v2(encoder, classifier, True)  # mixup applied inside the encoder
    if checkpoint is not None:
        model.load_state_dict(checkpoint['state_dict'])

    ## training: single pseudo-fold — train on everything, validate on a random subset
    ifld = 0
    print('training fold ', ifld)
    tr_idxs = np.arange(len(labels))
    vl_idxs = np.random.choice(tr_idxs, 10000, replace=False)
    tr_loader = tdu.get_dataloader(tdu.ImgDataset(imgs[tr_idxs], labels[tr_idxs], tr_transformer), 60)
    vl_loader = tdu.get_dataloader(tdu.ImgDataset(imgs[vl_idxs], labels[vl_idxs], ts_transformer), 512, shuffle=False)
    model = tru.train_model_v2_1(
        model, tr_loader, vl_loader,
        140,               # epochs
        5e-4,              # lr
        1,                 # grad_accum_steps
        1, 5, 0.5,         # warmup_epoch, patience, factor
        'AdaBound', 1e-4,  # optimizer, weight_decay
        [0.5, 0.25, 0.25],      # loss weights (presumably grapheme/vowel/consonant — confirm with tru)
        labels[tr_idxs], 0.99,  # class-balance reference labels and beta
        0 if checkpoint is None else checkpoint['epoch'],
        None if checkpoint is None else checkpoint['optimizer'],
    )

    # save
    torch.save(model.state_dict(), 'bengali_model')
    return
def make_model_v1_0_5():
    """
    Train the single-output-scale (layer 4 only) multiscale SE-ResNeXt50
    variant: mish activation, feature-space mixup on encoder layers 0-3,
    dropblock, class-balanced loss with beta=0.999.  Trains on the full
    data, monitors a random 10000-sample subset, and resumes
    epoch/optimizer state from 'checkpoint' when present.
    """
    checkpoint_path = 'checkpoint'  # set to None to start from ImageNet weights only
    checkpoint = None if checkpoint_path is None else get_checkpoint(checkpoint_path)

    ## data
    imgs = img_prc.PreprocPipeline_v1().load_imgs()
    labels = dt_utl.get_train_label()

    # train-time augmentation / plain normalization for validation
    tr_transformer = alb.Compose([
        albtr.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.1, rotate_limit=15, border_mode=cv2.BORDER_CONSTANT, p=1),
        albtr.Normalize(0.5, 0.5),
        albToTensor(),
    ])
    ts_transformer = alb.Compose([albtr.Normalize(0.5, 0.5), albToTensor()])

    ## model
    backbone = senet.se_resnext50_32x4d(num_classes=1000, pretrained='imagenet')
    encoder = modi_senet.SENetEncoder_CalibMixup_Multiscale_v2(
        backbone,
        input3ch=False,
        three_neck=False, mixup_alpha=1.0, mix_cand_layers=[0, 1, 2, 3],
        use_mish=True, cutmix_alpha=None, cutmix_cand_layers=None,
        output_layers=[4], dropblock_p=0.2,
    )
    classifier = bengali_classifier.ClassifierModule_v9([2048], dropout_p=0.1)
    model = bengali_classifier.BengaliClassifier_v2(encoder, classifier, True)  # mixup applied inside the encoder
    if checkpoint is not None:
        model.load_state_dict(checkpoint['state_dict'])

    ## training: single pseudo-fold — train on everything, validate on a random subset
    ifld = 0
    print('training fold ', ifld)
    tr_idxs = np.arange(len(labels))
    vl_idxs = np.random.choice(tr_idxs, 10000, replace=False)
    tr_loader = tdu.get_dataloader(tdu.ImgDataset(imgs[tr_idxs], labels[tr_idxs], tr_transformer), 60)
    vl_loader = tdu.get_dataloader(tdu.ImgDataset(imgs[vl_idxs], labels[vl_idxs], ts_transformer), 512, shuffle=False)
    model = tru.train_model_v2_1(
        model, tr_loader, vl_loader,
        140,               # epochs
        5e-4,              # lr
        1,                 # grad_accum_steps
        1, 5, 0.5,         # warmup_epoch, patience, factor
        'AdaBound', 1e-4,  # optimizer, weight_decay
        [0.5, 0.25, 0.25],       # loss weights (presumably grapheme/vowel/consonant — confirm with tru)
        labels[tr_idxs], 0.999,  # class-balance reference labels and beta
        0 if checkpoint is None else checkpoint['epoch'],
        None if checkpoint is None else checkpoint['optimizer'],
    )

    # save
    torch.save(model.state_dict(), 'bengali_model')
    return
def test_model_v1_0_5():
    """
    Evaluate the model trained by make_model_v1_0_5 over the whole training
    set, using an aggressively augmented evaluation transform
    (shift/scale/rotate + elastic/grid/optical distortion + median blur) —
    a robustness check rather than a clean-accuracy measurement.

    The architecture built here must mirror make_model_v1_0_5 exactly so
    the checkpoint's state_dict loads.

    Fixes vs. original: the train-side transform/dataset/loader were built
    but never used (test_model_v1 was called with None), wasting a full
    dataset construction; they are removed.  The single-iteration
    ``for ifld in range(1)`` scaffolding and the misleading
    'training fold' message are also gone.
    """
    CHECKPOINT_PATH = '../trained_model/20200310_make_model_v1_0_5/checkpoint'  # None
    CP = get_checkpoint(CHECKPOINT_PATH) if CHECKPOINT_PATH is not None else None
    ## data
    pp_pl = img_prc.PreprocPipeline_v1()
    imgs = pp_pl.load_imgs()
    labels = dt_utl.get_train_label()
    # augmented evaluation transform
    ts_transformer = alb.Compose([
        albtr.ShiftScaleRotate(shift_limit=0.05, scale_limit=0.05, rotate_limit=5, border_mode=cv2.BORDER_CONSTANT, p=1),
        alb.OneOf([albtr.ElasticTransform(p=0.1, alpha=30, sigma=7, alpha_affine=0, border_mode=cv2.BORDER_CONSTANT),
                   albtr.GridDistortion(num_steps=20, distort_limit=0.3, border_mode=cv2.BORDER_CONSTANT, p=0.1),
                   albtr.OpticalDistortion(distort_limit=0.8, shift_limit=0.3, border_mode=cv2.BORDER_CONSTANT, p=0.1)
                   ], p=0.5),
        alb.OneOf([
            albtr.MedianBlur(blur_limit=3, p=0.2)
            ], p=0.3),
        albtr.Normalize(0.5, 0.5),
        albToTensor()
    ])
    ## model — identical hyperparameters to make_model_v1_0_5
    get_senet = senet.se_resnext50_32x4d
    encoder = modi_senet.SENetEncoder_CalibMixup_Multiscale_v2(
        get_senet(num_classes=1000, pretrained='imagenet'),
        input3ch=False,
        three_neck=False, mixup_alpha=1.0, mix_cand_layers=[0, 1, 2, 3],
        use_mish=True, cutmix_alpha=None, cutmix_cand_layers=None,
        output_layers=[4], dropblock_p=0.2,
    )
    classifier = bengali_classifier.ClassifierModule_v9([2048], dropout_p=0.1)
    model = bengali_classifier.BengaliClassifier_v2(encoder, classifier, True)
    if CP is not None:
        model.load_state_dict(CP['state_dict'])
    # evaluate on every training sample
    print('testing fold ', 0)
    vl_idxs = np.arange(len(labels))
    ts_batch_size = 512
    vl_ds = tdu.ImgDataset(imgs[vl_idxs], labels[vl_idxs], ts_transformer)
    vl_loader = tdu.get_dataloader(vl_ds, ts_batch_size, shuffle=False)
    loss_w = [0.5, 0.25, 0.25]
    tru.test_model_v1(model, None, vl_loader, loss_w)
    return
def make_model_v1_0_5_1():
    """
    Fine-tune variant of make_model_v1_0_5: same single-output-scale
    architecture (layer 4 only, mish, dropblock, ClassifierModule_v9), but
    mixup restricted to encoder layer 0, Cutout added to the training
    augmentation, and a lower lr (1e-4).  Class-balanced loss with
    beta=0.999; resumes epoch/optimizer state from 'checkpoint' when present.
    """
    checkpoint_path = 'checkpoint'  # set to None to start from ImageNet weights only
    checkpoint = None if checkpoint_path is None else get_checkpoint(checkpoint_path)

    ## data
    imgs = img_prc.PreprocPipeline_v1().load_imgs()
    labels = dt_utl.get_train_label()

    # train-time augmentation (incl. Cutout) / plain normalization for validation
    tr_transformer = alb.Compose([
        albtr.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.1, rotate_limit=15, border_mode=cv2.BORDER_CONSTANT, p=1),
        albtr.Cutout(num_holes=1, max_h_size=32, max_w_size=32, always_apply=False, p=0.5),
        albtr.Normalize(0.5, 0.5),
        albToTensor(),
    ])
    ts_transformer = alb.Compose([albtr.Normalize(0.5, 0.5), albToTensor()])

    ## model
    backbone = senet.se_resnext50_32x4d(num_classes=1000, pretrained='imagenet')
    encoder = modi_senet.SENetEncoder_CalibMixup_Multiscale_v2(
        backbone,
        input3ch=False,
        three_neck=False, mixup_alpha=1.0, mix_cand_layers=[0],
        use_mish=True, cutmix_alpha=None, cutmix_cand_layers=None,
        output_layers=[4], dropblock_p=0.2,
    )
    classifier = bengali_classifier.ClassifierModule_v9([2048], dropout_p=0.1)
    model = bengali_classifier.BengaliClassifier_v2(encoder, classifier, True)  # mixup applied inside the encoder
    if checkpoint is not None:
        model.load_state_dict(checkpoint['state_dict'])

    ## training: single pseudo-fold — train on everything, validate on a random subset
    ifld = 0
    print('training fold ', ifld)
    tr_idxs = np.arange(len(labels))
    vl_idxs = np.random.choice(tr_idxs, 10000, replace=False)
    tr_loader = tdu.get_dataloader(tdu.ImgDataset(imgs[tr_idxs], labels[tr_idxs], tr_transformer), 60)
    vl_loader = tdu.get_dataloader(tdu.ImgDataset(imgs[vl_idxs], labels[vl_idxs], ts_transformer), 512, shuffle=False)
    model = tru.train_model_v2_1(
        model, tr_loader, vl_loader,
        140,               # epochs
        1e-4,              # lr
        1,                 # grad_accum_steps
        1, 5, 0.5,         # warmup_epoch, patience, factor
        'AdaBound', 1e-4,  # optimizer, weight_decay
        [0.5, 0.25, 0.25],       # loss weights (presumably grapheme/vowel/consonant — confirm with tru)
        labels[tr_idxs], 0.999,  # class-balance reference labels and beta
        0 if checkpoint is None else checkpoint['epoch'],
        None if checkpoint is None else checkpoint['optimizer'],
    )

    # save
    torch.save(model.state_dict(), 'bengali_model')
    return
def make_model_v1_0_6():
    """
    Like make_model_v1_0_5 but fed by PreprocPipeline_v4: raw train images
    are loaded, preprocessed, saved to disk, and reloaded before training.
    Single-output-scale encoder (layer 4 only), mish, mixup on layers 0-3,
    dropblock, class-balanced loss with beta=0.999.  Checkpoint resume is
    disabled (checkpoint_path is None).
    """
    checkpoint_path = None  # 'checkpoint' to resume
    checkpoint = None if checkpoint_path is None else get_checkpoint(checkpoint_path)

    ## data: run the v4 preprocessing pipeline end-to-end, then reload from disk
    raw = dt_utl.get_image(type_is_train=True, height=137, width=236, data_idxs=[0, 1, 2, 3])
    pipeline = img_prc.PreprocPipeline_v4()
    pipeline.save_imgs(pipeline.preprocessing(raw))
    imgs = pipeline.load_imgs()
    labels = dt_utl.get_train_label()

    # train-time augmentation / plain normalization for validation
    tr_transformer = alb.Compose([
        albtr.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.1, rotate_limit=15, border_mode=cv2.BORDER_CONSTANT, p=1),
        albtr.Normalize(0.5, 0.5),
        albToTensor(),
    ])
    ts_transformer = alb.Compose([albtr.Normalize(0.5, 0.5), albToTensor()])

    ## model
    backbone = senet.se_resnext50_32x4d(num_classes=1000, pretrained='imagenet')
    encoder = modi_senet.SENetEncoder_CalibMixup_Multiscale_v2(
        backbone,
        input3ch=False,
        three_neck=False, mixup_alpha=1.0, mix_cand_layers=[0, 1, 2, 3],
        use_mish=True, cutmix_alpha=None, cutmix_cand_layers=None,
        output_layers=[4], dropblock_p=0.2,
    )
    classifier = bengali_classifier.ClassifierModule_v9([2048], dropout_p=0.1)
    model = bengali_classifier.BengaliClassifier_v2(encoder, classifier, True)  # mixup applied inside the encoder
    if checkpoint is not None:
        model.load_state_dict(checkpoint['state_dict'])

    ## training: single pseudo-fold — train on everything, validate on a random subset
    ifld = 0
    print('training fold ', ifld)
    tr_idxs = np.arange(len(labels))
    vl_idxs = np.random.choice(tr_idxs, 10000, replace=False)
    tr_loader = tdu.get_dataloader(tdu.ImgDataset(imgs[tr_idxs], labels[tr_idxs], tr_transformer), 60)
    vl_loader = tdu.get_dataloader(tdu.ImgDataset(imgs[vl_idxs], labels[vl_idxs], ts_transformer), 512, shuffle=False)
    model = tru.train_model_v2_1(
        model, tr_loader, vl_loader,
        140,               # epochs
        5e-4,              # lr
        1,                 # grad_accum_steps
        1, 5, 0.5,         # warmup_epoch, patience, factor
        'AdaBound', 1e-4,  # optimizer, weight_decay
        [0.5, 0.25, 0.25],       # loss weights (presumably grapheme/vowel/consonant — confirm with tru)
        labels[tr_idxs], 0.999,  # class-balance reference labels and beta
        0 if checkpoint is None else checkpoint['epoch'],
        None if checkpoint is None else checkpoint['optimizer'],
    )

    # save
    torch.save(model.state_dict(), 'bengali_model')
    return
def make_model_v1_0_7():
    """
    Train the SENetEncoder_Mixup variant with input upsampling to 192x192:
    mish activation, calibrated mixup on encoder layers 0-3 (cutmix off),
    dropblock, ClassifierModule_v1 head, class-balanced loss with
    beta=0.99.  Smaller batch (24) to compensate for the upsampled input.
    Checkpoint resume is disabled (checkpoint_path is None).
    """
    checkpoint_path = None  # 'checkpoint' to resume
    checkpoint = None if checkpoint_path is None else get_checkpoint(checkpoint_path)

    ## data
    imgs = img_prc.PreprocPipeline_v1().load_imgs()
    labels = dt_utl.get_train_label()

    # train-time augmentation / plain normalization for validation
    tr_transformer = alb.Compose([
        albtr.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.1, rotate_limit=15, border_mode=cv2.BORDER_CONSTANT, p=1),
        albtr.Normalize(0.5, 0.5),
        albToTensor(),
    ])
    ts_transformer = alb.Compose([albtr.Normalize(0.5, 0.5), albToTensor()])

    ## model
    backbone = senet.se_resnext50_32x4d(num_classes=1000, pretrained='imagenet')
    encoder = modi_senet.SENetEncoder_Mixup(
        backbone,
        input3ch=False,
        three_neck=False, mixup_alpha=1.0, mix_cand_layers=[0, 1, 2, 3],
        use_mish=True, cutmix_alpha=None, cutmix_cand_layers=None,
        dropblock_p=0.2, upsample_size=(192, 192), calib_mixup=True,
    )
    classifier = bengali_classifier.ClassifierModule_v1(2048, dropout_p=0.1)
    model = bengali_classifier.BengaliClassifier_v2(encoder, classifier, True)  # mixup applied inside the encoder
    if checkpoint is not None:
        model.load_state_dict(checkpoint['state_dict'])

    ## training: single pseudo-fold — train on everything, validate on a random subset
    ifld = 0
    print('training fold ', ifld)
    tr_idxs = np.arange(len(labels))
    vl_idxs = np.random.choice(tr_idxs, 10000, replace=False)
    tr_loader = tdu.get_dataloader(tdu.ImgDataset(imgs[tr_idxs], labels[tr_idxs], tr_transformer), 24)
    vl_loader = tdu.get_dataloader(tdu.ImgDataset(imgs[vl_idxs], labels[vl_idxs], ts_transformer), 512, shuffle=False)
    model = tru.train_model_v2_1(
        model, tr_loader, vl_loader,
        140,               # epochs
        5e-4,              # lr
        1,                 # grad_accum_steps
        1, 5, 0.5,         # warmup_epoch, patience, factor
        'AdaBound', 1e-4,  # optimizer, weight_decay
        [0.5, 0.25, 0.25],      # loss weights (presumably grapheme/vowel/consonant — confirm with tru)
        labels[tr_idxs], 0.99,  # class-balance reference labels and beta
        0 if checkpoint is None else checkpoint['epoch'],
        None if checkpoint is None else checkpoint['optimizer'],
    )

    # save
    torch.save(model.state_dict(), 'bengali_model')
    return
def make_model_v1_0_8():
    """
    Train the Bengali grapheme classifier, experiment v1.0.8.

    Techniques:
    mish, mixup, cutmix,
    upsampling
    dropblock,
    MultilabelStratifiedKFold,
    class balance(0.999),
    """
    # Resume from a local checkpoint directory when one is present.
    CHECKPOINT_PATH = 'checkpoint' #'checkpoint' # None
    CP = get_checkpoint(CHECKPOINT_PATH) if CHECKPOINT_PATH is not None else None
    ## data
    pp_pl = img_prc.PreprocPipeline_v1()
    imgs = pp_pl.load_imgs()
    labels = dt_utl.get_train_label()
    # transformer: geometric augmentation + cutout for training, plain normalization for validation
    tr_transformer = alb.Compose([
        albtr.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.1, rotate_limit=15, border_mode=cv2.BORDER_CONSTANT, p=1),
        albtr.Cutout(num_holes=1, max_h_size=32, max_w_size=32, always_apply=False, p=0.5),
        albtr.Normalize(0.5, 0.5),
        albToTensor()
    ])
    ts_transformer = alb.Compose([
        albtr.Normalize(0.5, 0.5),
        albToTensor()
    ])
    ## model
    get_senet = senet.se_resnext50_32x4d #senet.se_resnet152, senet.se_resnext50_32x4d, senet.se_resnext101_32x4d, senet.se_resnet50
    input3ch = False
    three_neck = False
    use_mish = True
    mixup_alpha = 1.0
    mix_cand_layers=[0]
    cutmix_alpha = None #1.0
    cutmix_cand_layers = None #[0, 1, 2, 3]
    #output_layers = [3, 4] #[2, 3, 4]
    dropblock_p = 0.2
    upsample_size = None
    calib_mixup = True
    encoder = modi_senet.SENetEncoder_Mixup(get_senet(num_classes=1000, pretrained=None),
                                            input3ch=input3ch,
                                            three_neck=three_neck, mixup_alpha=mixup_alpha, mix_cand_layers=mix_cand_layers,
                                            use_mish=use_mish, cutmix_alpha=cutmix_alpha, cutmix_cand_layers=cutmix_cand_layers,
                                            dropblock_p=dropblock_p, upsample_size=upsample_size, calib_mixup=calib_mixup,
                                            )
    encoded_planes = 2048 #[1024, 2048] #[512, 1024, 2048] #4 * 512
    dropout_p = 0.1
    classifier = bengali_classifier.ClassifierModule_v1(encoded_planes, dropout_p=dropout_p)
    encoder_use_mixup = True
    model = bengali_classifier.BengaliClassifier_v2(encoder, classifier,
                                                    encoder_use_mixup,
                                                    )
    if CP is not None:
        model.load_state_dict(CP['state_dict'])
    ## training
    #fld = ShuffleSplit(n_splits=5, test_size=.001, random_state=2020)
    #fld = MultilabelStratifiedKFold(n_splits=5, random_state=2020)
    #id = np.arange(len(labels))[:,None]
    #for ifld, (tr_idxs, vl_idxs) in enumerate(fld.split(id, labels)):
    for ifld in range(1):
        if ifld == 0:
            print('training fold ', ifld)
            # NOTE(review): validation indices are sampled from the full training
            # set, so the validation score is optimistic (leakage) -- confirm intended.
            tr_idxs = np.arange(len(labels))
            vl_idxs = np.random.choice(tr_idxs, 10000, replace=False)
            tr_ds = tdu.ImgDataset(imgs[tr_idxs], labels[tr_idxs], tr_transformer)
            vl_ds = tdu.ImgDataset(imgs[vl_idxs], labels[vl_idxs], ts_transformer)
            # train
            tr_batch_size = 64
            ts_batch_size = 512
            tr_loader = tdu.get_dataloader(tr_ds, tr_batch_size)
            vl_loader = tdu.get_dataloader(vl_ds, ts_batch_size, shuffle=False)
            epochs = 140
            lr = 1e-4
            grad_accum_steps = 1
            warmup_epoch=1
            patience=5
            factor=0.5
            opt = 'AdaBound'
            weight_decay=1e-4 #0.0
            # loss weights: grapheme root / vowel diacritic / consonant diacritic
            loss_w = [0.5, 0.25, 0.25]
            reference_label = labels[tr_idxs]
            cb_beta = 0.999
            start_epoch = 0 if CP is None else CP['epoch']
            opt_state_dict = None if CP is None else CP['optimizer']
            #start_epoch = 0
            #opt_state_dict = None
            model = tru.train_model_v2_1(model, tr_loader, vl_loader,
                                         epochs, lr, grad_accum_steps,
                                         warmup_epoch, patience, factor, opt, weight_decay, loss_w, reference_label, cb_beta, start_epoch, opt_state_dict)
    # save
    torch.save(model.state_dict(), 'bengali_model')
    return
def test_model_v1_0_8():
    """
    Evaluate the trained v1.0.8 Bengali model from its checkpoint.

    Model configuration mirrors make_model_v1_0_8:
    mish, mixup, cutmix,
    upsampling
    dropblock,
    MultilabelStratifiedKFold,
    class balance(0.999),
    """
    CHECKPOINT_PATH = '../trained_model/20200313_make_model_v1_0_8/checkpoint' # None
    CP = get_checkpoint(CHECKPOINT_PATH) if CHECKPOINT_PATH is not None else None
    ## data
    pp_pl = img_prc.PreprocPipeline_v1()
    imgs = pp_pl.load_imgs()
    labels = dt_utl.get_train_label()
    # transformer
    # NOTE(review): here the *test* transformer carries the heavy augmentations while
    # tr_transformer is plain normalization -- looks like a robustness/TTA-style
    # evaluation; confirm intended.
    tr_transformer = alb.Compose([
        #albtr.ShiftScaleRotate(shift_limit=0.05, scale_limit=0.05, rotate_limit=5, border_mode=cv2.BORDER_CONSTANT, p=1),
        albtr.Normalize(0.5, 0.5),
        albToTensor()
    ])
    ts_transformer = alb.Compose([
        albtr.ShiftScaleRotate(shift_limit=0.05, scale_limit=0.05, rotate_limit=5, border_mode=cv2.BORDER_CONSTANT, p=1),
        alb.OneOf([albtr.ElasticTransform(p=0.1, alpha=30, sigma=7, alpha_affine=0,border_mode=cv2.BORDER_CONSTANT),
                   albtr.GridDistortion(num_steps=20, distort_limit=0.3 ,border_mode=cv2.BORDER_CONSTANT,p=0.1),
                   albtr.OpticalDistortion(distort_limit=0.8, shift_limit=0.3,border_mode=cv2.BORDER_CONSTANT, p=0.1)
                   ], p=0.5),
        alb.OneOf([
            albtr.MedianBlur(blur_limit=3, p=0.2)
        ], p=0.3),
        albtr.Normalize(0.5, 0.5),
        albToTensor()
    ])
    ## model (must match the architecture used at training time so the state dict loads)
    get_senet = senet.se_resnext50_32x4d #senet.se_resnet152, senet.se_resnext50_32x4d, senet.se_resnext101_32x4d, senet.se_resnet50
    input3ch = False
    three_neck = False
    use_mish = True
    mixup_alpha = 1.0
    mix_cand_layers=[0]
    cutmix_alpha = None #1.0
    cutmix_cand_layers = None #[0, 1, 2, 3]
    #output_layers = [3, 4] #[2, 3, 4]
    dropblock_p = 0.2
    upsample_size = None
    calib_mixup = True
    encoder = modi_senet.SENetEncoder_Mixup(get_senet(num_classes=1000, pretrained=None),
                                            input3ch=input3ch,
                                            three_neck=three_neck, mixup_alpha=mixup_alpha, mix_cand_layers=mix_cand_layers,
                                            use_mish=use_mish, cutmix_alpha=cutmix_alpha, cutmix_cand_layers=cutmix_cand_layers,
                                            dropblock_p=dropblock_p, upsample_size=upsample_size, calib_mixup=calib_mixup,
                                            )
    encoded_planes = 2048 #[1024, 2048] #[512, 1024, 2048] #4 * 512
    dropout_p = 0.1
    classifier = bengali_classifier.ClassifierModule_v1(encoded_planes, dropout_p=dropout_p)
    encoder_use_mixup = True
    model = bengali_classifier.BengaliClassifier_v2(encoder, classifier,
                                                    encoder_use_mixup,
                                                    )
    if CP is not None:
        model.load_state_dict(CP['state_dict'])
    for ifld in range(1):
        if ifld == 0:
            print('training fold ', ifld)
            # Evaluate on the whole training set.
            vl_idxs = np.arange(len(labels))
            # test
            ts_batch_size = 512
            # tr_ds/tr_loader are built but unused below (test call passes None).
            tr_ds = tdu.ImgDataset(imgs[vl_idxs], labels[vl_idxs], tr_transformer)
            tr_loader = tdu.get_dataloader(tr_ds, ts_batch_size, shuffle=False)
            vl_ds = tdu.ImgDataset(imgs[vl_idxs], labels[vl_idxs], ts_transformer)
            vl_loader = tdu.get_dataloader(vl_ds, ts_batch_size, shuffle=False)
            loss_w = [0.5, 0.25, 0.25]
            #tru.test_model_v1(model, tr_loader, vl_loader, loss_w)
            tru.test_model_v1(model, None, vl_loader, loss_w)
    return
def make_model_v1_0_9():
    """
    Train the Bengali grapheme classifier, experiment v1.0.9
    (as v1.0.8 but SE-ResNeXt101 backbone, mixup_alpha=0.2 on layers 0-3, calib_mixup off).

    Techniques:
    mish, mixup, cutmix,
    upsampling
    dropblock,
    MultilabelStratifiedKFold,
    class balance(0.999),
    """
    # Resume from a local checkpoint directory when one is present.
    CHECKPOINT_PATH = 'checkpoint' #'checkpoint' # None
    CP = get_checkpoint(CHECKPOINT_PATH) if CHECKPOINT_PATH is not None else None
    ## data
    pp_pl = img_prc.PreprocPipeline_v1()
    imgs = pp_pl.load_imgs()
    labels = dt_utl.get_train_label()
    # transformer: geometric augmentation + cutout for training, plain normalization for validation
    tr_transformer = alb.Compose([
        albtr.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.1, rotate_limit=15, border_mode=cv2.BORDER_CONSTANT, p=1),
        albtr.Cutout(num_holes=1, max_h_size=32, max_w_size=32, always_apply=False, p=0.5),
        albtr.Normalize(0.5, 0.5),
        albToTensor()
    ])
    ts_transformer = alb.Compose([
        albtr.Normalize(0.5, 0.5),
        albToTensor()
    ])
    ## model
    get_senet = senet.se_resnext101_32x4d #senet.se_resnet152, senet.se_resnext50_32x4d, senet.se_resnext101_32x4d, senet.se_resnet50
    input3ch = False
    three_neck = False
    use_mish = True
    mixup_alpha = 0.2
    mix_cand_layers=[0, 1, 2, 3]
    cutmix_alpha = None #1.0
    cutmix_cand_layers = None #[0, 1, 2, 3]
    #output_layers = [3, 4] #[2, 3, 4]
    dropblock_p = 0.2
    upsample_size = None
    calib_mixup = False
    encoder = modi_senet.SENetEncoder_Mixup(get_senet(num_classes=1000, pretrained=None),
                                            input3ch=input3ch,
                                            three_neck=three_neck, mixup_alpha=mixup_alpha, mix_cand_layers=mix_cand_layers,
                                            use_mish=use_mish, cutmix_alpha=cutmix_alpha, cutmix_cand_layers=cutmix_cand_layers,
                                            dropblock_p=dropblock_p, upsample_size=upsample_size, calib_mixup=calib_mixup,
                                            )
    encoded_planes = 2048 #[1024, 2048] #[512, 1024, 2048] #4 * 512
    dropout_p = 0.1
    classifier = bengali_classifier.ClassifierModule_v1(encoded_planes, dropout_p=dropout_p)
    encoder_use_mixup = True
    model = bengali_classifier.BengaliClassifier_v2(encoder, classifier,
                                                    encoder_use_mixup,
                                                    )
    if CP is not None:
        model.load_state_dict(CP['state_dict'])
    ## training
    #fld = ShuffleSplit(n_splits=5, test_size=.001, random_state=2020)
    #fld = MultilabelStratifiedKFold(n_splits=5, random_state=2020)
    #id = np.arange(len(labels))[:,None]
    #for ifld, (tr_idxs, vl_idxs) in enumerate(fld.split(id, labels)):
    for ifld in range(1):
        if ifld == 0:
            print('training fold ', ifld)
            # NOTE(review): validation indices are sampled from the full training
            # set, so the validation score is optimistic (leakage) -- confirm intended.
            tr_idxs = np.arange(len(labels))
            vl_idxs = np.random.choice(tr_idxs, 10000, replace=False)
            tr_ds = tdu.ImgDataset(imgs[tr_idxs], labels[tr_idxs], tr_transformer)
            vl_ds = tdu.ImgDataset(imgs[vl_idxs], labels[vl_idxs], ts_transformer)
            # train
            tr_batch_size = 64
            ts_batch_size = 512
            tr_loader = tdu.get_dataloader(tr_ds, tr_batch_size)
            vl_loader = tdu.get_dataloader(vl_ds, ts_batch_size, shuffle=False)
            epochs = 140
            lr = 1e-4
            grad_accum_steps = 1
            warmup_epoch=1
            patience=5
            factor=0.5
            opt = 'AdaBound'
            weight_decay=1e-4 #0.0
            # loss weights: grapheme root / vowel diacritic / consonant diacritic
            loss_w = [0.5, 0.25, 0.25]
            reference_label = labels[tr_idxs]
            cb_beta = 0.999
            start_epoch = 0 if CP is None else CP['epoch']
            opt_state_dict = None if CP is None else CP['optimizer']
            #start_epoch = 0
            #opt_state_dict = None
            model = tru.train_model_v2_1(model, tr_loader, vl_loader,
                                         epochs, lr, grad_accum_steps,
                                         warmup_epoch, patience, factor, opt, weight_decay, loss_w, reference_label, cb_beta, start_epoch, opt_state_dict)
    # save
    torch.save(model.state_dict(), 'bengali_model')
    return
def make_model_v1_0_10():
    """
    Train the Bengali grapheme classifier, experiment v1.0.10
    (multiscale encoder taking features from layers 3 and 4).

    Techniques:
    PreprocPipeline_v5
    mish, mixup, cutmix,
    SENetEncoder_Multiscale_v2+ClassifierModule_v9,
    dropblock,
    class balance(0.999),
    """
    # Fresh run: no checkpoint resume.
    CHECKPOINT_PATH = None #'checkpoint' # None
    CP = get_checkpoint(CHECKPOINT_PATH) if CHECKPOINT_PATH is not None else None
    ## data
    # Preprocess the raw train images once and cache them to disk before loading.
    imgs = dt_utl.get_image(type_is_train=True, height=137, width=236, data_idxs=[0,1,2,3])
    pp_pl = img_prc.PreprocPipeline_v5()
    imgs = pp_pl.preprocessing(imgs)
    pp_pl.save_imgs(imgs)
    imgs = pp_pl.load_imgs()
    labels = dt_utl.get_train_label()
    # transformer: geometric augmentation + cutout for training, plain normalization for validation
    tr_transformer = alb.Compose([
        albtr.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.1, rotate_limit=15, border_mode=cv2.BORDER_CONSTANT, p=1),
        albtr.Cutout(num_holes=1, max_h_size=32, max_w_size=32, always_apply=False, p=0.5),
        albtr.Normalize(0.5, 0.5),
        albToTensor()
    ])
    ts_transformer = alb.Compose([
        albtr.Normalize(0.5, 0.5),
        albToTensor()
    ])
    ## model
    get_senet = senet.se_resnext50_32x4d #senet.se_resnet152, senet.se_resnext50_32x4d, senet.se_resnext101_32x4d, senet.se_resnet50
    input3ch = False
    three_neck = False
    use_mish = True
    mixup_alpha = 0.5
    mix_cand_layers=[0, 1, 2, 3] #[0, 1, 2, 3]
    cutmix_alpha = None #1.0
    cutmix_cand_layers = None #[0]
    output_layers = [3, 4]
    dropblock_p = 0.2
    # ImageNet-pretrained backbone for this experiment.
    encoder = modi_senet.SENetEncoder_CalibMixup_Multiscale_v2(get_senet(num_classes=1000, pretrained='imagenet'),
                                                               input3ch=input3ch,
                                                               three_neck=three_neck, mixup_alpha=mixup_alpha, mix_cand_layers=mix_cand_layers,
                                                               use_mish=use_mish, cutmix_alpha=cutmix_alpha, cutmix_cand_layers=cutmix_cand_layers,
                                                               output_layers=output_layers, dropblock_p=dropblock_p,
                                                               )
    # Channel counts of the tapped encoder layers (must match output_layers).
    encoded_planes = [1024, 2048] #4 * 512
    dropout_p = 0.1
    classifier = bengali_classifier.ClassifierModule_v9(encoded_planes, dropout_p=dropout_p)
    encoder_use_mixup = True
    model = bengali_classifier.BengaliClassifier_v2(encoder, classifier,
                                                    encoder_use_mixup,
                                                    )
    if CP is not None:
        model.load_state_dict(CP['state_dict'])
    ## training
    #fld = ShuffleSplit(n_splits=5, test_size=.001, random_state=2020)
    #fld = MultilabelStratifiedKFold(n_splits=1000, random_state=2020)
    #id = np.arange(len(labels))[:,None]
    #for ifld, (tr_idxs, vl_idxs) in enumerate(fld.split(id, labels)):
    for ifld in range(1):
        if ifld == 0:
            print('training fold ', ifld)
            # NOTE(review): validation indices are sampled from the full training
            # set, so the validation score is optimistic (leakage) -- confirm intended.
            tr_idxs = np.arange(len(labels))
            vl_idxs = np.random.choice(tr_idxs, 1000, replace=False)
            tr_ds = tdu.ImgDataset(imgs[tr_idxs], labels[tr_idxs], tr_transformer)
            vl_ds = tdu.ImgDataset(imgs[vl_idxs], labels[vl_idxs], ts_transformer)
            # train
            tr_batch_size = 64
            ts_batch_size = 512
            tr_loader = tdu.get_dataloader(tr_ds, tr_batch_size)
            vl_loader = tdu.get_dataloader(vl_ds, ts_batch_size, shuffle=False)
            epochs = 120
            lr = 5e-4
            grad_accum_steps = 1
            warmup_epoch=1
            patience=5
            factor=0.5
            opt = 'AdaBound'
            weight_decay=1e-4 #0.0
            # loss weights: grapheme root / vowel diacritic / consonant diacritic
            loss_w = [0.5, 0.25, 0.25]
            reference_label = labels[tr_idxs]
            cb_beta = 0.999
            start_epoch = 0 if CP is None else CP['epoch']
            opt_state_dict = None if CP is None else CP['optimizer']
            model = tru.train_model_v2_1(model, tr_loader, vl_loader,
                                         epochs, lr, grad_accum_steps,
                                         warmup_epoch, patience, factor, opt, weight_decay, loss_w, reference_label, cb_beta, start_epoch, opt_state_dict)
    # save
    torch.save(model.state_dict(), 'bengali_model')
    return
def make_model_v1_0_11():
    """
    Train the Bengali grapheme classifier, experiment v1.0.11
    (multiscale encoder taking features from layers 2, 3 and 4; mixup on layers 0-2).

    Techniques:
    PreprocPipeline_v5
    mish, mixup, cutmix,
    SENetEncoder_Multiscale_v2+ClassifierModule_v9,
    dropblock,
    class balance(0.999),
    """
    # Resume from a local checkpoint directory when one is present.
    CHECKPOINT_PATH = 'checkpoint' #'checkpoint' # None
    CP = get_checkpoint(CHECKPOINT_PATH) if CHECKPOINT_PATH is not None else None
    ## data
    # Preprocessing was already cached to disk by a previous run; just load it.
    #imgs = dt_utl.get_image(type_is_train=True, height=137, width=236, data_idxs=[0,1,2,3])
    pp_pl = img_prc.PreprocPipeline_v5()
    #imgs = pp_pl.preprocessing(imgs)
    #pp_pl.save_imgs(imgs)
    imgs = pp_pl.load_imgs()
    labels = dt_utl.get_train_label()
    # transformer: geometric augmentation + cutout for training, plain normalization for validation
    tr_transformer = alb.Compose([
        albtr.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.1, rotate_limit=15, border_mode=cv2.BORDER_CONSTANT, p=1),
        albtr.Cutout(num_holes=1, max_h_size=32, max_w_size=32, always_apply=False, p=0.5),
        albtr.Normalize(0.5, 0.5),
        albToTensor()
    ])
    ts_transformer = alb.Compose([
        albtr.Normalize(0.5, 0.5),
        albToTensor()
    ])
    ## model
    get_senet = senet.se_resnext50_32x4d #senet.se_resnet152, senet.se_resnext50_32x4d, senet.se_resnext101_32x4d, senet.se_resnet50
    input3ch = False
    three_neck = False
    use_mish = True
    mixup_alpha = 1.0
    mix_cand_layers=[0, 1, 2] #[0, 1, 2, 3]
    cutmix_alpha = None #1.0
    cutmix_cand_layers = None #[0]
    output_layers = [2, 3, 4]
    dropblock_p = 0.2
    encoder = modi_senet.SENetEncoder_CalibMixup_Multiscale_v2(get_senet(num_classes=1000, pretrained=None),
                                                               input3ch=input3ch,
                                                               three_neck=three_neck, mixup_alpha=mixup_alpha, mix_cand_layers=mix_cand_layers,
                                                               use_mish=use_mish, cutmix_alpha=cutmix_alpha, cutmix_cand_layers=cutmix_cand_layers,
                                                               output_layers=output_layers, dropblock_p=dropblock_p,
                                                               )
    # Channel counts of the tapped encoder layers (must match output_layers).
    encoded_planes = [512, 1024, 2048] #4 * 512
    dropout_p = 0.1
    classifier = bengali_classifier.ClassifierModule_v9(encoded_planes, dropout_p=dropout_p)
    encoder_use_mixup = True
    model = bengali_classifier.BengaliClassifier_v2(encoder, classifier,
                                                    encoder_use_mixup,
                                                    )
    if CP is not None:
        model.load_state_dict(CP['state_dict'])
    ## training
    #fld = ShuffleSplit(n_splits=5, test_size=.001, random_state=2020)
    #fld = MultilabelStratifiedKFold(n_splits=1000, random_state=2020)
    #id = np.arange(len(labels))[:,None]
    #for ifld, (tr_idxs, vl_idxs) in enumerate(fld.split(id, labels)):
    for ifld in range(1):
        if ifld == 0:
            print('training fold ', ifld)
            # NOTE(review): validation indices are sampled from the full training
            # set, so the validation score is optimistic (leakage) -- confirm intended.
            tr_idxs = np.arange(len(labels))
            vl_idxs = np.random.choice(tr_idxs, 1000, replace=False)
            tr_ds = tdu.ImgDataset(imgs[tr_idxs], labels[tr_idxs], tr_transformer)
            vl_ds = tdu.ImgDataset(imgs[vl_idxs], labels[vl_idxs], ts_transformer)
            # train
            tr_batch_size = 64
            ts_batch_size = 512
            tr_loader = tdu.get_dataloader(tr_ds, tr_batch_size)
            vl_loader = tdu.get_dataloader(vl_ds, ts_batch_size, shuffle=False)
            epochs = 120
            lr = 1e-4
            grad_accum_steps = 1
            warmup_epoch=2
            patience=3
            factor=0.5
            opt = 'AdaBound'
            weight_decay=1e-4 #0.0
            # loss weights: grapheme root / vowel diacritic / consonant diacritic
            loss_w = [0.5, 0.25, 0.25]
            reference_label = labels[tr_idxs]
            cb_beta = 0.999
            start_epoch = 0 if CP is None else CP['epoch']
            opt_state_dict = None if CP is None else CP['optimizer']
            #start_epoch = 0
            #opt_state_dict = None
            model = tru.train_model_v2_1(model, tr_loader, vl_loader,
                                         epochs, lr, grad_accum_steps,
                                         warmup_epoch, patience, factor, opt, weight_decay, loss_w, reference_label, cb_beta, start_epoch, opt_state_dict)
    # save
    torch.save(model.state_dict(), 'bengali_model')
    return
def test_model_v1_0_11():
    """
    Evaluate the trained v1.0.11 Bengali model from its checkpoint.

    Model configuration mirrors make_model_v1_0_11:
    mish, mixup, cutmix,
    SENetEncoder_Multiscale_v2+ClassifierModule_v9,
    dropblock,
    MultilabelStratifiedKFold,
    class balance(0.999),
    """
    CHECKPOINT_PATH = '../trained_model/20200316_make_model_v1_0_11/checkpoint' # None
    CP = get_checkpoint(CHECKPOINT_PATH) if CHECKPOINT_PATH is not None else None
    ## data
    pp_pl = img_prc.PreprocPipeline_v5()
    imgs = pp_pl.load_imgs()
    labels = dt_utl.get_train_label()
    # transformer
    # NOTE(review): the *test* transformer carries the heavy augmentations while
    # tr_transformer is plain normalization -- robustness/TTA-style evaluation;
    # confirm intended.
    tr_transformer = alb.Compose([
        #albtr.ShiftScaleRotate(shift_limit=0.05, scale_limit=0.05, rotate_limit=5, border_mode=cv2.BORDER_CONSTANT, p=1),
        albtr.Normalize(0.5, 0.5),
        albToTensor()
    ])
    ts_transformer = alb.Compose([
        albtr.ShiftScaleRotate(shift_limit=0.05, scale_limit=0.05, rotate_limit=5, border_mode=cv2.BORDER_CONSTANT, p=1),
        alb.OneOf([albtr.ElasticTransform(p=0.1, alpha=30, sigma=7, alpha_affine=0,border_mode=cv2.BORDER_CONSTANT),
                   #albtr.GridDistortion(num_steps=20, distort_limit=0.3 ,border_mode=cv2.BORDER_CONSTANT,p=0.1),
                   albtr.OpticalDistortion(distort_limit=0.8, shift_limit=0.3,border_mode=cv2.BORDER_CONSTANT, p=0.1)
                   ], p=0.5),
        alb.OneOf([
            albtr.MedianBlur(blur_limit=3, p=0.2)
        ], p=0.3),
        albtr.Normalize(0.5, 0.5),
        albToTensor()
    ])
    ## model (must match the architecture used at training time so the state dict loads)
    get_senet = senet.se_resnext50_32x4d #senet.se_resnet152, senet.se_resnext50_32x4d, senet.se_resnext101_32x4d, senet.se_resnet50
    input3ch = False
    three_neck = False
    use_mish = True
    mixup_alpha = 1.0
    mix_cand_layers=[0, 1, 2] #[0, 1, 2, 3]
    cutmix_alpha = None #1.0
    cutmix_cand_layers = None #[0]
    output_layers = [2, 3, 4]
    dropblock_p = 0.2
    encoder = modi_senet.SENetEncoder_CalibMixup_Multiscale_v2(get_senet(num_classes=1000, pretrained=None),
                                                               input3ch=input3ch,
                                                               three_neck=three_neck, mixup_alpha=mixup_alpha, mix_cand_layers=mix_cand_layers,
                                                               use_mish=use_mish, cutmix_alpha=cutmix_alpha, cutmix_cand_layers=cutmix_cand_layers,
                                                               output_layers=output_layers, dropblock_p=dropblock_p,
                                                               )
    encoded_planes = [512, 1024, 2048] #4 * 512
    dropout_p = 0.1
    classifier = bengali_classifier.ClassifierModule_v9(encoded_planes, dropout_p=dropout_p)
    encoder_use_mixup = True
    model = bengali_classifier.BengaliClassifier_v2(encoder, classifier,
                                                    encoder_use_mixup,
                                                    )
    if CP is not None:
        model.load_state_dict(CP['state_dict'])
    for ifld in range(1):
        if ifld == 0:
            print('training fold ', ifld)
            # Evaluate on the whole training set.
            vl_idxs = np.arange(len(labels))
            # test
            ts_batch_size = 512
            # tr_ds/tr_loader are built but unused below (test call passes None);
            # kept for the commented-out alternative call.
            tr_ds = tdu.ImgDataset(imgs[vl_idxs], labels[vl_idxs], tr_transformer)
            tr_loader = tdu.get_dataloader(tr_ds, ts_batch_size, shuffle=False)
            vl_ds = tdu.ImgDataset(imgs[vl_idxs], labels[vl_idxs], ts_transformer)
            vl_loader = tdu.get_dataloader(vl_ds, ts_batch_size, shuffle=False)
            loss_w = [0.5, 0.25, 0.25]
            #tru.test_model_v1(model, tr_loader, vl_loader, loss_w)
            tru.test_model_v1(model, None, vl_loader, loss_w)
    # Fix: the final return line had dataset-extraction junk ("| StarcoderdataPython")
    # fused onto it, which was a syntax error.
    return
import ctypes as ct
import numpy as np
import sharpy.utils.algebra as algebra
import sharpy.aero.utils.uvlmlib as uvlmlib
import sharpy.utils.cout_utils as cout
import sharpy.utils.settings as settings
from sharpy.utils.solver_interface import solver, BaseSolver
import sharpy.utils.generator_interface as gen_interface
@solver
class StaticUvlm(BaseSolver):
    """
    ``StaticUvlm`` solver class, inherited from ``BaseSolver``

    Aerodynamic solver that runs a UVLM routine to solve the steady or unsteady aerodynamic problem.
    The aerodynamic problem is posed in the form of an ``Aerogrid`` object.

    Args:
        data (PreSharpy): object with problem data
        custom_settings (dict): custom settings that override the settings in the solver ``.txt`` file. None by default

    Attributes:
        settings (dict): Name-value pair of settings employed by solver. See Notes for valid combinations
        settings_types (dict): Acceptable data types for entries in ``settings``
        settings_default (dict): Default values for the available ``settings``
        data (PreSharpy): object containing the information of the problem
        velocity_generator(object): object containing the flow conditions information
    """
    solver_id = 'StaticUvlm'
    solver_classification = 'aero'

    settings_types = dict()
    settings_default = dict()
    settings_description = dict()

    settings_types['print_info'] = 'bool'
    settings_default['print_info'] = True
    settings_description['print_info'] = 'Print info to screen'

    settings_types['horseshoe'] = 'bool'
    settings_default['horseshoe'] = False
    settings_description['horseshoe'] = 'Horseshoe wake modelling for steady simulations.'

    settings_types['num_cores'] = 'int'
    settings_default['num_cores'] = 0
    settings_description['num_cores'] = 'Number of cores to use in the VLM lib'

    settings_types['n_rollup'] = 'int'
    settings_default['n_rollup'] = 1
    settings_description['n_rollup'] = 'Number of rollup iterations for free wake. Use at least ``n_rollup > 1.1*m_star``'

    settings_types['rollup_dt'] = 'float'
    settings_default['rollup_dt'] = 0.1
    settings_description['rollup_dt'] = 'Pseudo time step for wake convection. Chose it so that it is similar to the unsteady time step'

    settings_types['rollup_aic_refresh'] = 'int'
    settings_default['rollup_aic_refresh'] = 1
    # Bug fix: this description was previously assigned to the 'rollup_dt' key,
    # overwriting that setting's text and leaving 'rollup_aic_refresh' undocumented.
    settings_description['rollup_aic_refresh'] = 'Controls when the AIC matrix is refreshed during the wake rollup'

    settings_types['rollup_tolerance'] = 'float'
    settings_default['rollup_tolerance'] = 1e-4
    settings_description['rollup_tolerance'] = 'Convergence criterium for rollup wake'

    settings_types['iterative_solver'] = 'bool'
    settings_default['iterative_solver'] = False
    settings_description['iterative_solver'] = 'Not in use'

    settings_types['iterative_tol'] = 'float'
    settings_default['iterative_tol'] = 1e-4
    settings_description['iterative_tol'] = 'Not in use'

    settings_types['iterative_precond'] = 'bool'
    settings_default['iterative_precond'] = False
    settings_description['iterative_precond'] = 'Not in use'

    settings_types['velocity_field_generator'] = 'str'
    settings_default['velocity_field_generator'] = 'SteadyVelocityField'
    settings_description['velocity_field_generator'] = 'Name of the velocity field generator to be used in the simulation'

    settings_types['velocity_field_input'] = 'dict'
    settings_default['velocity_field_input'] = {}
    settings_description['velocity_field_input'] = 'Dictionary of settings for the velocity field generator'

    settings_types['rho'] = 'float'
    settings_default['rho'] = 1.225
    settings_description['rho'] = 'Air density'

    # Append the auto-generated settings table to the class docstring.
    settings_table = settings.SettingsTable()
    __doc__ += settings_table.generate(settings_types, settings_default, settings_description)

    def __init__(self):
        # settings list
        self.data = None
        self.settings = None
        self.velocity_generator = None

    def initialise(self, data, custom_settings=None):
        """Store the problem data, resolve settings and set up the velocity generator."""
        self.data = data
        if custom_settings is None:
            self.settings = data.settings[self.solver_id]
        else:
            self.settings = custom_settings
        settings.to_custom_types(self.settings, self.settings_types, self.settings_default)

        self.update_step()

        # init velocity generator
        velocity_generator_type = gen_interface.generator_from_string(
            self.settings['velocity_field_generator'])
        self.velocity_generator = velocity_generator_type()
        self.velocity_generator.initialise(self.settings['velocity_field_input'])

    def run(self):
        """Run the steady VLM solver on the current timestep and return the updated data."""
        # Nothing to solve if the aero grid has no surfaces/panels yet.
        if not self.data.aero.timestep_info[self.data.ts].zeta:
            return self.data

        # generate uext
        self.velocity_generator.generate({'zeta': self.data.aero.timestep_info[self.data.ts].zeta,
                                          'override': True,
                                          'for_pos': self.data.structure.timestep_info[self.data.ts].for_pos[0:3]},
                                         self.data.aero.timestep_info[self.data.ts].u_ext)
        # grid orientation
        uvlmlib.vlm_solver(self.data.aero.timestep_info[self.data.ts],
                           self.settings)

        return self.data

    def next_step(self):
        """ Updates de aerogrid based on the info of the step, and increases
        the self.ts counter """
        self.data.aero.add_timestep()
        self.update_step()

    def update_step(self):
        """Regenerate the aerodynamic grid (zeta) from the current structural state."""
        self.data.aero.generate_zeta(self.data.structure,
                                     self.data.aero.aero_settings,
                                     self.data.ts)
        # for i_surf in range(self.data.aero.timestep_info[self.data.ts].n_surf):
        #     self.data.aero.timestep_info[self.data.ts].forces[i_surf].fill(0.0)
        #     self.data.aero.timestep_info[self.data.ts].dynamic_forces[i_surf].fill(0.0)
'''
Purpose: train word-embedding vectors (word2vec)
Author: Wang Zhongqi
Date: 2022/3/15
'''
from gensim.models.word2vec import Word2Vec
from numpy import array
def seg_word(all) -> list:
    """Filter stop words out of the given token list.

    Reads the stop-word list from 'stopwords.txt' (one word per line, UTF-8)
    and returns the tokens of *all* that are not stop words, in order.
    """
    with open('stopwords.txt', 'r', encoding='utf-8') as fr:
        stopwords = {line.strip() for line in fr}
    return [token for token in all if token not in stopwords]
def count_feature(file_name) -> array:
    """Read a POS-tagged corpus file and return it as a list of token lists.

    Each input line holds space-separated "word/tag" fields with a trailing
    empty field; the tag is stripped and stop words are removed per sentence.
    """
    sentences = []
    with open(file_name, 'r', encoding='utf-8') as inp:
        for raw_line in inp.readlines():
            fields = raw_line.rstrip("\n").split(" ")
            fields.pop()  # drop the empty field produced by the trailing space
            words = [field.split("/")[0] for field in fields]
            sentences.append(seg_word(words))
    print("读入数据完毕")
    return sentences
def model_train(token_list):
    """Train a skip-gram Word2Vec model on *token_list* and save it to disk.

    Saves the model under the name 'my_word2vec_skip' and returns True.
    """
    model = Word2Vec(
        token_list,
        workers=1,          # number of reader workers
        vector_size=497,    # embedding dimensionality
        min_count=1,        # keep every word, however rare
        window=3,           # context window size
        sample=1e-3,        # subsampling threshold for frequent words
        epochs=100,         # training epochs
        sg=1                # 1 = skip-gram architecture
    )
    # Normalize the vectors in place (NOTE(review): init_sims is deprecated in
    # recent gensim releases -- confirm the installed version still supports it).
    model.init_sims(replace=True)
    model.save("my_word2vec_skip")
    return True
def main():
    """Train the embedding model on the tagged corpus, then print the 10
    nearest neighbours (by cosine similarity) of a probe word.

    Fix: the original read ``file_name = file_name = "..."`` -- a duplicated
    chained assignment; also replaced the non-idiomatic ``if(...)`` call.
    """
    file_name = "./data/train/name_train.txt"
    token_list = count_feature(file_name)
    if model_train(token_list):
        print("训练完成")
    model = Word2Vec.load("my_word2vec_skip")  # reload the trained word2vec model
    # Show the words most similar to the probe word (cosine similarity).
    for e in model.wv.most_similar(positive=['泽民'], topn=10):
        print(e[0], e[1])  # the word and its similarity score
if __name__ == "__main__":
    main()
| StarcoderdataPython |
from aiogram.dispatcher.filters.state import StatesGroup, State
class DeathNote(StatesGroup):
    """aiogram FSM state group for the Death Note entry dialogue.

    Fix: dataset-extraction junk ("| StarcoderdataPython") was fused onto the
    last line, which was a syntax error.
    """
    surname_first_name = State()  # step 1: ask for the victim's surname and first name
    cause_of_death = State()  # step 2: ask for the cause of death
import numpy as np
from matplotlib import pyplot as plt
from .activation_functions import sigmoid, sigmoid_prime
def predict(features, weights):
    """Return a 1D array of probabilities that each observation's label == 1.

    Computes the linear score ``features @ weights`` and squashes it with the
    logistic sigmoid.
    """
    scores = features @ weights
    return sigmoid(scores)
def cost_function(features, labels, weights):
    """Mean binary cross-entropy cost for logistic regression.

    Features: (N, d), Labels: (N, 1), Weights: (d, 1).
    Returns a scalar:
        mean over observations of
        -( labels*log(p) + (1-labels)*log(1-p) )  with p = predict(...).

    (The original docstring labelled this "MAE"; the math is cross-entropy.)
    """
    preds = predict(features, weights)
    # Per-observation negative log-likelihood of the true label.
    nll = -labels * np.log(preds) - (1 - labels) * np.log(1 - preds)
    # Average over the dataset.
    return nll.sum() / len(labels)
def update_weights(features, labels, weights, lr):
    """One vectorized gradient-descent step for logistic regression.

    Features: (N, d), Labels: (N, 1), Weights: (d, 1), lr: learning rate.
    Mutates *weights* in place (via ``-=``) and returns it.
    """
    n_obs = len(features)
    # 1 - current predictions
    preds = predict(features, weights)
    # 2/3 - average gradient of the cost: X^T (p - y) / N, one partial
    #       derivative per feature, summed over all observations.
    gradient = (features.T @ (preds - labels)) / n_obs
    # 4/5 - step against the gradient, scaled by the learning rate.
    weights -= lr * gradient
    return weights
def decision_boundary(prob):
    """Map a probability to a hard class label: 1 if prob >= 0.5, else 0."""
    if prob >= 0.5:
        return 1
    return 0
def classify(predictions):
    """
    Map probabilities to hard class labels.

    input  - N element array of predictions between 0 and 1
    output - flat N element int array of 0s (False) and 1s (True)

    Fix: the original assigned ``decision_boundary = np.vectorize(decision_boundary)``
    inside the function, which makes ``decision_boundary`` a local name and raises
    UnboundLocalError on the right-hand-side reference; the function always crashed.
    The threshold (>= 0.5 -> 1) is now applied directly with numpy.
    """
    return (np.asarray(predictions) >= .5).astype(int).flatten()
def train(features, labels, weights, lr, iters):
    """Run *iters* gradient-descent steps and return (weights, cost_history).

    Logs the cost to stdout every 1000 iterations (including iteration 0).
    """
    cost_history = []
    for step in range(iters):
        weights = update_weights(features, labels, weights, lr)

        # Track the cost after each step for auditing purposes.
        current_cost = cost_function(features, labels, weights)
        cost_history.append(current_cost)

        # Log progress periodically.
        if step % 1000 == 0:
            print(f"iter: {step} cost: {current_cost}")

    return weights, cost_history
def accuracy(predicted_labels, actual_labels):
    """Fraction of positions where predicted and actual labels agree."""
    mismatches = np.count_nonzero(predicted_labels - actual_labels)
    return 1.0 - mismatches / len(predicted_labels)
def plot_decision_boundary(trues, falses):
    """Scatter the predicted probabilities of the two classes and draw the
    0.5 decision threshold as a horizontal line, then show the figure.

    Fix: removed the unused local ``no_of_preds`` (dead code) and a stray
    statement-terminating semicolon.
    """
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter([i for i in range(len(trues))], trues, s=25, c='b', marker="o", label='Trues')
    ax.scatter([i for i in range(len(falses))], falses, s=25, c='r', marker="s", label='Falses')
    plt.legend(loc='upper right')
    ax.set_title("Decision Boundary")
    ax.set_xlabel('N/2')
    ax.set_ylabel('Predicted Probability')
    # Visual marker for the classification threshold.
    plt.axhline(.5, color='black')
    plt.show()
from typing import Tuple, Union
import numpy as np
from scipy.spatial.transform import Rotation
def generate_billboards_2d(coords: np.ndarray, size: Union[float, np.ndarray] =20) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """
    Returns (vertices, faces, texture coordinates) of <n> standard 2D billboards
    of the given size(s), one quad (two triangles) per coordinate.

    For coords with more than two columns, the leading columns (e.g. time/z)
    are prepended to each quad's vertices; the last two columns are not used
    for vertex placement.
    """
    # Unit quad centred at the origin, counter-clockwise.
    base_quad = np.array([[-0.5, -0.5],
                          [0.5, -0.5],
                          [0.5, 0.5],
                          [-0.5, 0.5]]).astype(np.float32)
    n = len(coords)

    # Normalize size to a per-billboard 1D array.
    if np.isscalar(size):
        sizes = np.ones(n) * size
    else:
        sizes = np.asarray(size)
        assert len(sizes) == n and sizes.ndim == 1

    # Scale one quad per billboard, then flatten to a (4n, 2) vertex list.
    scaled = sizes[:, np.newaxis, np.newaxis] * base_quad[np.newaxis]
    verts = scaled.reshape((-1, scaled.shape[-1]))

    # Carry over any extra leading coordinate dimensions (time/z) if present.
    if coords.shape[1] > 2:
        per_vertex_coords = np.repeat(coords, 4, axis=0)
        verts = np.concatenate([per_vertex_coords[:, :-2], verts], axis=-1)

    # Identical texture coordinates for every quad.
    tex_quad = np.array([[0, 0],
                         [1, 0],
                         [1, 1],
                         [0, 1]]).astype(np.float32)
    texcoords = np.tile(tex_quad, (n, 1))

    # Two triangles per quad, offset by 4 vertex indices per billboard.
    faces = np.tile(np.array([[0, 1, 2], [0, 3, 2]]), (n, 1))
    offsets = (4 * np.arange(n)).repeat(2)[:, np.newaxis]
    faces = faces + offsets

    return verts, faces, texcoords
def unit_quat_random(n:int) -> np.ndarray:
    """Draw *n* uniformly random rotations and return the last three
    components of each unit quaternion, as an (n, 3) array."""
    quats = Rotation.random(n).as_quat()
    return quats[:, 1:]
def _unit_quat_3to4(p: np.ndarray) -> np.ndarray:
w = np.sqrt(1-np.sum(p**2, axis=-1, keepdims=True))
return np.concatenate((w,p), axis=-1)
def unit_quat_scale(p: np.ndarray, scale: float = 1) -> np.ndarray:
    """Scale the rotation angle of each quaternion by *scale* and return the
    last three components of the resulting quaternions.

    Fix: the original body referenced an undefined name ``q`` and ignored
    ``scale`` entirely (a copy-paste of unit_quat_multiply), so it always
    raised NameError. Scaling is now done via rotation vectors, whose norm
    is the rotation angle.

    NOTE(review): the (w, x, y, z) layout produced by ``_unit_quat_3to4`` does
    not match SciPy's (x, y, z, w) ``as_quat`` convention, mirroring the
    surrounding helpers -- confirm the intended convention for this module.
    """
    rot = Rotation.from_quat(_unit_quat_3to4(p))
    scaled = Rotation.from_rotvec(rot.as_rotvec() * scale)
    return scaled.as_quat()[:, 1:]
def unit_quat_multiply(p: np.ndarray, q: np.ndarray) -> np.ndarray:
    """Compose two batches of rotations element-wise and return the last three
    components of the product quaternions, shape (n, 3).

    Fix: the original used ``Rotation.concatenate((pp, qq))``, which *stacks*
    the two batches into one sequence of 2n rotations instead of composing
    them, returning a (2n, 3) array. Composition is ``pp * qq`` in SciPy
    (matching the hand-written Hamilton product commented out below).
    """
    pp = Rotation.from_quat(_unit_quat_3to4(p))
    qq = Rotation.from_quat(_unit_quat_3to4(q))
    composed = (pp * qq).as_quat()
    return composed[:, 1:]
def rotvec_to_quatvec(p: np.ndarray):
return Rotation.from_rotvec(p).as_quat()[:,1:]
# def unit_quaternion_multiply(p: np.ndarray, q: np.ndarray) -> np.ndarray:
# assert p.shape==q.shape and p.ndim==2 and p.shape[1] == 3
# x0, y0, z0 = p.T
# w0 = np.sqrt(1-np.linalg.sum(p**2, axis=-1))
# x1, y1, z1 = q.T
# w1 = np.sqrt(1-np.linalg.sum(q**2, axis=-1))
# return np.stack([x1 * w0 + y1 * z0 - z1 * y0 + w1 * x0,
# -x1 * z0 + y1 * w0 + z1 * x0 + w1 * y0,
# x1 * y0 - y1 * x0 + z1 * w0 + w1 * z0], axis=-1)
# def unit_quaternion_angle(x: np.ndarray) -> np.ndarray:
if __name__ == "__main__":
coords = np.random.uniform(0,1,(4,3))
verts, faces, texc = generate_billboards_2d(coords, size=1)
print(verts.shape)
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from app.core.constants import STEP_LIST_OBJECT_ROLE
from app.sections.step.step_store import StepEntity
from app.themes.theme_provider import is_dark
# Inner padding (in pixels) used when laying out step list items.
PADDING = 5
# Theme-aware colors for the step list delegate (dark vs. light theme).
def step_selected_rect():
    # Background fill of a selected step row.
    return QColor("#505153") if is_dark() else QColor("#CBD8E1")
def step_selected_pen():
    # Text/pen color of a selected step row.
    return Qt.white if is_dark() else Qt.black
def step_bounded_rect_pen():
    # Pen color for the step-type badge text.
    return Qt.white
def step_bounded_rect_fill():
    # Background fill of the rounded step-type badge.
    return QColor("#6ea0da")
def success_color():
    # NOTE(review): both branches return the same color -- possibly a
    # placeholder for a light-theme variant; confirm intended.
    return QColor("#01721d") if is_dark() else QColor("#01721d")
class StepItemDelegate(QStyledItemDelegate):
    """Item delegate that measures (sizeHint) and custom-paints (paint) step
    entries in the step list view: a type badge, a bold title and a
    description line."""
    # Font settings for the step title line.
    TITLE_FONT_SIZE = 12
    TITLE_FONT_BOLD = True
    # Font settings for the step description line.
    DESCRIPTION_FONT_SIZE = 10
    DESCRIPTION_FONT_BOLD = False
def sizeHint(self, option: QStyleOptionViewItem, model_index: QModelIndex):
    """Return the row size for a step item: full row width, title height
    plus vertical padding.

    NOTE(review): returns None (implicitly) for an invalid index, while Qt
    normally expects a QSize here -- confirm this is handled by the caller.
    """
    if not model_index.isValid():
        return
    bounding_rect = option.rect
    step_entity: StepEntity = model_index.data(STEP_LIST_OBJECT_ROLE)
    step_title = step_entity.title
    # title: measure with the same font used by paint() so the reserved
    # height matches what is actually drawn.
    font: QFont = QApplication.font()
    font.setPointSize(self.TITLE_FONT_SIZE)
    font.setBold(self.TITLE_FONT_BOLD)
    font_metrics: QFontMetrics = QFontMetrics(font)
    title_rect = font_metrics.boundingRect(
        0, 0, bounding_rect.width(), 0, Qt.AlignLeft | Qt.AlignTop, step_title
    )
    # Row height = title height plus generous vertical padding.
    size: QSize = QSize(
        option.rect.width(), title_rect.height() + 10 * PADDING,
    )
    return size
def paint(
self, painter: QPainter, option: QStyleOptionViewItem, model_index: QModelIndex
):
if not model_index.isValid():
return
bounding_rect = option.rect
painter.save()
if option.state & QStyle.State_Selected:
painter.fillRect(bounding_rect, step_selected_rect())
painter.setPen(step_selected_pen())
step_entity: StepEntity = model_index.data(STEP_LIST_OBJECT_ROLE)
step_title = step_entity.title
step_description = step_entity.description
step_type = step_entity.step_type.value
padded_step_type = f" {step_type} "
# start draw type
font: QFont = QApplication.font()
font.setPointSize(self.TITLE_FONT_SIZE)
font_metrics: QFontMetrics = QFontMetrics(font)
step_type_rect: QRect = font_metrics.boundingRect(
bounding_rect.left() + PADDING,
bounding_rect.top() + PADDING,
0,
0,
Qt.AlignLeft | Qt.AlignTop,
padded_step_type,
)
painter.setRenderHint(QPainter.Antialiasing)
path = QPainterPath()
path.addRoundedRect(QRectF(step_type_rect), 2, 2)
painter.fillPath(path, step_bounded_rect_fill())
painter.setFont(font)
painter.setPen(step_bounded_rect_pen())
painter.drawText(step_type_rect, Qt.AlignLeft | Qt.AlignTop, padded_step_type)
# end draw type
# start draw title
font: QFont = QApplication.font()
font.setPointSize(self.TITLE_FONT_SIZE)
font.setBold(self.TITLE_FONT_BOLD)
font_metrics: QFontMetrics = QFontMetrics(font)
elided_title = font_metrics.elidedText(
step_title, Qt.ElideRight, bounding_rect.width() - 10 * PADDING
)
# title
title_rect = font_metrics.boundingRect(
step_type_rect.right() + PADDING,
bounding_rect.top() + PADDING,
bounding_rect.width() - 10 * PADDING,
0,
Qt.AlignLeft | Qt.AlignTop,
elided_title,
)
painter.setFont(font)
painter.setPen(step_selected_pen())
painter.drawText(
title_rect, Qt.AlignLeft | Qt.AlignTop | Qt.TextWordWrap, elided_title
)
# end draw title
# start draw description
font.setPointSize(self.DESCRIPTION_FONT_SIZE)
font.setBold(self.DESCRIPTION_FONT_BOLD)
font_metrics: QFontMetrics = QFontMetrics(font)
elided_description = font_metrics.elidedText(
step_description, Qt.ElideMiddle, bounding_rect.width() - 5 * PADDING
)
description_rect = font_metrics.boundingRect(
step_type_rect.left(),
step_type_rect.bottom() + 2 * PADDING,
bounding_rect.width() - PADDING,
0,
Qt.AlignLeft | Qt.AlignTop,
elided_description,
)
painter.setFont(font)
painter.setPen(step_selected_pen())
painter.drawText(
description_rect,
Qt.AlignLeft | Qt.AlignTop | Qt.TextWordWrap,
elided_description,
)
# end draw description
painter.restore()
class CustomStepsListView(QListView):
    """QListView that re-emits the model index at the drop position after an
    internal drag-and-drop completes, so listeners can react to reordering."""

    # Carries the index under the cursor at drop time (may be invalid when
    # the drop lands on empty space).
    dropEventSignal = pyqtSignal(QModelIndex)

    def __init__(self, parent=None):
        super().__init__(parent)

    def dropEvent(self, e: QDropEvent):
        # Let QListView perform the actual item move first, then notify.
        super().dropEvent(e)
        model_index = self.indexAt(e.pos())
        self.dropEventSignal.emit(model_index)
| StarcoderdataPython |
3309673 | <reponame>ajarai/fast-weights<filename>fw/train.py
import cPickle
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import tensorflow as tf
import os
import time
import sys
from data_utils import (
generate_epoch,
)
from model import (
fast_weights_model,
)
from custom_GRU import (
gru_model,
)
class parameters():
    """Hyper-parameter bundle for the associative-retrieval experiments;
    passed around as FLAGS."""
    def __init__(self):
        self.input_dim = 9                  # sequence length of one sample
        self.num_classes = 26+10+1          # a-z, 0-9 and '?'
        self.num_epochs = 1000
        self.batch_size = 128
        self.num_hidden_units = 50
        self.l = 0.95 # decay lambda
        self.e = 0.5 # learning rate eta
        self.S = 1 # num steps to get to h_S(t+1)
        self.learning_rate = 1e-4
        self.learning_rate_decay_factor = 0.99 # don't use this decay
        self.max_gradient_norm = 5.0
        self.data_dir = 'data/'
        self.ckpt_dir = 'checkpoints/'
        self.save_every = max(1, self.num_epochs//4) # checkpoint every num_epochs//4 epochs (250 here)
def create_model(sess, FLAGS):
    """Build the model selected by FLAGS.model_name and restore the latest
    checkpoint from FLAGS.ckpt_dir if one exists; otherwise initialize
    fresh variables.  Returns the model.

    Uses legacy TF1 APIs (tf.gfile, tf.initialize_all_variables).
    """
    if FLAGS.model_name == 'GRU-LN':
        fw_model = gru_model(FLAGS)
    else:
        fw_model = fast_weights_model(FLAGS)
    ckpt = tf.train.get_checkpoint_state(FLAGS.ckpt_dir)
    if ckpt and tf.gfile.Exists(ckpt.model_checkpoint_path):
        print("Restoring old model parameters from %s" %
              ckpt.model_checkpoint_path)
        fw_model.saver.restore(sess, ckpt.model_checkpoint_path)
    else:
        print("Created new model.")
        sess.run(tf.initialize_all_variables())
    return fw_model
def train(FLAGS):
"""
Train the model on the associative retrieval task.
"""
# Load the train/valid datasets
print "Loading datasets:"
with open(os.path.join(FLAGS.data_dir, 'train.p'), 'rb') as f:
train_X, train_y = cPickle.load(f)
print "train_X:", np.shape(train_X), ",train_y:", np.shape(train_y)
with open(os.path.join(FLAGS.data_dir, 'valid.p'), 'rb') as f:
valid_X, valid_y = cPickle.load(f)
print "valid_X:", np.shape(valid_X), ",valid_y:", np.shape(valid_y)
with tf.Session() as sess:
# Load the model
model = create_model(sess, FLAGS)
start_time = time.time()
# Start training
train_epoch_loss = []; valid_epoch_loss = []
train_epoch_accuracy = []; valid_epoch_accuracy = []
train_epoch_gradient_norm = []
for train_epoch_num, train_epoch in enumerate(generate_epoch(
train_X, train_y, FLAGS.num_epochs, FLAGS.batch_size)):
print "EPOCH:", train_epoch_num
# Assign the learning rate
sess.run(tf.assign(model.lr, FLAGS.learning_rate))
#sess.run(tf.assign(model.lr, FLAGS.learning_rate))
# Decay the learning rate
#sess.run(tf.assign(model.lr, FLAGS.learning_rate * \
# (FLAGS.learning_rate_decay_factor ** epoch_num)))
#if epoch_num < 1000:
# sess.run(tf.assign(model.lr, FLAGS.learning_rate))
#elif epoch_num >= 1000: # slow down now
# sess.run(tf.assign(model.lr, 1e-4))
# Custom decay (empirically decided)
#if (epoch_num%1000 == 0):
# sess.run(tf.assign(model.lr,
# FLAGS.learning_rate/(10**(epoch_num//1000))))
# Train set
train_batch_loss = []
train_batch_accuracy = []
train_batch_gradient_norm = []
for train_batch_num, (batch_X, batch_y) in enumerate(train_epoch):
loss, accuracy, norm, _ = model.step(sess, batch_X, batch_y,
FLAGS.l, FLAGS.e, forward_only=False)
train_batch_loss.append(loss)
train_batch_accuracy.append(accuracy)
train_batch_gradient_norm.append(norm)
train_epoch_loss.append(np.mean(train_batch_loss))
train_epoch_accuracy.append(np.mean(train_batch_accuracy))
train_epoch_gradient_norm.append(np.mean(train_batch_gradient_norm))
print ('Epoch: [%i/%i] time: %.4f, loss: %.7f,'
' acc: %.7f, norm: %.7f' % (train_epoch_num, FLAGS.num_epochs,
time.time() - start_time, train_epoch_loss[-1],
train_epoch_accuracy[-1], train_epoch_gradient_norm[-1]))
# Validation set
valid_batch_loss = []
valid_batch_accuracy = []
for valid_epoch_num, valid_epoch in enumerate(generate_epoch(
valid_X, valid_y, num_epochs=1, batch_size=FLAGS.batch_size)):
for valid_batch_num, (batch_X, batch_y) in enumerate(valid_epoch):
loss, accuracy = model.step(sess, batch_X, batch_y,
FLAGS.l, FLAGS.e, forward_only=True)
valid_batch_loss.append(loss)
valid_batch_accuracy.append(accuracy)
valid_epoch_loss.append(np.mean(valid_batch_loss))
valid_epoch_accuracy.append(np.mean(valid_batch_accuracy))
# Save the model
if (train_epoch_num % FLAGS.save_every == 0 or
train_epoch_num == (FLAGS.num_epochs-1)) and \
(train_epoch_num > 0):
if not os.path.isdir(FLAGS.ckpt_dir):
os.makedirs(FLAGS.ckpt_dir)
checkpoint_path = os.path.join(FLAGS.ckpt_dir,
"%s.ckpt" % model_name)
print "Saving the model."
model.saver.save(sess, checkpoint_path,
global_step=model.global_step)
plt.plot(train_epoch_accuracy, label='train accuracy')
plt.plot(valid_epoch_accuracy, label='valid accuracy')
plt.legend(loc=4)
plt.title('%s_Accuracy' % FLAGS.model_name)
plt.show()
plt.plot(train_epoch_loss, label='train loss')
plt.plot(valid_epoch_loss, label='valid loss')
plt.legend(loc=3)
plt.title('%s_Loss' % FLAGS.model_name)
plt.show()
plt.plot(train_epoch_gradient_norm, label='gradient norm')
plt.legend(loc=4)
plt.title('%s_Gradient Norm' % FLAGS.model_name)
plt.show()
# Store results for global plot
with open('%s_results.p' % FLAGS.model_name, 'wb') as f:
cPickle.dump([train_epoch_accuracy, valid_epoch_accuracy,
train_epoch_loss, valid_epoch_loss,
train_epoch_gradient_norm], f)
def test(FLAGS):
    """
    Sample inputs of your own.

    Encodes the hard-coded string into one-hot vectors and prints the
    model's prediction for it.  The fast-weights model (RNN-LN-FW) updates
    its fast memory A(t) even at test time, so it is fed a real batch of
    128 samples with the hand-made sample swapped in at position 0; other
    models are run with batch_size 1.
    """
    # Corpus for indexing
    corpus = ['a','b','c','d','e','f','g','h','i','j','k','l',
              'm','n','o','p','q','r','s','t','u','v','w','x','y','z',
              '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '?']
    # Render the sample to proper input format
    sample = 'g5o8k1??g'
    X = []
    for item in sample:
        X.append(corpus.index(item))
    X_one_hot = np.eye(26+10+1)[np.array(X).astype('int')]
    with tf.Session() as sess:
        if FLAGS.model_name == 'RNN-LN-FW':
            # Inputs need to real inputs of batch_size 128
            # because we use A(t) which updates even during testing
            # Load the model
            model = create_model(sess, FLAGS)
            # Load real samples
            with open(os.path.join(FLAGS.data_dir, 'train.p'), 'rb') as f:
                train_X, train_y = cPickle.load(f)
            for train_epoch_num, train_epoch in enumerate(generate_epoch(
                train_X, train_y, 1, FLAGS.batch_size)):
                for train_batch_num, (batch_X, batch_y) in enumerate(train_epoch):
                    # Replace slot 0 with our hand-made sample
                    batch_X[0] = X_one_hot
                    logits = model.logits.eval(feed_dict={model.X: batch_X,
                        model.l: FLAGS.l, model.e: FLAGS.e})
                    print "INPUT:", sample
                    print "PREDICTION:", corpus[np.argmax(logits[0])]
                    # One batch is enough
                    return
        else:
            # Reset from train sizes to sample sizes
            FLAGS.batch_size = 1
            # Load the model
            model = create_model(sess, FLAGS)
            logits = model.logits.eval(feed_dict={model.X: [X_one_hot],
                model.l: FLAGS.l, model.e: FLAGS.e})
            print "INPUT:", sample
            print "PREDICTION:", corpus[np.argmax(logits)]
def plot_all():
    """
    Plot the results.

    Reads the pickled metric histories written by train() for the four
    model variants and saves comparison figures (accuracy.png, loss.png).
    Index 1 of each pickle is validation accuracy; index 3 is validation
    loss (see the dump order in train()).
    """
    with open('CONTROL_results.p', 'rb') as f:
        control_results = cPickle.load(f)
    with open('RNN-LN_results.p', 'rb') as f:
        RNN_LN_results = cPickle.load(f)
    with open('RNN-LN-FW_results.p', 'rb') as f:
        RNN_LN_FW_results = cPickle.load(f)
    with open('GRU-LN_results.p', 'rb') as f:
        GRU_LN_results = cPickle.load(f)
    # Plotting accuracy
    fig = plt.figure()
    plt.plot(control_results[1], label='Control accuracy')
    plt.plot(RNN_LN_results[1], label='RNN-LN accuracy')
    plt.plot(RNN_LN_FW_results[1], label='RNN-LN-FW accuracy')
    plt.plot(GRU_LN_results[1], label='GRU-LN accuracy')
    plt.title('Test Accuracy')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.legend(loc=4)
    fig.savefig('accuracy.png')
    #plt.show()
    # Plotting loss
    fig = plt.figure()
    plt.plot(control_results[3], label='Control loss')
    plt.plot(RNN_LN_results[3], label='RNN-LN loss')
    plt.plot(RNN_LN_FW_results[3], label='RNN-LN-FW loss')
    plt.plot(GRU_LN_results[3], label='GRU-LN loss')
    plt.title('Test Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend(loc=1)
    fig.savefig('loss.png')
    #plt.show()
if __name__ == '__main__':
    # CLI: `python train.py {train|test} MODEL_NAME` or `python train.py plot`.
    # MODEL_NAME selects the architecture and is also used to namespace the
    # checkpoint directory and result pickles.
    FLAGS = parameters()
    if sys.argv[1] == 'train':
        model_name = sys.argv[2]
        FLAGS.ckpt_dir = FLAGS.ckpt_dir + model_name
        FLAGS.model_name = model_name
        train(FLAGS)
    elif sys.argv[1] == 'test':
        model_name = sys.argv[2]
        FLAGS.ckpt_dir = FLAGS.ckpt_dir + model_name
        FLAGS.model_name = model_name
        test(FLAGS)
    elif sys.argv[1] == 'plot':
        plot_all()
| StarcoderdataPython |
3260639 | __all__ = [
'change_dir',
'suppress_stderr',
'start_server',
]
import contextlib
import http.server
import io
import os
import socketserver
import sys
import threading
@contextlib.contextmanager
def change_dir(path):
    """Temporarily make *path* the working directory.

    Yields the path unchanged and always restores the previous working
    directory, even if the body raises.
    """
    previous = os.getcwd()
    os.chdir(str(path))
    try:
        yield path
    finally:
        os.chdir(previous)
@contextlib.contextmanager
def suppress_stderr():
    """Capture sys.stderr into an in-memory buffer for the duration.

    Yields ``(original_stderr, buffer)``.  If the body raises, the captured
    output is replayed onto the real stderr before the exception propagates.
    sys.stderr is always restored on exit.
    """
    buffer = io.StringIO()
    original = sys.stderr
    sys.stderr = buffer
    try:
        yield original, buffer
    except BaseException:
        print(buffer.getvalue(), file=original)
        raise
    finally:
        sys.stderr = original
@contextlib.contextmanager
def start_server(port=8000):
    """Serve the current directory over HTTP on 127.0.0.1 in a background thread.

    Generalized: the previously hard-coded port 8000 is now a parameter
    (default unchanged, so existing callers are unaffected).  Pass ``port=0``
    to let the OS pick a free port; the bound address is available via
    ``httpd.server_address``.

    Yields the server and guarantees shutdown, socket close and thread join
    on exit, even if the body raises.
    """
    httpd = socketserver.TCPServer(
        ('127.0.0.1', port), http.server.SimpleHTTPRequestHandler,
    )
    thread = threading.Thread(name='httpd', target=httpd.serve_forever)
    thread.start()
    try:
        yield httpd
    finally:
        httpd.shutdown()
        httpd.server_close()
        thread.join()
| StarcoderdataPython |
3238769 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-04-29 21:27
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django 1.11 migration for the profiles app.

    Replaces the old ``Image`` model with ``ProfileImage`` (FK to the auth
    user) and tightens defaults/choices on several ``Profile`` fields.
    Generated code — make schema changes via new migrations, not by editing
    this file.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('profiles', '0004_auto_20190429_2051'),
    ]
    operations = [
        migrations.CreateModel(
            name='ProfileImage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.ImageField(upload_to='media/')),
                ('user', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.RemoveField(
            model_name='image',
            name='user',
        ),
        migrations.AlterField(
            model_name='profile',
            name='bio',
            field=models.TextField(default='', max_length=500),
        ),
        migrations.AlterField(
            model_name='profile',
            name='birth_date',
            field=models.DateField(blank=True, default='1950-01-01', null=True),
        ),
        migrations.AlterField(
            model_name='profile',
            name='body_type',
            field=models.CharField(choices=[('THIN', 'Thin'), ('AVERAGE', 'Average'), ('FIT', 'Fit'), ('MUSCULAR', 'Muscular'), ('A LITTLE EXTRA', 'A Little Extra'), ('CURVY', 'Curvy')], default='AVERAGE', max_length=15),
        ),
        migrations.AlterField(
            model_name='profile',
            name='hair_colour',
            field=models.CharField(choices=[('BLACK', 'Black'), ('BLONDE', 'Blonde'), ('BROWN', 'Brown'), ('RED', 'Red'), ('GREY', 'Grey'), ('BALD', 'Bald'), ('BLUE', 'Blue'), ('PINK', 'Pink'), ('GREEN', 'Green'), ('PURPLE', 'Purple'), ('OTHER', 'Other')], default='BLACK', max_length=10),
        ),
        migrations.AlterField(
            model_name='profile',
            name='location',
            field=models.CharField(default='', max_length=30),
        ),
        migrations.AlterField(
            model_name='profile',
            name='looking_for',
            field=models.CharField(choices=[('MEN', 'Men'), ('WOMEN', 'Women'), ('BOTH', 'Both')], default='BOTH', max_length=5),
        ),
        migrations.DeleteModel(
            name='Image',
        ),
    ]
| StarcoderdataPython |
6570360 | <filename>app/db/models/bewertung.py
"""Bewertung structure for the DB"""
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy.orm import relationship
from sqlalchemy.sql import func
from db.base import Base
class BewertungRestaurant(Base):
    """
    Model for SQLAlchemy for the bewertung (assessment) Table in the DB

    Attributes:
        person_email (str): Primary Key and ForeignKey of db.models.person.Person email
        place_id (str): Primary Key and ForeignKey of db.models.restaurant.Restaurant place_id
        zeitstempel (sqlalchemy.DateTime): Timestamp; defaulted and refreshed
            by the DB server (server_default / server_onupdate func.now())
        kommentar (str): Optional free-text comment (can be None)
        rating (int): Rating of the assessment
        person (db.models.person.Person)
        restaurant (db.models.restaurant.Restaurant)
    """
    __tablename__ = "bewertungRestaurant"
    person_email = Column(String, ForeignKey("person.email", ondelete="CASCADE"), primary_key=True)
    place_id = Column(String, ForeignKey("restaurant.place_id", ondelete="CASCADE"), primary_key=True)
    zeitstempel = Column(DateTime(timezone=True), server_onupdate=func.now(), server_default=func.now())
    kommentar = Column(String, nullable=True)
    rating = Column(Integer, nullable=False)
    person = relationship("Person", back_populates="bewertungenRest")
    restaurant = relationship("Restaurant", back_populates="bewertungen")
class BewertungRecipe(Base):
    """
    Model for SQLAlchemy for the recipe-assessment Table in the DB

    Attributes:
        person_email (str): Primary Key and ForeignKey of db.models.person.Person email
        rezept_id (str): Primary Key.
            NOTE(review): its ForeignKey targets "restaurant.place_id",
            which looks like a copy-paste from BewertungRestaurant — a
            recipe assessment presumably should reference a recipe table.
            Verify against the schema before relying on this relation.
        rezept_name (str): Can not be None
        zeitstempel (sqlalchemy.DateTime): Timestamp; defaulted and refreshed
            by the DB server (server_default / server_onupdate func.now())
        kommentar (str): Optional free-text comment (can be None)
        rating (int): Rating of the assessment
        person (db.models.person.Person)
    """
    __tablename__ = "bewertungRezept"
    person_email = Column(String, ForeignKey("person.email", ondelete="CASCADE"), primary_key=True)
    rezept_id = Column(String, ForeignKey("restaurant.place_id", ondelete="CASCADE"), primary_key=True)
    rezept_name = Column(String, nullable=False)
    zeitstempel = Column(DateTime(timezone=True), server_onupdate=func.now(), server_default=func.now())
    kommentar = Column(String, nullable=True)
    rating = Column(Integer, nullable=False)
    person = relationship("Person", back_populates="bewertungenRezept")
| StarcoderdataPython |
338804 | <filename>jet_sidebar/templatetags/__init__.py
__author__ = 'raif21x'
| StarcoderdataPython |
3468734 | <reponame>vyvivekyadav04/InstaClone<filename>ourfirstapp/views.py<gh_stars>1-10
from django.shortcuts import render, redirect
from forms import SignUpForm, LoginForm, PostForm, LikeForm, CommentForm
from models import UserModel, SessionToken, PostModel, LikeModel, CommentModel
from django.contrib.auth.hashers import make_password, check_password
from datetime import timedelta
from django.utils import timezone
from testpro.settings import BASE_DIR
from imgurpython import ImgurClient
# Create your views here.
def signup_view(request):
    """Render the signup form; on valid POST, create a UserModel and show the
    success page.  On GET (or invalid POST) render the form again.

    Fix: the password-hashing call had been mangled to an invalid
    ``<PASSWORD>(...)`` placeholder; restored to ``make_password`` (already
    imported from django.contrib.auth.hashers at the top of this file) so
    the raw password is never stored.
    """
    if request.method == "POST":
        form = SignUpForm(request.POST)
        if form.is_valid():
            username = form.cleaned_data['username']
            name = form.cleaned_data['name']
            email = form.cleaned_data['email']
            password = form.cleaned_data['password']
            # Hash before saving — never persist the raw password.
            user = UserModel(name=name, password=make_password(password),
                             email=email, username=username)
            user.save()
            return render(request, 'success.html')
    else:
        form = SignUpForm()
    return render(request, 'index.html', {'form' : form})
def login_view(request):
    """Authenticate a user and start a cookie-based session.

    On success, creates and saves a SessionToken, sets it as the
    'session_token' cookie and redirects to the feed.  On failure, a
    message is placed in the template context.

    Fix: previously a non-existent username fell through silently with no
    feedback; it now sets an explanatory message.
    """
    response_data = {}
    if request.method == "POST":
        form = LoginForm(request.POST)
        if form.is_valid():
            username = form.cleaned_data.get('username')
            password = form.cleaned_data.get('password')
            user = UserModel.objects.filter(username=username).first()
            if user:
                if check_password(password, user.password):
                    token = SessionToken(user=user)
                    token.create_token()
                    token.save()
                    response = redirect('feed/')
                    response.set_cookie(key='session_token', value=token.session_token)
                    return response
                else:
                    response_data['message'] = 'Incorrect Password! Please try again!'
            else:
                # Previously this case gave the user no feedback at all.
                response_data['message'] = 'User does not exist! Please sign up first!'
    elif request.method == 'GET':
        form = LoginForm()
        response_data['form'] = form
    return render(request, 'login.html', response_data)
def post_view(request):
    """Create a new post for the logged-in user: save the uploaded image,
    mirror it to Imgur, store the Imgur link, and redirect to the feed.
    GET renders an empty post form; unauthenticated users go to login.

    NOTE(review): YOUR_CLIENT_ID / YOUR_CLIENT_SECRET are undefined
    placeholders in this file — as written this raises NameError on POST
    unless they are defined elsewhere.  They should come from settings or
    the environment; confirm before deploying.
    """
    user = check_validation(request)
    if user:
        if request.method == 'POST':
            form = PostForm(request.POST, request.FILES)
            if form.is_valid():
                image = form.cleaned_data.get('image')
                caption = form.cleaned_data.get('caption')
                post = PostModel(user=user, image=image, caption=caption)
                post.save()
                # Upload the locally-saved image to Imgur and keep the link.
                path = str(BASE_DIR + post.image.url)
                client = ImgurClient(YOUR_CLIENT_ID, YOUR_CLIENT_SECRET)
                post.image_url = client.upload_from_path(path,anon=True)['link']
                post.save()
                return redirect('/feed/')
        else:
            form = PostForm()
        return render(request, 'post.html', {'form' : form})
    else:
        return redirect('/login/')
def feed_view(request):
    """Render the feed (oldest posts first), flagging posts the current
    user has already liked; unauthenticated users are sent to login."""
    user = check_validation(request)
    if not user:
        return redirect('/login/')
    posts = PostModel.objects.all().order_by('created_on')
    for post in posts:
        liked_already = LikeModel.objects.filter(post_id=post.id, user=user).first()
        if liked_already:
            post.has_liked = True
    return render(request, 'feed.html', {'posts': posts})
def like_view(request):
    """Toggle the current user's like on a post, then return to the feed.

    A second like on the same post removes the existing one.  Anything
    other than an authenticated POST redirects to login.
    """
    user = check_validation(request)
    if not (user and request.method == 'POST'):
        return redirect('/login/')
    form = LikeForm(request.POST)
    if form.is_valid():
        post_id = form.cleaned_data.get('post').id
        prior_like = LikeModel.objects.filter(post_id=post_id, user=user).first()
        if prior_like:
            prior_like.delete()
        else:
            LikeModel.objects.create(post_id=post_id, user=user)
    return redirect('/feed/')
def comment_view(request):
    """Attach a comment to a post for the logged-in user, then return to
    the feed (also on an invalid form).  Non-POST or unauthenticated
    requests go to login.

    Fixes: removed the redundant ``comment.save()`` after
    ``objects.create()`` — create() already persists the row, so the extra
    call issued a useless UPDATE; and normalized the login redirect to
    '/login/' to match every other view in this module.
    """
    user = check_validation(request)
    if user and request.method == 'POST':
        form = CommentForm(request.POST)
        if form.is_valid():
            post_id = form.cleaned_data.get('post').id
            comment_text = form.cleaned_data.get('comment_text')
            CommentModel.objects.create(user=user, post_id=post_id, comment_text=comment_text)
            return redirect('/feed/')
        else:
            return redirect('/feed/')
    else:
        return redirect('/login/')
#For validating the session
def check_validation(request):
if request.COOKIES.get('session_token'):
session = SessionToken.objects.filter(session_token=request.COOKIES.get('session_token')).first()
if session:
time_to_live = session.created_on + timedelta(days=1)
if time_to_live > timezone.now():
return session.user
else:
return None | StarcoderdataPython |
4839637 | <gh_stars>0
#!/usr/bin/env python3
# continually logs the key presses and their frequency over 9 second window. Logs are written
# in logs/keyfreqX.txt every 9 seconds, where X is unix timestamp of 7am of the
# recording day.
import queue
from threading import Thread
from pynput import mouse
import pathlib
from os.path import dirname, join, abspath
import os
import time
import random
import subprocess
import sys
# The logger must run from the repo root so it can find rewind7am.py and logs/.
cwd = pathlib.Path(os.getcwd()).absolute().resolve()
assert (cwd / "rewind7am.py").is_file(), "All logging scripts have to be run in the main directory"
def rewind(t):
    # Map a unix timestamp to its "recording day" key (the 7am boundary)
    # by delegating to the rewind7am.py helper script in the repo root.
    return int(subprocess.check_output([str(cwd / "rewind7am.py"), str(t)]))
logs_dir = cwd / "logs"
# NOTE(review): key_events is never used in this file — presumably a
# leftover from the keyboard-logger variant; confirm before removing.
key_events = queue.Queue()
INTERVAL = 9  # seconds per logging window
def write_events(interval_start, events):
    """Append one frequency line and the raw (timestamp, event) pairs to the
    log files of the recording day containing *interval_start*."""
    # freq_file / codes_file / interval_start keep their names on purpose:
    # they are printed via f-string `=` debug specifiers below.
    day_key = rewind(interval_start)
    assert type(day_key) is int
    freq_file = logs_dir / f"mousefreq_{day_key}.txt"
    codes_file = logs_dir / f"mouse_events_{day_key}.txt"
    total = len(events)
    print(f"writing out {total} mouse events @ {interval_start=} to {freq_file=}, {codes_file=}")
    with freq_file.open("a") as out:
        out.write(f'{int(interval_start)} {total}\n')
    with codes_file.open('a') as out:
        for stamp, event in events:
            out.write(f'{int(stamp)} {event}\n')
def take_while(ls, pred):
    """Split *ls* into ``(prefix, rest)``: the longest leading run whose
    elements all satisfy *pred*, and everything after it.

    Bug fix: the original returned ``([], ls)`` when *every* element
    satisfied the predicate, discarding the accumulated prefix — at the
    call site this kept fully-expired events queued instead of writing
    them out for their interval.
    """
    assert type(ls) is list
    prefix = []
    for i, item in enumerate(ls):
        if not pred(item):
            return prefix, ls[i:]
        prefix.append(item)
    return prefix, []
# Main capture loop: collect pynput mouse events into `pending`, and every
# time the 9-second interval boundary passes, flush the events belonging to
# the finished interval(s) to the day's log files.
cur_end = int(time.time()) + INTERVAL
pending = []
with mouse.Events() as events:
    while True:
        max_wait = max(0, cur_end + 1 - time.time()) # wait for an extra second to give events time to come in
        e = events.get(max_wait)
        ts = time.time()
        if e is not None:
            pending.append((ts, e))
        # May loop multiple times if we slept through several intervals.
        while ts > cur_end:
            interval_events, pending = take_while(pending, lambda x: x[0] < cur_end) # take out ones in this interval
            write_events(cur_end - INTERVAL, interval_events) # write out
            cur_end += INTERVAL # advance interval
6570730 | <reponame>giselemanuel/programming-challenges
"""
Exercício Python 29: Escreva um programa que leia a velocidade de um carro.
Se ele ultrapassar 80Km/h, mostre uma mensagem dizendo que ele foi multado.
A multa vai custar R$7,00 por cada Km acima do limite.
"""
print("-" * 40)
print(f"{'Radar Eletrônico':^40}")
print("-" * 40)
multa = 7
km = float(input("Insira a sua velocidade (km) : "))
if km > 80:
multa = multa * (km - 80)
print(f"Sua multa é de R$ {multa:.2f}")
else:
print("Obrigada por dirigir no limite da velocidade.") | StarcoderdataPython |
9697453 | <reponame>alisterburt/eulerangles<filename>eulerangles/math/constants.py
# The 12 valid Euler-angle axis orderings: six Tait-Bryan conventions
# (three distinct axes) and six "proper Euler" conventions (first and
# third axis repeated).
valid_axes = (
    'xyz',
    'xyx',
    'xzx',
    'xzy',
    'yxy',
    'yxz',
    'yzx',
    'yzy',
    'zxy',
    'zxz',
    'zyx',
    'zyz',
)
# Rotation matrices may be composed about intrinsic (body-fixed) or
# extrinsic (space-fixed) axes.
valid_matrix_composition_modes = (
    'intrinsic',
    'extrinsic'
)
| StarcoderdataPython |
3591628 | import click
import pytorch_lightning as pl
import torch
from pytorch_lightning import loggers as pl_loggers
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.plugins import DeepSpeedPlugin
from pytorch_pqrnn.dataset import create_dataloaders
from pytorch_pqrnn.model import PQRNN
from rich.console import Console
console = Console()
@click.command()
@click.option(
    "--task",
    type=click.Choice(["yelp2", "yelp5", "toxic"], case_sensitive=False),
    default="yelp5",
    show_default=True,
)
@click.option("--b", type=int, default=128, show_default=True)
@click.option("--d", type=int, default=96, show_default=True)
@click.option("--num_layers", type=int, default=2, show_default=True)
@click.option("--batch_size", type=int, default=512, show_default=True)
@click.option("--dropout", type=float, default=0.5, show_default=True)
@click.option("--lr", type=float, default=1e-3, show_default=True)
@click.option("--nhead", type=int, default=4, show_default=True)
@click.option(
    "--rnn_type",
    type=click.Choice(
        ["LSTM", "GRU", "QRNN", "Transformer"], case_sensitive=False
    ),
    default="GRU",
    show_default=True,
)
@click.option("--data_path", type=str, default="data")
def train(
    task: str,
    b: int,
    d: int,
    num_layers: int,
    batch_size: int,
    dropout: float,
    lr: float,
    nhead: int,
    rnn_type: str,
    data_path: str,
) -> None:
    """CLI entry point: train a PQRNN text classifier on the chosen task.

    Builds train/dev dataloaders, instantiates the model (b = projection
    feature half-size, d = hidden size) and runs a PyTorch Lightning
    Trainer with early stopping and checkpointing.

    NOTE(review): ``deepspeed_config`` is constructed below but never
    passed to the Trainer (and DeepSpeedPlugin is imported but unused in
    this view) — presumably a disabled experiment; confirm before relying
    on any of its settings.
    """
    deepspeed_config = {
        "zero_allow_untested_optimizer": True,
        "optimizer": {
            "type": "Adam",
            "params": {
                "lr": lr,
                "betas": [0.998, 0.999],
                "eps": 1e-5,
                "weight_decay": 1e-9,
            },
        },
        "scheduler": {
            "type": "WarmupLR",
            "params": {
                "last_batch_iteration": -1,
                "warmup_min_lr": 0,
                "warmup_max_lr": 3e-5,
                "warmup_num_steps": 100,
            },
        },
        "zero_optimization": {
            "stage": 2,  # Enable Stage 2 ZeRO (Optimizer/Gradient state partitioning)
            "cpu_offload": True,  # Enable Offloading optimizer state/calculation to the host CPU
            "contiguous_gradients": True,  # Reduce gradient fragmentation.
            "overlap_comm": True,  # Overlap reduce/backward operation of gradients for speed.
            "allgather_bucket_size": 2e8,  # Number of elements to all gather at once.
            "reduce_bucket_size": 2e8,  # Number of elements we reduce/allreduce at once.
        },
    }
    train_dataloader, dev_dataloader = create_dataloaders(
        task,
        batch_size=batch_size,
        feature_size=b * 2,
        label2index=None,
        data_path=data_path,
    )
    # Output size per task; the toxic task is multilabel (6 tags).
    num_classes = {"yelp2": 2, "yelp5": 5, "toxic": 6}.get(task, 2)
    model = PQRNN(
        b=b,
        d=d,
        lr=lr,
        num_layers=num_layers,
        dropout=dropout,
        output_size=num_classes,
        rnn_type=rnn_type,
        multilabel=task == "toxic",
        nhead=nhead,
    )
    trainer = pl.Trainer(
        logger=pl_loggers.TensorBoardLogger("lightning_logs", log_graph=False),
        callbacks=[EarlyStopping(monitor="val_loss", patience=5)],
        checkpoint_callback=ModelCheckpoint(
            "./checkpoints/", monitor="val_loss"
        ),
        min_epochs=2,
        deterministic=True,
        val_check_interval=0.2,  # validate 5x per epoch
        gpus=list(range(torch.cuda.device_count()))
        if torch.cuda.is_available()
        else None,
        gradient_clip_val=1.0,
        accelerator="ddp" if torch.cuda.is_available() else None,
        precision=16 if torch.cuda.is_available() else 32,
        # Transformers use a bigger effective batch via accumulation.
        accumulate_grad_batches=2 if rnn_type == "Transformer" else 1,
    )
    trainer.fit(model, train_dataloader, dev_dataloader)
if __name__ == "__main__":
    train()
| StarcoderdataPython |
12814169 | import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import pickle as pkl
from sklearn.decomposition import PCA
class PCA_Analysis():
    """Fit a PCA on (X, y) and visualize both the component weights and the
    class separation in the reduced space (supports 1-3 components).

    Attributes:
        X: feature matrix of shape (n_samples, n_features).
        y: one-hot labels of shape (n_samples, n_classes).
        N_components: number of principal components to keep.
        feature_label: per-feature display names (defaults to "0", "1", ...).
    """

    def __init__(self, X, y, PCA_N_components, feature_label=None):
        self.X = X
        self.y = y
        self.N_components = PCA_N_components
        self.feature_label = feature_label
        if self.feature_label is None:
            self.feature_label = [str(i) for i in range(self.X.shape[1])]

    def do_pca(self):
        """Fit the PCA; store reduced samples (n_samples, N_components) in
        self.reduced_x and component weights in self.comp."""
        pca = PCA(n_components=self.N_components, whiten=False)
        self.reduced_x = pca.fit_transform(self.X)
        self.comp = pca.components_

    def plot_pca_components_weights(self):
        """Visualize feature weights of the first component as mirrored
        positive/negative horizontal bars (N_components == 1), otherwise
        as a heatmap of self.comp."""
        # Colors for positive vs. negative weights (bars and labels alike).
        OPP_COLOR = "firebrick"
        NEG_COLOR = "dodgerblue"
        if (self.N_components == 1):
            ax1 = plt.subplot(122)  # right panel: positive weights
            ax2 = plt.subplot(121)  # left panel: negative weights
            # Track which features carry negative/zero weights so the
            # feature labels can be colored accordingly below.
            minus_index = []
            zero_index = []
            def plt_one_direction_bar(ax, direction):
                # Plot only the weights of the requested sign; other
                # features get a zero-length bar to keep rows aligned.
                x = []
                y = []
                for i in range(self.comp.shape[1]):
                    if (direction == 1 and self.comp[0, i] > 0) or \
                            (direction == -1 and self.comp[0, i] < 0):
                        if (self.comp[0, i]) < 0: minus_index.append(i)
                        if (self.comp[0, i]) == 0: zero_index.append(i)
                        x.append(i)
                        y.append(self.comp[0, i])
                    else:
                        x.append(i)
                        y.append(0)
                if (direction == 1):
                    ax.barh(x, y, color=OPP_COLOR)
                elif (direction == -1):
                    ax.barh(x, y, color=NEG_COLOR)
                return ax
            ax1.get_yaxis().set_visible(False)
            ax2.get_yaxis().set_visible(False)
            # Bug fix: the original called ax1.set_xlim twice (the first call
            # was a no-op); mirror the left panel on ax2 as evidently intended.
            ax2.set_xlim(0, -0.35)
            ax1.set_xlim(0, 0.35)
            ax1.spines['top'].set_visible(False)
            ax1.spines['left'].set_visible(False)
            ax1.spines['right'].set_visible(False)
            ax2.spines['top'].set_visible(False)
            ax2.spines['left'].set_visible(False)
            ax2.spines['right'].set_visible(False)
            plt_one_direction_bar(ax1, 1)
            plt_one_direction_bar(ax2, -1)
            plt.subplots_adjust(wspace=0.11, hspace=0)
            for i in range(len(self.feature_label)):
                string = self.feature_label[i]
                if i in minus_index:
                    color = NEG_COLOR
                else:
                    color = OPP_COLOR
                plt.text(0.005, i - 0.4, string, horizontalalignment='center', color=color)
            plt.show()
        else:
            plt.imshow(self.comp)
            plt.show()

    def show_pca_classification_results(self):
        """Scatter the PCA-reduced samples, one color per class.

        Bug fixes vs. the original:
        - used the module-level global ``y`` instead of ``self.y``;
        - the try/except grouping dropped the first sample of every class
          (the except branch created the list but never appended);
        - ``reshape(N_components, -1)`` scrambled samples across
          components; a transpose gives (N_components, n_samples);
        - the error message said "4 dimension" for a ``> 3`` check;
        - removed the unused ``total_used_sample`` accumulator.
        """
        if self.N_components > 3:
            raise ValueError("Can not plot more than 3 dimensions")
        # Group the reduced samples by one-hot label index.
        total_label_pos_data = {}
        for sample_index in range(self.reduced_x.shape[0]):
            label = list(self.y[sample_index]).index(1)
            total_label_pos_data.setdefault(label, []).append(
                self.reduced_x[sample_index, :])
        color = ["dodgerblue", "firebrick", "green", "cyan", "magenta", "yellow"]
        if self.N_components == 3: ax = plt.subplot(111, projection="3d")
        for label_index in range(len(total_label_pos_data.keys())):
            data = np.array(total_label_pos_data[label_index])
            data = data.T  # -> (N_components, n_samples_for_this_label)
            if (self.N_components == 1):
                # 1-D: jitter the x positions so overlapping points show up.
                plt.plot(np.random.randint(low=0, high=1000, size=data[0, :].shape), data[0, :], "o",
                         color=color[label_index], alpha=0.5)
            elif (self.N_components == 2):
                plt.plot(data[0, :], data[1, :], "o", color=color[label_index])
            elif (self.N_components == 3):
                ax.scatter(data[0, :], data[1, :], data[2, :], "x", color=color[label_index])
        plt.show()
if __name__ == '__main__':
    # 1. load data; X: (n_samples, n_features), y: (n_samples, n_classes) one-hot
    #    example data shape: X (4000, 49), y (4000, 6)
    X = np.load("X.npy")
    y = np.load("y.npy")
    # 2. fit a 3-component PCA
    test = PCA_Analysis(X, y, 3)
    test.do_pca()
    # 3. show how each feature contributes to the reduced components
    test.plot_pca_components_weights()
    # 4. plot the class separation in the reduced space
    test.show_pca_classification_results()
| StarcoderdataPython |
3223188 | <gh_stars>0
import os

# Run the five cross-validation folds of the unet++ / timm-regnetx_002
# "random snow" medseg experiment sequentially via the shell.
COMMANDS = [
    "python main.py --configs configs/train_medseg_unetplusplus_timm-regnetx_002_fold0_random_snow.yml",
    "python main.py --configs configs/train_medseg_unetplusplus_timm-regnetx_002_fold1_random_snow.yml",
    "python main.py --configs configs/train_medseg_unetplusplus_timm-regnetx_002_fold2_random_snow.yml",
    "python main.py --configs configs/train_medseg_unetplusplus_timm-regnetx_002_fold3_random_snow.yml",
    "python main.py --configs configs/train_medseg_unetplusplus_timm-regnetx_002_fold4_random_snow.yml",
]

for command in COMMANDS:
    os.system(command)
8137905 | <reponame>ivan-golub/rules_ios
# Central registry of Bazel feature-flag names used by the Apple rules.
feature_names = struct(
    # Virtualize means that swift,clang read from llvm's in-memory file system
    virtualize_frameworks = "apple.virtualize_frameworks",
    # Use the ARM deps for the simulator - see rules/import_middleman.bzl
    arm64_simulator_use_device_deps = "apple.arm64_simulator_use_device_deps",
    # NOTE(review): undocumented in the original — presumably forces the
    # arm64 cpu for simulator builds under Bazel 4; confirm against usage.
    bazel4_override_simulator_cpu_arm64 = "bazel4.override_simulator_cpu_arm64",
)
| StarcoderdataPython |
3234094 | <filename>dot/tests/test_model.py
import numpy as np
from lightkurve import LightCurve
import pymc3 as pm
import pytest
from ..model import Model
np.random.seed(42)
# Create a grid of spot longitudes, latitudes, radii and stellar inclinations
# for iterating over in the tests below...
n_trials = 10
lon_grid = 2 * np.pi * np.random.rand(n_trials, 2)
lat_grid = 0.5 * np.pi * (np.random.rand(n_trials, 2) - 0.5)
rad_grid = 0.05 + np.max([0.01 * np.random.randn(n_trials, 2),
np.zeros((n_trials, 2))], axis=0)
inc_grid = 90 * np.random.rand(n_trials)
# This decorator at the top allows us to iterate over the various pairs
# of spot lons, lats, rads, and stellar inclinations. We compare the result
# of the chi^2 computation with the 1e-9 and make sure it's smaller, i.e.
# we test for excellent agreement between fleck and dot.
# Iterate over the random pairs of spot lons/lats/radii and stellar
# inclinations defined above; for each, require log10(chi^2) < -6, i.e.
# excellent agreement between the fleck and dot light curves.
@pytest.mark.parametrize("test_input,expected",
                         [((lons, lats, rads, inc), -6)
                          for lons, lats, rads, inc in
                          zip(lon_grid, lat_grid, rad_grid, inc_grid)])
def test_against_fleck(test_input, expected):
    """Check that dot's starspot light-curve model reproduces fleck's."""
    from fleck import Star
    import astropy.units as u
    # Unpack the input
    lons, lats, rads, inc = test_input
    # Create a time axis and compute the fleck light curve on that axis
    times = np.linspace(-4 * np.pi, 4 * np.pi, 100)
    s = Star(spot_contrast=0.3, u_ld=[0, 0], n_phases=len(times),
             rotation_period=2 * np.pi)
    # Note: the longitudes in fleck and dot are different by a constant
    # offset of -pi/2
    lc = s.light_curve((lons - np.pi / 2).T[:, None] * u.rad,
                       lats.T[:, None] * u.rad,
                       rads.T[:, None], inc * u.deg,
                       times=times, time_ref=0)[:, 0]
    # Define (arbitrary) error for the light curve
    errs = np.std(lc) * np.ones_like(lc) / 10
    m = Model(
        light_curve=LightCurve(times, lc - np.median(lc), errs),
        rotation_period=2 * np.pi,
        n_spots=2,
        partition_lon=False,
        contrast=0.3
    )
    # Create a starting point for the dot model in the correctly transformed
    # notation from fleck to dot
    start = {
        "dot_R_spot": np.array([rads]),
        "dot_lat": np.array([np.pi / 2 - lats]),
        "dot_lon": np.array([lons]),
        "dot_comp_inc": np.radians(90 - inc),
        "dot_ln_shear": np.log(1e-2),
        "dot_P_eq": 2 * np.pi,
        "dot_f0": m.lc.flux.max()
    }
    # Need to call this to validate ``start``
    pm.util.update_start_vals(start, m.pymc_model.test_point, m.pymc_model)
    # the fit is not normalized to its median like the input light curve is
    fit = m(start)
    # ...so we normalize it before we compare:
    fit -= np.median(fit)
    # Compute the chi^2. This should be super small if both models agree!
    chi2 = np.sum((m.lc.flux - fit) ** 2)
    assert np.log10(chi2) < expected
| StarcoderdataPython |
36096 | import pytest
from buyer import serializers
@pytest.mark.django_db
def test_buyer_deserialization():
    """A valid payload deserializes and saves with all fields preserved."""
    payload = {
        'email': '<EMAIL>',
        'name': '<NAME>',
        'sector': 'AEROSPACE',
        'company_name': 'Example corp',
        'country': 'China',
    }

    serializer = serializers.BuyerSerializer(data=payload)
    assert serializer.is_valid()

    buyer = serializer.save()
    # Every submitted field must round-trip onto the saved instance.
    for field in ('email', 'name', 'sector', 'company_name', 'country'):
        assert getattr(buyer, field) == payload[field]
| StarcoderdataPython |
1610662 | <reponame>poizon/SublimeLinter
import sublime
import sublime_plugin
from Default import history_list
from itertools import dropwhile, takewhile
from .lint import persist, util
from .lint.util import flash
# MYPY is never True at runtime: the guarded imports below exist purely for
# the type checker, so typing_extensions need not be installed at runtime.
MYPY = False
if MYPY:
    from typing import Union
    from typing_extensions import Literal
    # Allowed values for the goto ``direction`` argument.
    Direction = Union[Literal['next'], Literal['previous']]
class sublime_linter_goto_error(sublime_plugin.TextCommand):
    """Jump the cursor to the next/previous lint problem in this view."""
    def run(self, edit, direction='next', count=1, wrap=False):
        # type: (sublime.Edit, Direction, int, bool) -> None
        goto(self.view, direction, count, wrap)
def goto(view, direction, count, wrap):
    # type: (sublime.View, Direction, int, bool) -> None
    """Move the cursor ``count`` problems forward or backward, optionally
    wrapping around the buffer, flashing a status message as we go."""
    filename = util.get_filename(view)
    errors = persist.file_errors.get(filename)
    if not errors:
        flash(view, 'No problems')
        return

    cursor = view.sel()[0].begin()
    # Candidate jump targets: unique region start positions, excluding any
    # region the cursor already sits in (we never jump to the error under
    # the cursor).  Sorted ascending.
    candidates = sorted({
        error['region'].begin()
        for error in errors
        if not error['region'].contains(cursor)})

    # Edge case: we may sit on the last remaining error, in which case
    # there is nothing to jump to and even `wrap` is a no-op.
    if not candidates:
        flash(view, 'No more problems')
        return

    backwards = direction == 'previous'
    if backwards:
        # Positions strictly before the cursor, nearest-first.
        targets = [pos for pos in reversed(candidates) if pos < cursor]
    else:
        # Positions at or after the cursor, nearest-first.
        targets = [pos for pos in candidates if not pos < cursor]

    if not targets:
        if wrap:
            point = candidates[-1] if backwards else candidates[0]
            flash(
                view,
                'Jumped to {} problem'.format('last' if backwards else 'first'))
        else:
            flash(
                view,
                'No more problems {}'.format('above' if backwards else 'below'))
            return
    else:
        # Jump as far as requested, clamped to the widest possible jump so
        # we reduce disorientation instead of wrapping.
        point = targets[min(count, len(targets)) - 1]
    move_to(view, point)
class _sublime_linter_move_cursor(sublime_plugin.TextCommand):
    """Move the cursor via a ``TextCommand`` so `on_selection_modified`
    handlers are guaranteed to run.

    See: https://github.com/SublimeLinter/SublimeLinter/pull/867
    and https://github.com/SublimeTextIssues/Core/issues/485#issuecomment-337480388
    """

    def run(self, edit, point):
        selection = self.view.sel()
        selection.clear()
        selection.add(point)
        self.view.show(point)
def move_to(view, point):
    # type: (sublime.View, int) -> None
    # Record the current selection on Sublime's jump history stack so the
    # user can navigate back after the jump, then move via the helper
    # TextCommand (which ensures selection-modified handlers fire).
    history_list.get_jump_history_for_view(view).push_selection(view)
    view.run_command('_sublime_linter_move_cursor', {'point': point})
| StarcoderdataPython |
67145 | <reponame>starofrainnight/ipgetter2<filename>tests/test_compatible.py
# -*- coding: utf-8 -*-
"""Tests for `compatible` package."""
def test_backport_random():
    """Test the backport random for python 3.5"""
    import random

    from ipgetter2.compatible import backport_random_choices

    pool = range(1, 6)
    picked = backport_random_choices(random._inst, pool)
    # At least one pick must come from the population.
    assert set(picked) & set(pool)
| StarcoderdataPython |
282233 | from flask import current_app, g, Flask
from src.models import init_base
def get_db():
    """
    Get the loaded db object from neomodel.

    Placeholder -- not implemented yet.
    """
    pass
def close_db(e=None):
    """
    Used to close the session and end the connection between the database
    and the client.

    Placeholder -- not implemented yet.  ``e`` is the exception (if any)
    passed by Flask's ``teardown_appcontext`` hook, which registers this
    function in ``init_app`` below.
    """
    pass
def init_app(app: Flask):
    """Attach database lifecycle handling to the Flask application."""
    # Initialise the NeoModel configuration for this app.
    init_base(app)
    # Close the db session whenever the app context is torn down.
    app.teardown_appcontext(close_db)
def init_db(app):
    """Discover and run every loader module in ``src.database.loaders``."""
    import importlib

    from .utils.detect_loaders import detect

    try:
        loaders_pkg = importlib.import_module('src.database.loaders')
        # Each detected loader module is expected to expose ``run(app)``.
        for loader_name in detect(loaders_pkg):
            loader = getattr(loaders_pkg, loader_name)
            getattr(loader, 'run')(app)
    except ImportError:
        # The loaders package is optional; skip silently when missing.
        pass
def distroy_db(app):
    """
    Wipe the database.

    Placeholder -- not implemented yet.  NOTE(review): the name is
    misspelled ("distroy" -> "destroy") but is kept to avoid breaking
    callers.
    """
    pass
167489 | <gh_stars>1-10
"""Code for LibUnion class"""
import string
import re
import os
import UserDict
import sys
import shutil
from . import setup_globals
from .setup_globals import gvars, SetupError
from .utils import get_rel_path
from .flash_lib import FlashLib
from .lazy_file import LazyFile
class LibUnion(UserDict.UserDict):
    """The union of all libraries a setup depends on.

    Acts as a dictionary mapping library name -> FlashLib instance.  After
    construction, ``self.libOrder`` is a list of ``(libname, args)`` pairs
    topologically sorted so that every library appears after the libraries
    it depends on.
    """

    def __init__(self,libs): # dict mapping lib name -> list of argument strings
        UserDict.UserDict.__init__(self)
        self.adjust(libs)

    def adjust(self,liblist): # satisfy dependencies and perform checks
        """Resolve transitive dependencies of ``liblist`` and topo-sort them."""
        pwd = os.getcwd()
        os.chdir(gvars.libDir)
        ans = self.libDepHelper(liblist,[])
        # now self["libname"] -> FlashLib("libname")
        os.chdir(pwd)
        self.topoSort(ans)
        # now self.libOrder is a list of pairs containing libname and arguments

    def libDepHelper(self,libs,done):
        """Add libraries which those in libs depend on.
        done is a list of names of libraries already processed"""
        tocheck = None
        for a in libs.keys():
            if a not in done:
                tocheck = a
                break
        # Nothing left to do
        if not tocheck: return libs
        lib = tocheck
        self[lib] = FlashLib(lib) # get lib Config Info. Handles internal and external libraries correctly
        newdeps = self[lib]['LIBRARY']
        for x in newdeps.keys(): # for each dependency update libs with args
            if x not in libs: libs[x] = []
            libs[x].append(newdeps[x])
        done.append(tocheck)
        return self.libDepHelper(libs,done) # recursive call to handle the rest

    # libs is a dictionary mapping libs -> list of arglists (one for each person depending on this lib)
    # self is also a dictionary mapping library name to FlashLib instance for specific library
    # in particular self["libname"]["LIBRARY"] contains other libs it depends on
    # returns all libraries in an order so that all the dependencies of lib come before the lib itself
    def topoSort(self,libs):
        """Compute ``self.libOrder``: (libname, arg) pairs, dependencies first.

        Raises SetupError on a dependency cycle, or when two dependents
        request the same library with different non-empty argument lists.
        """
        curr = [] # list of libraries whose dependencies have been satisfied
        rest = list(libs.keys()) # those whose dependencies need to be satisfied
        while rest: # As long as there is something to do
            success = 0
            for lib in rest:
                libOK = 1 # have we satisfied all dependencies of "lib"
                # self[lib][LIBRARY] maps libname to argument for lib
                for other in self[lib]['LIBRARY'].keys():
                    if other not in curr: libOK = 0
                if libOK == 1:
                    success = 1
                    curr.append(lib)
                    # NOTE: removing while iterating skips the next entry in
                    # this pass; it is retried on the next while-iteration,
                    # so no library is lost.
                    rest.remove(lib)
            if success == 0: # could not satisfy any more dependencies
                break
        if rest: # dependency cycle detected
            gvars.out.put("Topological Sort Failed: Cycle Detected",setup_globals.ERROR)
            gvars.out.put("Suspect Libraries: %s"% ",".join(rest),setup_globals.ERROR)
            raise SetupError("Check your library dependencies")
        # Now check if different people depend on the library in a different way
        # if so flag an error
        self.libOrder = []
        for lib in curr:
            args = [x for x in libs[lib] if x] # pull out all non-trivial arguments
            if not args:
                arg = ""
            else:
                arg = args[0]
            if [x for x in args if x != arg]: # if there is any entry in args other than args[0]
                gvars.out.put("non-unique non-empty argument lists for library %s\n"%lib,setup_globals.ERROR)
                gvars.out.put("Argument Lists are: %s\n"% str(args),setup_globals.DEBUG)
                raise SetupError()
            self.libOrder.append((lib,arg))

    ######################### remaining methods deal with generating flags for libraries
    ######################## and building the library if reqd

    def setMacros(self,macros=None):
        """Record the list of macros found in Makefile.h (used by
        extLibFlags and passed to each library's libinfo)."""
        # Default is None (not []) to avoid the shared mutable-default pitfall.
        self.macros = [] if macros is None else macros

    def getLibFlags(self,lib,buildFlag,args="",makefilename=""):
        """Returns dictionary of flags to be added to the appropriate stage
        of building the executable.
        args is the argument string for this library (ideally empty or unique)
        CFLAGS -> C include flags
        FFLAGS -> Fortran include flags
        LIB    -> Linking flags
        or if lib is not internal calls extLibFlags to find the flags.
        Does not handle the libraries that this library depends on.
        Raises errors if internal lib exists but is troublesome.
        Requires setMacros() to have been called first."""
        base = lib.lower()
        libDir = os.path.join(gvars.libDir,base)
        #relLibDir = get_rel_path(libDir,".")
        relLibDir = get_rel_path(libDir, gvars.project_setup_dir)
        if not os.path.isdir(libDir):
            return self.extLibFlags(lib,buildFlag) # args are of no use here
        # does ...lib/libname/libinfo.py exist?
        if os.path.isfile(os.path.join(libDir,'libinfo.py')):
            # use the new method of getting info
            sys.path.insert(1,libDir) # ask python to search here
            # call the libinfo function in libinfo.py and store its result
            libFlags = __import__("libinfo").libinfo(absLibDir=libDir,
                                                     relLibDir=relLibDir,
                                                     buildFlag=buildFlag,
                                                     args=args,
                                                     macros=self.macros)
            sys.path.remove(libDir)
            del sys.modules["libinfo"] # so the next library's libinfo gets reimported
            if libFlags is None: # programming error
                raise SetupError("libinfo for %s returned nothing. Programming error" % base)
            if "INTERNAL" in libFlags: # pick up default flag info
                libFlags = self.intLibFlags(base,subdir=libFlags["INTERNAL"],absLibDir=libDir,relLibDir=relLibDir)
            if "EXTERNAL" in libFlags: # pick up default flag info for external
                libFlags = self.extLibFlags(libFlags["EXTERNAL"],buildFlag)
            # if we are an internal library and need to rebuild
            if self[lib]["TYPE"]=="INTERNAL" and libFlags.get("REBUILD",None):
                self.makeBinary(libDir,base,makefilename)
            return libFlags
        # now we know that there is no libinfo.py
        if self[lib]["TYPE"]=="EXTERNAL":
            return self.extLibFlags(lib.upper(),buildFlag)
        # no libinfo and internal: compile the binary if reqd.
        # BUGFIX: this previously compared the *bound method*
        # self.libraryExists to False (always false), so the binary was
        # never rebuilt on this path; now the method is actually called.
        if not self.libraryExists(libDir, base):
            self.makeBinary(libDir,base,makefilename)
        # return the default info for simple internal binaries
        return self.intLibFlags(base,absLibDir=libDir,relLibDir=relLibDir)

    # Return the usual flags
    # libname = name of the library e.g. pfft
    # subdir = subdirectory containing what we want
    # absLibDir = base directory for library "FLASHHOME/lib/pfft"
    def intLibFlags(self,libname,subdir="",absLibDir="",relLibDir=""):
        """Default CFLAGS/FFLAGS/LIB flags for an internal library; sets
        REBUILD when the static archive appears to be missing."""
        ans = {}
        if subdir != "" :
            relLibDir = os.path.join(relLibDir,subdir)
            absLibDir = os.path.join(absLibDir,subdir)
            libname = subdir.lower()
        else:
            libname = libname.lower()
        ans["LIB"] = '-L%s/object/ -l%s'%(relLibDir,libname)
        if os.path.isdir(os.path.join(absLibDir, 'include')):
            includeMacro = '-I%s/include'%relLibDir
        else:
            includeMacro = ''
        ans["CFLAGS"] = includeMacro
        ans["FFLAGS"] = includeMacro
        # NOTE(review): this checks absLibDir directly, while libraryExists
        # and the -L flag above use the object/ subdirectory -- confirm
        # whether the missing "object/" here is intentional.
        if not os.path.isfile(os.path.join(absLibDir,"lib%s.a"%libname)):
            ans["REBUILD"]=1
        return ans

    # return CFLAGS, FFLAGS,... from the makefile
    def extLibFlags(self,lib,buildFlag):
        """Per-compiler flags for an external library, taken from the most
        specific macro present in Makefile.h: COMPILER_LIB_BUILDFLAG,
        then COMPILER_LIB_DEFLTFLAG, then plain COMPILER_LIB."""
        ans = {}
        for compiler in setup_globals.COMPILERS:
            for macro in ['%s_%s_%s'%(compiler, lib.upper(), buildFlag),
                          '%s_%s_%s'%(compiler, lib.upper(), setup_globals.DEFLTFLAG),
                          '%s_%s'%(compiler, lib.upper())]:
                if macro in self.macros:
                    ans[compiler] = '$(%s)'%macro
                    break
        return ans

    def libraryExists(self,libDir,libname):
        """Return True if libDir/object/lib<libname>.a exists."""
        obj_dir = os.path.join(libDir, 'object')
        binary = os.path.join(obj_dir, 'lib%s.a'%libname)
        return os.path.isfile(binary)

    # Build the binary if required
    def makeBinary(self,libDir,libname,makefilename):
        """Build an internal library by running its build.py/build.csh with
        the current Makefile.h linked (or copied) into the library dir, so
        the library uses the same compiler/options as the FLASH code."""
        USAGE = """
Fatal Error: setup is unable to locate the internal library %(libname)s.

In order to have setup identify an internal library, one of the following should hold:

* the file %(libDir)s/libinfo.py must exist and contain a function libinfo
  this function must return a dictionary containing flags to be passed to the
  different compilers and linkers. libinfo can request that the binary be
  rebuilt by setting the REBUILD flag in its return value
* the file %(libDir)s/object/lib%(libname)s.a must exist
* the file %(libDir)s/build.csh or %(libDir)s/build.py must exist
  this script is given the location of Makefile.h as its only argument
  - If there is a libinfo.py the build script must generate
    the library specified by return value of libinfo function
  - Otherwise the build script must generate %(libDir)s/object/lib%(libname)s.a
"""
        pwd = os.getcwd()
        os.chdir(libDir)
        # Link/Copy current Makefile.h so library is built using
        # the same compiler and options as the FLASH code
        if os.path.isfile("Makefile.h"): os.remove("Makefile.h")
        if gvars.portable:
            shutil.copy2(makefilename,"Makefile.h")
        else:
            os.symlink(makefilename,"Makefile.h")
        if os.path.isfile('build.py'):
            gvars.out.put('exec\'ing %s/build.py'%libDir)
            exec(open('./build.py').read())
        elif os.path.isfile('build.csh'):
            gvars.out.put('running %s/build.csh'%libDir)
            os.system('./build.csh')
        else:
            raise SetupError(USAGE % locals())
        os.chdir(pwd)

    def write_libraries(self):
        """Write the resolved (libname, args) pairs to the setup libraries
        file; when the contents changed since the last run, disable
        noClobber so the change takes effect."""
        #fd = LazyFile(os.path.join(gvars.flash_src_dir,gvars.project_build_dir,setup_globals.SETUP_LIBRARIES_FILENAME))
        fd = LazyFile(os.path.join(gvars.project_setup_dir, setup_globals.SETUP_LIBRARIES_FILENAME))
        for (libname,args) in self.libOrder:
            if args:
                fd.write("%8s : %s\n" % (libname,args))
            else: fd.write("%8s : \n" % libname)
        fd.close()
        if not fd.samefile and gvars.noClobber:
            gvars.out.put("WARNING: Library requirements have changed from previous run. Ignoring noClobber")
            gvars.noClobber = 0
| StarcoderdataPython |
11326613 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['FirehoseDeliveryStreamArgs', 'FirehoseDeliveryStream']
@pulumi.input_type
class FirehoseDeliveryStreamArgs:
    """Constructor arguments for the ``FirehoseDeliveryStream`` resource.

    Auto-generated by the Pulumi Terraform bridge (see the warning at the
    top of this file) -- do not edit by hand.
    """
    def __init__(__self__, *,
                 destination: pulumi.Input[str],
                 arn: Optional[pulumi.Input[str]] = None,
                 destination_id: Optional[pulumi.Input[str]] = None,
                 elasticsearch_configuration: Optional[pulumi.Input['FirehoseDeliveryStreamElasticsearchConfigurationArgs']] = None,
                 extended_s3_configuration: Optional[pulumi.Input['FirehoseDeliveryStreamExtendedS3ConfigurationArgs']] = None,
                 http_endpoint_configuration: Optional[pulumi.Input['FirehoseDeliveryStreamHttpEndpointConfigurationArgs']] = None,
                 kinesis_source_configuration: Optional[pulumi.Input['FirehoseDeliveryStreamKinesisSourceConfigurationArgs']] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 redshift_configuration: Optional[pulumi.Input['FirehoseDeliveryStreamRedshiftConfigurationArgs']] = None,
                 s3_configuration: Optional[pulumi.Input['FirehoseDeliveryStreamS3ConfigurationArgs']] = None,
                 server_side_encryption: Optional[pulumi.Input['FirehoseDeliveryStreamServerSideEncryptionArgs']] = None,
                 splunk_configuration: Optional[pulumi.Input['FirehoseDeliveryStreamSplunkConfigurationArgs']] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 version_id: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a FirehoseDeliveryStream resource.
        :param pulumi.Input[str] destination: This is the destination to where the data is delivered. The only options are `s3` (Deprecated, use `extended_s3` instead), `extended_s3`, `redshift`, `elasticsearch`, `splunk`, and `http_endpoint`.
        :param pulumi.Input[str] arn: The Amazon Resource Name (ARN) specifying the Stream
        :param pulumi.Input['FirehoseDeliveryStreamElasticsearchConfigurationArgs'] elasticsearch_configuration: Configuration options if elasticsearch is the destination. More details are given below.
        :param pulumi.Input['FirehoseDeliveryStreamExtendedS3ConfigurationArgs'] extended_s3_configuration: Enhanced configuration options for the s3 destination. More details are given below.
        :param pulumi.Input['FirehoseDeliveryStreamHttpEndpointConfigurationArgs'] http_endpoint_configuration: Configuration options if http_endpoint is the destination. requires the user to also specify a `s3_configuration` block. More details are given below.
        :param pulumi.Input['FirehoseDeliveryStreamKinesisSourceConfigurationArgs'] kinesis_source_configuration: Allows the ability to specify the kinesis stream that is used as the source of the firehose delivery stream.
        :param pulumi.Input[str] name: A name to identify the stream. This is unique to the
               AWS account and region the Stream is created in.
        :param pulumi.Input['FirehoseDeliveryStreamRedshiftConfigurationArgs'] redshift_configuration: Configuration options if redshift is the destination.
               Using `redshift_configuration` requires the user to also specify a
               `s3_configuration` block. More details are given below.
        :param pulumi.Input['FirehoseDeliveryStreamS3ConfigurationArgs'] s3_configuration: Required for non-S3 destinations. For S3 destination, use `extended_s3_configuration` instead. Configuration options for the s3 destination (or the intermediate bucket if the destination
               is redshift). More details are given below.
        :param pulumi.Input['FirehoseDeliveryStreamServerSideEncryptionArgs'] server_side_encryption: Encrypt at rest options.
               Server-side encryption should not be enabled when a kinesis stream is configured as the source of the firehose delivery stream.
        :param pulumi.Input['FirehoseDeliveryStreamSplunkConfigurationArgs'] splunk_configuration: Configuration options if splunk is the destination. More details are given below.
        :param pulumi.Input[str] version_id: Specifies the table version for the output data schema. Defaults to `LATEST`.
        """
        pulumi.set(__self__, "destination", destination)
        if arn is not None:
            pulumi.set(__self__, "arn", arn)
        if destination_id is not None:
            pulumi.set(__self__, "destination_id", destination_id)
        if elasticsearch_configuration is not None:
            pulumi.set(__self__, "elasticsearch_configuration", elasticsearch_configuration)
        if extended_s3_configuration is not None:
            pulumi.set(__self__, "extended_s3_configuration", extended_s3_configuration)
        if http_endpoint_configuration is not None:
            pulumi.set(__self__, "http_endpoint_configuration", http_endpoint_configuration)
        if kinesis_source_configuration is not None:
            pulumi.set(__self__, "kinesis_source_configuration", kinesis_source_configuration)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if redshift_configuration is not None:
            pulumi.set(__self__, "redshift_configuration", redshift_configuration)
        if s3_configuration is not None:
            pulumi.set(__self__, "s3_configuration", s3_configuration)
        if server_side_encryption is not None:
            pulumi.set(__self__, "server_side_encryption", server_side_encryption)
        if splunk_configuration is not None:
            pulumi.set(__self__, "splunk_configuration", splunk_configuration)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if version_id is not None:
            pulumi.set(__self__, "version_id", version_id)
    @property
    @pulumi.getter
    def destination(self) -> pulumi.Input[str]:
        """
        This is the destination to where the data is delivered. The only options are `s3` (Deprecated, use `extended_s3` instead), `extended_s3`, `redshift`, `elasticsearch`, `splunk`, and `http_endpoint`.
        """
        return pulumi.get(self, "destination")
    @destination.setter
    def destination(self, value: pulumi.Input[str]):
        pulumi.set(self, "destination", value)
    @property
    @pulumi.getter
    def arn(self) -> Optional[pulumi.Input[str]]:
        """
        The Amazon Resource Name (ARN) specifying the Stream
        """
        return pulumi.get(self, "arn")
    @arn.setter
    def arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "arn", value)
    @property
    @pulumi.getter(name="destinationId")
    def destination_id(self) -> Optional[pulumi.Input[str]]:
        # NOTE(review): undocumented upstream; presumably the
        # provider-assigned destination identifier -- confirm before use.
        return pulumi.get(self, "destination_id")
    @destination_id.setter
    def destination_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "destination_id", value)
    @property
    @pulumi.getter(name="elasticsearchConfiguration")
    def elasticsearch_configuration(self) -> Optional[pulumi.Input['FirehoseDeliveryStreamElasticsearchConfigurationArgs']]:
        """
        Configuration options if elasticsearch is the destination. More details are given below.
        """
        return pulumi.get(self, "elasticsearch_configuration")
    @elasticsearch_configuration.setter
    def elasticsearch_configuration(self, value: Optional[pulumi.Input['FirehoseDeliveryStreamElasticsearchConfigurationArgs']]):
        pulumi.set(self, "elasticsearch_configuration", value)
    @property
    @pulumi.getter(name="extendedS3Configuration")
    def extended_s3_configuration(self) -> Optional[pulumi.Input['FirehoseDeliveryStreamExtendedS3ConfigurationArgs']]:
        """
        Enhanced configuration options for the s3 destination. More details are given below.
        """
        return pulumi.get(self, "extended_s3_configuration")
    @extended_s3_configuration.setter
    def extended_s3_configuration(self, value: Optional[pulumi.Input['FirehoseDeliveryStreamExtendedS3ConfigurationArgs']]):
        pulumi.set(self, "extended_s3_configuration", value)
    @property
    @pulumi.getter(name="httpEndpointConfiguration")
    def http_endpoint_configuration(self) -> Optional[pulumi.Input['FirehoseDeliveryStreamHttpEndpointConfigurationArgs']]:
        """
        Configuration options if http_endpoint is the destination. requires the user to also specify a `s3_configuration` block. More details are given below.
        """
        return pulumi.get(self, "http_endpoint_configuration")
    @http_endpoint_configuration.setter
    def http_endpoint_configuration(self, value: Optional[pulumi.Input['FirehoseDeliveryStreamHttpEndpointConfigurationArgs']]):
        pulumi.set(self, "http_endpoint_configuration", value)
    @property
    @pulumi.getter(name="kinesisSourceConfiguration")
    def kinesis_source_configuration(self) -> Optional[pulumi.Input['FirehoseDeliveryStreamKinesisSourceConfigurationArgs']]:
        """
        Allows the ability to specify the kinesis stream that is used as the source of the firehose delivery stream.
        """
        return pulumi.get(self, "kinesis_source_configuration")
    @kinesis_source_configuration.setter
    def kinesis_source_configuration(self, value: Optional[pulumi.Input['FirehoseDeliveryStreamKinesisSourceConfigurationArgs']]):
        pulumi.set(self, "kinesis_source_configuration", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        A name to identify the stream. This is unique to the
        AWS account and region the Stream is created in.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="redshiftConfiguration")
    def redshift_configuration(self) -> Optional[pulumi.Input['FirehoseDeliveryStreamRedshiftConfigurationArgs']]:
        """
        Configuration options if redshift is the destination.
        Using `redshift_configuration` requires the user to also specify a
        `s3_configuration` block. More details are given below.
        """
        return pulumi.get(self, "redshift_configuration")
    @redshift_configuration.setter
    def redshift_configuration(self, value: Optional[pulumi.Input['FirehoseDeliveryStreamRedshiftConfigurationArgs']]):
        pulumi.set(self, "redshift_configuration", value)
    @property
    @pulumi.getter(name="s3Configuration")
    def s3_configuration(self) -> Optional[pulumi.Input['FirehoseDeliveryStreamS3ConfigurationArgs']]:
        """
        Required for non-S3 destinations. For S3 destination, use `extended_s3_configuration` instead. Configuration options for the s3 destination (or the intermediate bucket if the destination
        is redshift). More details are given below.
        """
        return pulumi.get(self, "s3_configuration")
    @s3_configuration.setter
    def s3_configuration(self, value: Optional[pulumi.Input['FirehoseDeliveryStreamS3ConfigurationArgs']]):
        pulumi.set(self, "s3_configuration", value)
    @property
    @pulumi.getter(name="serverSideEncryption")
    def server_side_encryption(self) -> Optional[pulumi.Input['FirehoseDeliveryStreamServerSideEncryptionArgs']]:
        """
        Encrypt at rest options.
        Server-side encryption should not be enabled when a kinesis stream is configured as the source of the firehose delivery stream.
        """
        return pulumi.get(self, "server_side_encryption")
    @server_side_encryption.setter
    def server_side_encryption(self, value: Optional[pulumi.Input['FirehoseDeliveryStreamServerSideEncryptionArgs']]):
        pulumi.set(self, "server_side_encryption", value)
    @property
    @pulumi.getter(name="splunkConfiguration")
    def splunk_configuration(self) -> Optional[pulumi.Input['FirehoseDeliveryStreamSplunkConfigurationArgs']]:
        """
        Configuration options if splunk is the destination. More details are given below.
        """
        return pulumi.get(self, "splunk_configuration")
    @splunk_configuration.setter
    def splunk_configuration(self, value: Optional[pulumi.Input['FirehoseDeliveryStreamSplunkConfigurationArgs']]):
        pulumi.set(self, "splunk_configuration", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
    @property
    @pulumi.getter(name="versionId")
    def version_id(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the table version for the output data schema. Defaults to `LATEST`.
        """
        return pulumi.get(self, "version_id")
    @version_id.setter
    def version_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "version_id", value)
@pulumi.input_type
class _FirehoseDeliveryStreamState:
def __init__(__self__, *,
arn: Optional[pulumi.Input[str]] = None,
destination: Optional[pulumi.Input[str]] = None,
destination_id: Optional[pulumi.Input[str]] = None,
elasticsearch_configuration: Optional[pulumi.Input['FirehoseDeliveryStreamElasticsearchConfigurationArgs']] = None,
extended_s3_configuration: Optional[pulumi.Input['FirehoseDeliveryStreamExtendedS3ConfigurationArgs']] = None,
http_endpoint_configuration: Optional[pulumi.Input['FirehoseDeliveryStreamHttpEndpointConfigurationArgs']] = None,
kinesis_source_configuration: Optional[pulumi.Input['FirehoseDeliveryStreamKinesisSourceConfigurationArgs']] = None,
name: Optional[pulumi.Input[str]] = None,
redshift_configuration: Optional[pulumi.Input['FirehoseDeliveryStreamRedshiftConfigurationArgs']] = None,
s3_configuration: Optional[pulumi.Input['FirehoseDeliveryStreamS3ConfigurationArgs']] = None,
server_side_encryption: Optional[pulumi.Input['FirehoseDeliveryStreamServerSideEncryptionArgs']] = None,
splunk_configuration: Optional[pulumi.Input['FirehoseDeliveryStreamSplunkConfigurationArgs']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
version_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering FirehoseDeliveryStream resources.
:param pulumi.Input[str] arn: The Amazon Resource Name (ARN) specifying the Stream
:param pulumi.Input[str] destination: This is the destination to where the data is delivered. The only options are `s3` (Deprecated, use `extended_s3` instead), `extended_s3`, `redshift`, `elasticsearch`, `splunk`, and `http_endpoint`.
:param pulumi.Input['FirehoseDeliveryStreamElasticsearchConfigurationArgs'] elasticsearch_configuration: Configuration options if elasticsearch is the destination. More details are given below.
:param pulumi.Input['FirehoseDeliveryStreamExtendedS3ConfigurationArgs'] extended_s3_configuration: Enhanced configuration options for the s3 destination. More details are given below.
:param pulumi.Input['FirehoseDeliveryStreamHttpEndpointConfigurationArgs'] http_endpoint_configuration: Configuration options if http_endpoint is the destination. requires the user to also specify a `s3_configuration` block. More details are given below.
:param pulumi.Input['FirehoseDeliveryStreamKinesisSourceConfigurationArgs'] kinesis_source_configuration: Allows the ability to specify the kinesis stream that is used as the source of the firehose delivery stream.
:param pulumi.Input[str] name: A name to identify the stream. This is unique to the
AWS account and region the Stream is created in.
:param pulumi.Input['FirehoseDeliveryStreamRedshiftConfigurationArgs'] redshift_configuration: Configuration options if redshift is the destination.
Using `redshift_configuration` requires the user to also specify a
`s3_configuration` block. More details are given below.
:param pulumi.Input['FirehoseDeliveryStreamS3ConfigurationArgs'] s3_configuration: Required for non-S3 destinations. For S3 destination, use `extended_s3_configuration` instead. Configuration options for the s3 destination (or the intermediate bucket if the destination
is redshift). More details are given below.
:param pulumi.Input['FirehoseDeliveryStreamServerSideEncryptionArgs'] server_side_encryption: Encrypt at rest options.
Server-side encryption should not be enabled when a kinesis stream is configured as the source of the firehose delivery stream.
:param pulumi.Input['FirehoseDeliveryStreamSplunkConfigurationArgs'] splunk_configuration: Configuration options if splunk is the destination. More details are given below.
:param pulumi.Input[str] version_id: Specifies the table version for the output data schema. Defaults to `LATEST`.
"""
if arn is not None:
pulumi.set(__self__, "arn", arn)
if destination is not None:
pulumi.set(__self__, "destination", destination)
if destination_id is not None:
pulumi.set(__self__, "destination_id", destination_id)
if elasticsearch_configuration is not None:
pulumi.set(__self__, "elasticsearch_configuration", elasticsearch_configuration)
if extended_s3_configuration is not None:
pulumi.set(__self__, "extended_s3_configuration", extended_s3_configuration)
if http_endpoint_configuration is not None:
pulumi.set(__self__, "http_endpoint_configuration", http_endpoint_configuration)
if kinesis_source_configuration is not None:
pulumi.set(__self__, "kinesis_source_configuration", kinesis_source_configuration)
if name is not None:
pulumi.set(__self__, "name", name)
if redshift_configuration is not None:
pulumi.set(__self__, "redshift_configuration", redshift_configuration)
if s3_configuration is not None:
pulumi.set(__self__, "s3_configuration", s3_configuration)
if server_side_encryption is not None:
pulumi.set(__self__, "server_side_encryption", server_side_encryption)
if splunk_configuration is not None:
pulumi.set(__self__, "splunk_configuration", splunk_configuration)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if tags_all is not None:
pulumi.set(__self__, "tags_all", tags_all)
if version_id is not None:
pulumi.set(__self__, "version_id", version_id)
    @property
    @pulumi.getter
    def arn(self) -> Optional[pulumi.Input[str]]:
        """
        The Amazon Resource Name (ARN) specifying the Stream.
        """
        return pulumi.get(self, "arn")
    @arn.setter
    def arn(self, value: Optional[pulumi.Input[str]]):
        # Overwrite the stored "arn" input value.
        pulumi.set(self, "arn", value)
    @property
    @pulumi.getter
    def destination(self) -> Optional[pulumi.Input[str]]:
        """
        This is the destination to where the data is delivered. The only options are `s3` (Deprecated, use `extended_s3` instead), `extended_s3`, `redshift`, `elasticsearch`, `splunk`, and `http_endpoint`.
        """
        return pulumi.get(self, "destination")
    @destination.setter
    def destination(self, value: Optional[pulumi.Input[str]]):
        # Overwrite the stored "destination" input value.
        pulumi.set(self, "destination", value)
    @property
    @pulumi.getter(name="destinationId")
    def destination_id(self) -> Optional[pulumi.Input[str]]:
        # NOTE(review): no description in the provider schema; presumably the
        # provider-assigned identifier of the configured destination — confirm upstream.
        return pulumi.get(self, "destination_id")
    @destination_id.setter
    def destination_id(self, value: Optional[pulumi.Input[str]]):
        # Overwrite the stored "destination_id" input value.
        pulumi.set(self, "destination_id", value)
    @property
    @pulumi.getter(name="elasticsearchConfiguration")
    def elasticsearch_configuration(self) -> Optional[pulumi.Input['FirehoseDeliveryStreamElasticsearchConfigurationArgs']]:
        """
        Configuration options if elasticsearch is the destination. More details are given below.
        """
        return pulumi.get(self, "elasticsearch_configuration")
    @elasticsearch_configuration.setter
    def elasticsearch_configuration(self, value: Optional[pulumi.Input['FirehoseDeliveryStreamElasticsearchConfigurationArgs']]):
        # Overwrite the stored "elasticsearch_configuration" input value.
        pulumi.set(self, "elasticsearch_configuration", value)
    @property
    @pulumi.getter(name="extendedS3Configuration")
    def extended_s3_configuration(self) -> Optional[pulumi.Input['FirehoseDeliveryStreamExtendedS3ConfigurationArgs']]:
        """
        Enhanced configuration options for the s3 destination. More details are given below.
        """
        return pulumi.get(self, "extended_s3_configuration")
    @extended_s3_configuration.setter
    def extended_s3_configuration(self, value: Optional[pulumi.Input['FirehoseDeliveryStreamExtendedS3ConfigurationArgs']]):
        # Overwrite the stored "extended_s3_configuration" input value.
        pulumi.set(self, "extended_s3_configuration", value)
    @property
    @pulumi.getter(name="httpEndpointConfiguration")
    def http_endpoint_configuration(self) -> Optional[pulumi.Input['FirehoseDeliveryStreamHttpEndpointConfigurationArgs']]:
        """
        Configuration options if http_endpoint is the destination. Requires the user to also specify a `s3_configuration` block. More details are given below.
        """
        return pulumi.get(self, "http_endpoint_configuration")
    @http_endpoint_configuration.setter
    def http_endpoint_configuration(self, value: Optional[pulumi.Input['FirehoseDeliveryStreamHttpEndpointConfigurationArgs']]):
        # Overwrite the stored "http_endpoint_configuration" input value.
        pulumi.set(self, "http_endpoint_configuration", value)
    @property
    @pulumi.getter(name="kinesisSourceConfiguration")
    def kinesis_source_configuration(self) -> Optional[pulumi.Input['FirehoseDeliveryStreamKinesisSourceConfigurationArgs']]:
        """
        Allows the ability to specify the kinesis stream that is used as the source of the firehose delivery stream.
        """
        return pulumi.get(self, "kinesis_source_configuration")
    @kinesis_source_configuration.setter
    def kinesis_source_configuration(self, value: Optional[pulumi.Input['FirehoseDeliveryStreamKinesisSourceConfigurationArgs']]):
        # Overwrite the stored "kinesis_source_configuration" input value.
        pulumi.set(self, "kinesis_source_configuration", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        A name to identify the stream. This is unique to the
        AWS account and region the Stream is created in.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        # Overwrite the stored "name" input value.
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="redshiftConfiguration")
    def redshift_configuration(self) -> Optional[pulumi.Input['FirehoseDeliveryStreamRedshiftConfigurationArgs']]:
        """
        Configuration options if redshift is the destination.
        Using `redshift_configuration` requires the user to also specify a
        `s3_configuration` block. More details are given below.
        """
        return pulumi.get(self, "redshift_configuration")
    @redshift_configuration.setter
    def redshift_configuration(self, value: Optional[pulumi.Input['FirehoseDeliveryStreamRedshiftConfigurationArgs']]):
        # Overwrite the stored "redshift_configuration" input value.
        pulumi.set(self, "redshift_configuration", value)
    @property
    @pulumi.getter(name="s3Configuration")
    def s3_configuration(self) -> Optional[pulumi.Input['FirehoseDeliveryStreamS3ConfigurationArgs']]:
        """
        Required for non-S3 destinations. For S3 destination, use `extended_s3_configuration` instead. Configuration options for the s3 destination (or the intermediate bucket if the destination
        is redshift). More details are given below.
        """
        return pulumi.get(self, "s3_configuration")
    @s3_configuration.setter
    def s3_configuration(self, value: Optional[pulumi.Input['FirehoseDeliveryStreamS3ConfigurationArgs']]):
        # Overwrite the stored "s3_configuration" input value.
        pulumi.set(self, "s3_configuration", value)
    @property
    @pulumi.getter(name="serverSideEncryption")
    def server_side_encryption(self) -> Optional[pulumi.Input['FirehoseDeliveryStreamServerSideEncryptionArgs']]:
        """
        Encrypt at rest options.
        Server-side encryption should not be enabled when a kinesis stream is configured as the source of the firehose delivery stream.
        """
        return pulumi.get(self, "server_side_encryption")
    @server_side_encryption.setter
    def server_side_encryption(self, value: Optional[pulumi.Input['FirehoseDeliveryStreamServerSideEncryptionArgs']]):
        # Overwrite the stored "server_side_encryption" input value.
        pulumi.set(self, "server_side_encryption", value)
    @property
    @pulumi.getter(name="splunkConfiguration")
    def splunk_configuration(self) -> Optional[pulumi.Input['FirehoseDeliveryStreamSplunkConfigurationArgs']]:
        """
        Configuration options if splunk is the destination. More details are given below.
        """
        return pulumi.get(self, "splunk_configuration")
    @splunk_configuration.setter
    def splunk_configuration(self, value: Optional[pulumi.Input['FirehoseDeliveryStreamSplunkConfigurationArgs']]):
        # Overwrite the stored "splunk_configuration" input value.
        pulumi.set(self, "splunk_configuration", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A map of tags to assign to the resource.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        # Overwrite the stored "tags" input value.
        pulumi.set(self, "tags", value)
    @property
    @pulumi.getter(name="tagsAll")
    def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        # NOTE(review): provider-computed in _internal_init (always reset to None on
        # create); presumably tags including those inherited from the provider block.
        return pulumi.get(self, "tags_all")
    @tags_all.setter
    def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        # Overwrite the stored "tags_all" input value.
        pulumi.set(self, "tags_all", value)
    @property
    @pulumi.getter(name="versionId")
    def version_id(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the table version for the output data schema. Defaults to `LATEST`.
        """
        return pulumi.get(self, "version_id")
    @version_id.setter
    def version_id(self, value: Optional[pulumi.Input[str]]):
        # Overwrite the stored "version_id" input value.
        pulumi.set(self, "version_id", value)
class FirehoseDeliveryStream(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 arn: Optional[pulumi.Input[str]] = None,
                 destination: Optional[pulumi.Input[str]] = None,
                 destination_id: Optional[pulumi.Input[str]] = None,
                 elasticsearch_configuration: Optional[pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamElasticsearchConfigurationArgs']]] = None,
                 extended_s3_configuration: Optional[pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamExtendedS3ConfigurationArgs']]] = None,
                 http_endpoint_configuration: Optional[pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamHttpEndpointConfigurationArgs']]] = None,
                 kinesis_source_configuration: Optional[pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamKinesisSourceConfigurationArgs']]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 redshift_configuration: Optional[pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamRedshiftConfigurationArgs']]] = None,
                 s3_configuration: Optional[pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamS3ConfigurationArgs']]] = None,
                 server_side_encryption: Optional[pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamServerSideEncryptionArgs']]] = None,
                 splunk_configuration: Optional[pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamSplunkConfigurationArgs']]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 version_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Provides a Kinesis Firehose Delivery Stream resource. Amazon Kinesis Firehose is a fully managed, elastic service to easily deliver real-time data streams to destinations such as Amazon S3 and Amazon Redshift.

        For more details, see the [Amazon Kinesis Firehose Documentation](https://aws.amazon.com/documentation/firehose/).

        ## Example Usage
        ### Extended S3 Destination

        ```python
        import pulumi
        import pulumi_aws as aws

        bucket = aws.s3.Bucket("bucket", acl="private")
        firehose_role = aws.iam.Role("firehoseRole", assume_role_policy=\"\"\"{
          "Version": "2012-10-17",
          "Statement": [
            {
              "Action": "sts:AssumeRole",
              "Principal": {
                "Service": "firehose.amazonaws.com"
              },
              "Effect": "Allow",
              "Sid": ""
            }
          ]
        }
        \"\"\")
        lambda_iam = aws.iam.Role("lambdaIam", assume_role_policy=\"\"\"{
          "Version": "2012-10-17",
          "Statement": [
            {
              "Action": "sts:AssumeRole",
              "Principal": {
                "Service": "lambda.amazonaws.com"
              },
              "Effect": "Allow",
              "Sid": ""
            }
          ]
        }
        \"\"\")
        lambda_processor = aws.lambda_.Function("lambdaProcessor",
            code=pulumi.FileArchive("lambda.zip"),
            role=lambda_iam.arn,
            handler="exports.handler",
            runtime="nodejs12.x")
        extended_s3_stream = aws.kinesis.FirehoseDeliveryStream("extendedS3Stream",
            destination="extended_s3",
            extended_s3_configuration=aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationArgs(
                role_arn=firehose_role.arn,
                bucket_arn=bucket.arn,
                processing_configuration=aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs(
                    enabled=True,
                    processors=[aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs(
                        type="Lambda",
                        parameters=[aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs(
                            parameter_name="LambdaArn",
                            parameter_value=lambda_processor.arn.apply(lambda arn: f"{arn}:$LATEST"),
                        )],
                    )],
                ),
            ))
        ```
        ### S3 Destination (deprecated)

        ```python
        import pulumi
        import pulumi_aws as aws

        bucket = aws.s3.Bucket("bucket", acl="private")
        firehose_role = aws.iam.Role("firehoseRole", assume_role_policy=\"\"\"{
          "Version": "2012-10-17",
          "Statement": [
            {
              "Action": "sts:AssumeRole",
              "Principal": {
                "Service": "firehose.amazonaws.com"
              },
              "Effect": "Allow",
              "Sid": ""
            }
          ]
        }
        \"\"\")
        test_stream = aws.kinesis.FirehoseDeliveryStream("testStream",
            destination="s3",
            s3_configuration=aws.kinesis.FirehoseDeliveryStreamS3ConfigurationArgs(
                role_arn=firehose_role.arn,
                bucket_arn=bucket.arn,
            ))
        ```
        ### Redshift Destination

        ```python
        import pulumi
        import pulumi_aws as aws

        test_cluster = aws.redshift.Cluster("testCluster",
            cluster_identifier="tf-redshift-cluster",
            database_name="test",
            master_username="testuser",
            master_password="Mustbe8characters",
            node_type="dc1.large",
            cluster_type="single-node")
        test_stream = aws.kinesis.FirehoseDeliveryStream("testStream",
            destination="redshift",
            s3_configuration=aws.kinesis.FirehoseDeliveryStreamS3ConfigurationArgs(
                role_arn=aws_iam_role["firehose_role"]["arn"],
                bucket_arn=aws_s3_bucket["bucket"]["arn"],
                buffer_size=10,
                buffer_interval=400,
                compression_format="GZIP",
            ),
            redshift_configuration=aws.kinesis.FirehoseDeliveryStreamRedshiftConfigurationArgs(
                role_arn=aws_iam_role["firehose_role"]["arn"],
                cluster_jdbcurl=pulumi.Output.all(test_cluster.endpoint, test_cluster.database_name).apply(lambda endpoint, database_name: f"jdbc:redshift://{endpoint}/{database_name}"),
                username="testuser",
                password="Mustbe8characters",
                data_table_name="test-table",
                copy_options="delimiter '|'",
                data_table_columns="test-col",
                s3_backup_mode="Enabled",
                s3_backup_configuration=aws.kinesis.FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfigurationArgs(
                    role_arn=aws_iam_role["firehose_role"]["arn"],
                    bucket_arn=aws_s3_bucket["bucket"]["arn"],
                    buffer_size=15,
                    buffer_interval=300,
                    compression_format="GZIP",
                ),
            ))
        ```
        ### Elasticsearch Destination

        ```python
        import pulumi
        import pulumi_aws as aws

        test_cluster = aws.elasticsearch.Domain("testCluster")
        test_stream = aws.kinesis.FirehoseDeliveryStream("testStream",
            destination="elasticsearch",
            s3_configuration=aws.kinesis.FirehoseDeliveryStreamS3ConfigurationArgs(
                role_arn=aws_iam_role["firehose_role"]["arn"],
                bucket_arn=aws_s3_bucket["bucket"]["arn"],
                buffer_size=10,
                buffer_interval=400,
                compression_format="GZIP",
            ),
            elasticsearch_configuration=aws.kinesis.FirehoseDeliveryStreamElasticsearchConfigurationArgs(
                domain_arn=test_cluster.arn,
                role_arn=aws_iam_role["firehose_role"]["arn"],
                index_name="test",
                type_name="test",
                processing_configuration=aws.kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationArgs(
                    enabled=True,
                    processors=[aws.kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorArgs(
                        type="Lambda",
                        parameters=[aws.kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameterArgs(
                            parameter_name="LambdaArn",
                            parameter_value=f"{aws_lambda_function['lambda_processor']['arn']}:$LATEST",
                        )],
                    )],
                ),
            ))
        ```
        ### Elasticsearch Destination With VPC

        ```python
        import pulumi
        import pulumi_aws as aws

        test_cluster = aws.elasticsearch.Domain("testCluster",
            cluster_config=aws.elasticsearch.DomainClusterConfigArgs(
                instance_count=2,
                zone_awareness_enabled=True,
                instance_type="t2.small.elasticsearch",
            ),
            ebs_options=aws.elasticsearch.DomainEbsOptionsArgs(
                ebs_enabled=True,
                volume_size=10,
            ),
            vpc_options=aws.elasticsearch.DomainVpcOptionsArgs(
                security_group_ids=[aws_security_group["first"]["id"]],
                subnet_ids=[
                    aws_subnet["first"]["id"],
                    aws_subnet["second"]["id"],
                ],
            ))
        firehose_elasticsearch = aws.iam.RolePolicy("firehose-elasticsearch",
            role=aws_iam_role["firehose"]["id"],
            policy=pulumi.Output.all(test_cluster.arn, test_cluster.arn).apply(lambda testClusterArn, testClusterArn1: f\"\"\"{{
          "Version": "2012-10-17",
          "Statement": [
            {{
              "Effect": "Allow",
              "Action": [
                "es:*"
              ],
              "Resource": [
                "{test_cluster_arn}",
                "{test_cluster_arn1}/*"
              ]
            }},
            {{
              "Effect": "Allow",
              "Action": [
                "ec2:DescribeVpcs",
                "ec2:DescribeVpcAttribute",
                "ec2:DescribeSubnets",
                "ec2:DescribeSecurityGroups",
                "ec2:DescribeNetworkInterfaces",
                "ec2:CreateNetworkInterface",
                "ec2:CreateNetworkInterfacePermission",
                "ec2:DeleteNetworkInterface"
              ],
              "Resource": [
                "*"
              ]
            }}
          ]
        }}
        \"\"\"))
        test = aws.kinesis.FirehoseDeliveryStream("test",
            destination="elasticsearch",
            s3_configuration=aws.kinesis.FirehoseDeliveryStreamS3ConfigurationArgs(
                role_arn=aws_iam_role["firehose"]["arn"],
                bucket_arn=aws_s3_bucket["bucket"]["arn"],
            ),
            elasticsearch_configuration=aws.kinesis.FirehoseDeliveryStreamElasticsearchConfigurationArgs(
                domain_arn=test_cluster.arn,
                role_arn=aws_iam_role["firehose"]["arn"],
                index_name="test",
                type_name="test",
                vpc_config=aws.kinesis.FirehoseDeliveryStreamElasticsearchConfigurationVpcConfigArgs(
                    subnet_ids=[
                        aws_subnet["first"]["id"],
                        aws_subnet["second"]["id"],
                    ],
                    security_group_ids=[aws_security_group["first"]["id"]],
                    role_arn=aws_iam_role["firehose"]["arn"],
                ),
            ),
            opts=pulumi.ResourceOptions(depends_on=[firehose_elasticsearch]))
        ```
        ### Splunk Destination

        ```python
        import pulumi
        import pulumi_aws as aws

        test_stream = aws.kinesis.FirehoseDeliveryStream("testStream",
            destination="splunk",
            s3_configuration=aws.kinesis.FirehoseDeliveryStreamS3ConfigurationArgs(
                role_arn=aws_iam_role["firehose"]["arn"],
                bucket_arn=aws_s3_bucket["bucket"]["arn"],
                buffer_size=10,
                buffer_interval=400,
                compression_format="GZIP",
            ),
            splunk_configuration=aws.kinesis.FirehoseDeliveryStreamSplunkConfigurationArgs(
                hec_endpoint="https://http-inputs-mydomain.splunkcloud.com:443",
                hec_token="51D4DA16-C61B-4F5F-8EC7-ED4301342A4A",
                hec_acknowledgment_timeout=600,
                hec_endpoint_type="Event",
                s3_backup_mode="FailedEventsOnly",
            ))
        ```
        ### HTTP Endpoint (e.g. New Relic) Destination

        ```python
        import pulumi
        import pulumi_aws as aws

        test_stream = aws.kinesis.FirehoseDeliveryStream("testStream",
            destination="http_endpoint",
            s3_configuration=aws.kinesis.FirehoseDeliveryStreamS3ConfigurationArgs(
                role_arn=aws_iam_role["firehose"]["arn"],
                bucket_arn=aws_s3_bucket["bucket"]["arn"],
                buffer_size=10,
                buffer_interval=400,
                compression_format="GZIP",
            ),
            http_endpoint_configuration=aws.kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationArgs(
                url="https://aws-api.newrelic.com/firehose/v1",
                name="New Relic",
                access_key="my-key",
                buffering_size=15,
                buffering_interval=600,
                role_arn=aws_iam_role["firehose"]["arn"],
                s3_backup_mode="FailedDataOnly",
                request_configuration=aws.kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationArgs(
                    content_encoding="GZIP",
                    common_attributes=[
                        aws.kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArgs(
                            name="testname",
                            value="testvalue",
                        ),
                        aws.kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArgs(
                            name="testname2",
                            value="testvalue2",
                        ),
                    ],
                ),
            ))
        ```

        ## Import

        Kinesis Firehose Delivery streams can be imported using the stream ARN, e.g.

        ```sh
        $ pulumi import aws:kinesis/firehoseDeliveryStream:FirehoseDeliveryStream foo arn:aws:firehose:us-east-1:XXX:deliverystream/example
        ```

        > **Note:** Import does not work for stream destination `s3`. Consider using `extended_s3` since `s3` destination is deprecated.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] arn: The Amazon Resource Name (ARN) specifying the Stream
        :param pulumi.Input[str] destination: This is the destination to where the data is delivered. The only options are `s3` (Deprecated, use `extended_s3` instead), `extended_s3`, `redshift`, `elasticsearch`, `splunk`, and `http_endpoint`.
        :param pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamElasticsearchConfigurationArgs']] elasticsearch_configuration: Configuration options if elasticsearch is the destination. More details are given below.
        :param pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamExtendedS3ConfigurationArgs']] extended_s3_configuration: Enhanced configuration options for the s3 destination. More details are given below.
        :param pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamHttpEndpointConfigurationArgs']] http_endpoint_configuration: Configuration options if http_endpoint is the destination. requires the user to also specify a `s3_configuration` block. More details are given below.
        :param pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamKinesisSourceConfigurationArgs']] kinesis_source_configuration: Allows the ability to specify the kinesis stream that is used as the source of the firehose delivery stream.
        :param pulumi.Input[str] name: A name to identify the stream. This is unique to the
               AWS account and region the Stream is created in.
        :param pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamRedshiftConfigurationArgs']] redshift_configuration: Configuration options if redshift is the destination.
               Using `redshift_configuration` requires the user to also specify a
               `s3_configuration` block. More details are given below.
        :param pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamS3ConfigurationArgs']] s3_configuration: Required for non-S3 destinations. For S3 destination, use `extended_s3_configuration` instead. Configuration options for the s3 destination (or the intermediate bucket if the destination
               is redshift). More details are given below.
        :param pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamServerSideEncryptionArgs']] server_side_encryption: Encrypt at rest options.
               Server-side encryption should not be enabled when a kinesis stream is configured as the source of the firehose delivery stream.
        :param pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamSplunkConfigurationArgs']] splunk_configuration: Configuration options if splunk is the destination. More details are given below.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource.
        :param pulumi.Input[str] version_id: Specifies the table version for the output data schema. Defaults to `LATEST`.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: FirehoseDeliveryStreamArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Provides a Kinesis Firehose Delivery Stream resource. Amazon Kinesis Firehose is a fully managed, elastic service to easily deliver real-time data streams to destinations such as Amazon S3 and Amazon Redshift.

        For more details, see the [Amazon Kinesis Firehose Documentation](https://aws.amazon.com/documentation/firehose/).

        ## Example Usage
        ### Extended S3 Destination

        ```python
        import pulumi
        import pulumi_aws as aws

        bucket = aws.s3.Bucket("bucket", acl="private")
        firehose_role = aws.iam.Role("firehoseRole", assume_role_policy=\"\"\"{
          "Version": "2012-10-17",
          "Statement": [
            {
              "Action": "sts:AssumeRole",
              "Principal": {
                "Service": "firehose.amazonaws.com"
              },
              "Effect": "Allow",
              "Sid": ""
            }
          ]
        }
        \"\"\")
        lambda_iam = aws.iam.Role("lambdaIam", assume_role_policy=\"\"\"{
          "Version": "2012-10-17",
          "Statement": [
            {
              "Action": "sts:AssumeRole",
              "Principal": {
                "Service": "lambda.amazonaws.com"
              },
              "Effect": "Allow",
              "Sid": ""
            }
          ]
        }
        \"\"\")
        lambda_processor = aws.lambda_.Function("lambdaProcessor",
            code=pulumi.FileArchive("lambda.zip"),
            role=lambda_iam.arn,
            handler="exports.handler",
            runtime="nodejs12.x")
        extended_s3_stream = aws.kinesis.FirehoseDeliveryStream("extendedS3Stream",
            destination="extended_s3",
            extended_s3_configuration=aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationArgs(
                role_arn=firehose_role.arn,
                bucket_arn=bucket.arn,
                processing_configuration=aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs(
                    enabled=True,
                    processors=[aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs(
                        type="Lambda",
                        parameters=[aws.kinesis.FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs(
                            parameter_name="LambdaArn",
                            parameter_value=lambda_processor.arn.apply(lambda arn: f"{arn}:$LATEST"),
                        )],
                    )],
                ),
            ))
        ```
        ### S3 Destination (deprecated)

        ```python
        import pulumi
        import pulumi_aws as aws

        bucket = aws.s3.Bucket("bucket", acl="private")
        firehose_role = aws.iam.Role("firehoseRole", assume_role_policy=\"\"\"{
          "Version": "2012-10-17",
          "Statement": [
            {
              "Action": "sts:AssumeRole",
              "Principal": {
                "Service": "firehose.amazonaws.com"
              },
              "Effect": "Allow",
              "Sid": ""
            }
          ]
        }
        \"\"\")
        test_stream = aws.kinesis.FirehoseDeliveryStream("testStream",
            destination="s3",
            s3_configuration=aws.kinesis.FirehoseDeliveryStreamS3ConfigurationArgs(
                role_arn=firehose_role.arn,
                bucket_arn=bucket.arn,
            ))
        ```
        ### Redshift Destination

        ```python
        import pulumi
        import pulumi_aws as aws

        test_cluster = aws.redshift.Cluster("testCluster",
            cluster_identifier="tf-redshift-cluster",
            database_name="test",
            master_username="testuser",
            master_password="Mustbe8characters",
            node_type="dc1.large",
            cluster_type="single-node")
        test_stream = aws.kinesis.FirehoseDeliveryStream("testStream",
            destination="redshift",
            s3_configuration=aws.kinesis.FirehoseDeliveryStreamS3ConfigurationArgs(
                role_arn=aws_iam_role["firehose_role"]["arn"],
                bucket_arn=aws_s3_bucket["bucket"]["arn"],
                buffer_size=10,
                buffer_interval=400,
                compression_format="GZIP",
            ),
            redshift_configuration=aws.kinesis.FirehoseDeliveryStreamRedshiftConfigurationArgs(
                role_arn=aws_iam_role["firehose_role"]["arn"],
                cluster_jdbcurl=pulumi.Output.all(test_cluster.endpoint, test_cluster.database_name).apply(lambda endpoint, database_name: f"jdbc:redshift://{endpoint}/{database_name}"),
                username="testuser",
                password="Mustbe8characters",
                data_table_name="test-table",
                copy_options="delimiter '|'",
                data_table_columns="test-col",
                s3_backup_mode="Enabled",
                s3_backup_configuration=aws.kinesis.FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfigurationArgs(
                    role_arn=aws_iam_role["firehose_role"]["arn"],
                    bucket_arn=aws_s3_bucket["bucket"]["arn"],
                    buffer_size=15,
                    buffer_interval=300,
                    compression_format="GZIP",
                ),
            ))
        ```
        ### Elasticsearch Destination

        ```python
        import pulumi
        import pulumi_aws as aws

        test_cluster = aws.elasticsearch.Domain("testCluster")
        test_stream = aws.kinesis.FirehoseDeliveryStream("testStream",
            destination="elasticsearch",
            s3_configuration=aws.kinesis.FirehoseDeliveryStreamS3ConfigurationArgs(
                role_arn=aws_iam_role["firehose_role"]["arn"],
                bucket_arn=aws_s3_bucket["bucket"]["arn"],
                buffer_size=10,
                buffer_interval=400,
                compression_format="GZIP",
            ),
            elasticsearch_configuration=aws.kinesis.FirehoseDeliveryStreamElasticsearchConfigurationArgs(
                domain_arn=test_cluster.arn,
                role_arn=aws_iam_role["firehose_role"]["arn"],
                index_name="test",
                type_name="test",
                processing_configuration=aws.kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationArgs(
                    enabled=True,
                    processors=[aws.kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorArgs(
                        type="Lambda",
                        parameters=[aws.kinesis.FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameterArgs(
                            parameter_name="LambdaArn",
                            parameter_value=f"{aws_lambda_function['lambda_processor']['arn']}:$LATEST",
                        )],
                    )],
                ),
            ))
        ```
        ### Elasticsearch Destination With VPC

        ```python
        import pulumi
        import pulumi_aws as aws

        test_cluster = aws.elasticsearch.Domain("testCluster",
            cluster_config=aws.elasticsearch.DomainClusterConfigArgs(
                instance_count=2,
                zone_awareness_enabled=True,
                instance_type="t2.small.elasticsearch",
            ),
            ebs_options=aws.elasticsearch.DomainEbsOptionsArgs(
                ebs_enabled=True,
                volume_size=10,
            ),
            vpc_options=aws.elasticsearch.DomainVpcOptionsArgs(
                security_group_ids=[aws_security_group["first"]["id"]],
                subnet_ids=[
                    aws_subnet["first"]["id"],
                    aws_subnet["second"]["id"],
                ],
            ))
        firehose_elasticsearch = aws.iam.RolePolicy("firehose-elasticsearch",
            role=aws_iam_role["firehose"]["id"],
            policy=pulumi.Output.all(test_cluster.arn, test_cluster.arn).apply(lambda testClusterArn, testClusterArn1: f\"\"\"{{
          "Version": "2012-10-17",
          "Statement": [
            {{
              "Effect": "Allow",
              "Action": [
                "es:*"
              ],
              "Resource": [
                "{test_cluster_arn}",
                "{test_cluster_arn1}/*"
              ]
            }},
            {{
              "Effect": "Allow",
              "Action": [
                "ec2:DescribeVpcs",
                "ec2:DescribeVpcAttribute",
                "ec2:DescribeSubnets",
                "ec2:DescribeSecurityGroups",
                "ec2:DescribeNetworkInterfaces",
                "ec2:CreateNetworkInterface",
                "ec2:CreateNetworkInterfacePermission",
                "ec2:DeleteNetworkInterface"
              ],
              "Resource": [
                "*"
              ]
            }}
          ]
        }}
        \"\"\"))
        test = aws.kinesis.FirehoseDeliveryStream("test",
            destination="elasticsearch",
            s3_configuration=aws.kinesis.FirehoseDeliveryStreamS3ConfigurationArgs(
                role_arn=aws_iam_role["firehose"]["arn"],
                bucket_arn=aws_s3_bucket["bucket"]["arn"],
            ),
            elasticsearch_configuration=aws.kinesis.FirehoseDeliveryStreamElasticsearchConfigurationArgs(
                domain_arn=test_cluster.arn,
                role_arn=aws_iam_role["firehose"]["arn"],
                index_name="test",
                type_name="test",
                vpc_config=aws.kinesis.FirehoseDeliveryStreamElasticsearchConfigurationVpcConfigArgs(
                    subnet_ids=[
                        aws_subnet["first"]["id"],
                        aws_subnet["second"]["id"],
                    ],
                    security_group_ids=[aws_security_group["first"]["id"]],
                    role_arn=aws_iam_role["firehose"]["arn"],
                ),
            ),
            opts=pulumi.ResourceOptions(depends_on=[firehose_elasticsearch]))
        ```
        ### Splunk Destination

        ```python
        import pulumi
        import pulumi_aws as aws

        test_stream = aws.kinesis.FirehoseDeliveryStream("testStream",
            destination="splunk",
            s3_configuration=aws.kinesis.FirehoseDeliveryStreamS3ConfigurationArgs(
                role_arn=aws_iam_role["firehose"]["arn"],
                bucket_arn=aws_s3_bucket["bucket"]["arn"],
                buffer_size=10,
                buffer_interval=400,
                compression_format="GZIP",
            ),
            splunk_configuration=aws.kinesis.FirehoseDeliveryStreamSplunkConfigurationArgs(
                hec_endpoint="https://http-inputs-mydomain.splunkcloud.com:443",
                hec_token="51D4DA16-C61B-4F5F-8EC7-ED4301342A4A",
                hec_acknowledgment_timeout=600,
                hec_endpoint_type="Event",
                s3_backup_mode="FailedEventsOnly",
            ))
        ```
        ### HTTP Endpoint (e.g. New Relic) Destination

        ```python
        import pulumi
        import pulumi_aws as aws

        test_stream = aws.kinesis.FirehoseDeliveryStream("testStream",
            destination="http_endpoint",
            s3_configuration=aws.kinesis.FirehoseDeliveryStreamS3ConfigurationArgs(
                role_arn=aws_iam_role["firehose"]["arn"],
                bucket_arn=aws_s3_bucket["bucket"]["arn"],
                buffer_size=10,
                buffer_interval=400,
                compression_format="GZIP",
            ),
            http_endpoint_configuration=aws.kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationArgs(
                url="https://aws-api.newrelic.com/firehose/v1",
                name="New Relic",
                access_key="my-key",
                buffering_size=15,
                buffering_interval=600,
                role_arn=aws_iam_role["firehose"]["arn"],
                s3_backup_mode="FailedDataOnly",
                request_configuration=aws.kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationArgs(
                    content_encoding="GZIP",
                    common_attributes=[
                        aws.kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArgs(
                            name="testname",
                            value="testvalue",
                        ),
                        aws.kinesis.FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArgs(
                            name="testname2",
                            value="testvalue2",
                        ),
                    ],
                ),
            ))
        ```

        ## Import

        Kinesis Firehose Delivery streams can be imported using the stream ARN, e.g.

        ```sh
        $ pulumi import aws:kinesis/firehoseDeliveryStream:FirehoseDeliveryStream foo arn:aws:firehose:us-east-1:XXX:deliverystream/example
        ```

        > **Note:** Import does not work for stream destination `s3`. Consider using `extended_s3` since `s3` destination is deprecated.

        :param str resource_name: The name of the resource.
        :param FirehoseDeliveryStreamArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(FirehoseDeliveryStreamArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 arn: Optional[pulumi.Input[str]] = None,
                 destination: Optional[pulumi.Input[str]] = None,
                 destination_id: Optional[pulumi.Input[str]] = None,
                 elasticsearch_configuration: Optional[pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamElasticsearchConfigurationArgs']]] = None,
                 extended_s3_configuration: Optional[pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamExtendedS3ConfigurationArgs']]] = None,
                 http_endpoint_configuration: Optional[pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamHttpEndpointConfigurationArgs']]] = None,
                 kinesis_source_configuration: Optional[pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamKinesisSourceConfigurationArgs']]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 redshift_configuration: Optional[pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamRedshiftConfigurationArgs']]] = None,
                 s3_configuration: Optional[pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamS3ConfigurationArgs']]] = None,
                 server_side_encryption: Optional[pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamServerSideEncryptionArgs']]] = None,
                 splunk_configuration: Optional[pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamSplunkConfigurationArgs']]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 version_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        # Shared implementation behind both ``__init__`` overloads: validates
        # options, assembles the input property bag, and registers the resource
        # with the Pulumi engine. See ``__init__`` for argument documentation.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            # Pin the provider plugin version to this SDK's version.
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: __props__ must not be caller-supplied.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = FirehoseDeliveryStreamArgs.__new__(FirehoseDeliveryStreamArgs)
            __props__.__dict__["arn"] = arn
            # 'destination' is the only required input, unless rehydrating from an existing URN.
            if destination is None and not opts.urn:
                raise TypeError("Missing required property 'destination'")
            __props__.__dict__["destination"] = destination
            __props__.__dict__["destination_id"] = destination_id
            __props__.__dict__["elasticsearch_configuration"] = elasticsearch_configuration
            __props__.__dict__["extended_s3_configuration"] = extended_s3_configuration
            __props__.__dict__["http_endpoint_configuration"] = http_endpoint_configuration
            __props__.__dict__["kinesis_source_configuration"] = kinesis_source_configuration
            __props__.__dict__["name"] = name
            __props__.__dict__["redshift_configuration"] = redshift_configuration
            __props__.__dict__["s3_configuration"] = s3_configuration
            __props__.__dict__["server_side_encryption"] = server_side_encryption
            __props__.__dict__["splunk_configuration"] = splunk_configuration
            __props__.__dict__["tags"] = tags
            __props__.__dict__["version_id"] = version_id
            # tags_all is computed by the provider, never taken from user input.
            __props__.__dict__["tags_all"] = None
        super(FirehoseDeliveryStream, __self__).__init__(
            'aws:kinesis/firehoseDeliveryStream:FirehoseDeliveryStream',
            resource_name,
            __props__,
            opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
arn: Optional[pulumi.Input[str]] = None,
destination: Optional[pulumi.Input[str]] = None,
destination_id: Optional[pulumi.Input[str]] = None,
elasticsearch_configuration: Optional[pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamElasticsearchConfigurationArgs']]] = None,
extended_s3_configuration: Optional[pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamExtendedS3ConfigurationArgs']]] = None,
http_endpoint_configuration: Optional[pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamHttpEndpointConfigurationArgs']]] = None,
kinesis_source_configuration: Optional[pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamKinesisSourceConfigurationArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
redshift_configuration: Optional[pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamRedshiftConfigurationArgs']]] = None,
s3_configuration: Optional[pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamS3ConfigurationArgs']]] = None,
server_side_encryption: Optional[pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamServerSideEncryptionArgs']]] = None,
splunk_configuration: Optional[pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamSplunkConfigurationArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
version_id: Optional[pulumi.Input[str]] = None) -> 'FirehoseDeliveryStream':
"""
Get an existing FirehoseDeliveryStream resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: The Amazon Resource Name (ARN) specifying the Stream
:param pulumi.Input[str] destination: This is the destination to where the data is delivered. The only options are `s3` (Deprecated, use `extended_s3` instead), `extended_s3`, `redshift`, `elasticsearch`, `splunk`, and `http_endpoint`.
:param pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamElasticsearchConfigurationArgs']] elasticsearch_configuration: Configuration options if elasticsearch is the destination. More details are given below.
:param pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamExtendedS3ConfigurationArgs']] extended_s3_configuration: Enhanced configuration options for the s3 destination. More details are given below.
:param pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamHttpEndpointConfigurationArgs']] http_endpoint_configuration: Configuration options if http_endpoint is the destination. requires the user to also specify a `s3_configuration` block. More details are given below.
:param pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamKinesisSourceConfigurationArgs']] kinesis_source_configuration: Allows the ability to specify the kinesis stream that is used as the source of the firehose delivery stream.
:param pulumi.Input[str] name: A name to identify the stream. This is unique to the
AWS account and region the Stream is created in.
:param pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamRedshiftConfigurationArgs']] redshift_configuration: Configuration options if redshift is the destination.
Using `redshift_configuration` requires the user to also specify a
`s3_configuration` block. More details are given below.
:param pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamS3ConfigurationArgs']] s3_configuration: Required for non-S3 destinations. For S3 destination, use `extended_s3_configuration` instead. Configuration options for the s3 destination (or the intermediate bucket if the destination
is redshift). More details are given below.
:param pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamServerSideEncryptionArgs']] server_side_encryption: Encrypt at rest options.
Server-side encryption should not be enabled when a kinesis stream is configured as the source of the firehose delivery stream.
:param pulumi.Input[pulumi.InputType['FirehoseDeliveryStreamSplunkConfigurationArgs']] splunk_configuration: Configuration options if splunk is the destination. More details are given below.
:param pulumi.Input[str] version_id: Specifies the table version for the output data schema. Defaults to `LATEST`.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _FirehoseDeliveryStreamState.__new__(_FirehoseDeliveryStreamState)
__props__.__dict__["arn"] = arn
__props__.__dict__["destination"] = destination
__props__.__dict__["destination_id"] = destination_id
__props__.__dict__["elasticsearch_configuration"] = elasticsearch_configuration
__props__.__dict__["extended_s3_configuration"] = extended_s3_configuration
__props__.__dict__["http_endpoint_configuration"] = http_endpoint_configuration
__props__.__dict__["kinesis_source_configuration"] = kinesis_source_configuration
__props__.__dict__["name"] = name
__props__.__dict__["redshift_configuration"] = redshift_configuration
__props__.__dict__["s3_configuration"] = s3_configuration
__props__.__dict__["server_side_encryption"] = server_side_encryption
__props__.__dict__["splunk_configuration"] = splunk_configuration
__props__.__dict__["tags"] = tags
__props__.__dict__["tags_all"] = tags_all
__props__.__dict__["version_id"] = version_id
return FirehoseDeliveryStream(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter
    def arn(self) -> pulumi.Output[str]:
        """
        The Amazon Resource Name (ARN) specifying the Stream.
        """
        return pulumi.get(self, "arn")
    @property
    @pulumi.getter
    def destination(self) -> pulumi.Output[str]:
        """
        This is the destination to where the data is delivered. The only options are `s3` (Deprecated, use `extended_s3` instead), `extended_s3`, `redshift`, `elasticsearch`, `splunk`, and `http_endpoint`.
        """
        return pulumi.get(self, "destination")
    @property
    @pulumi.getter(name="destinationId")
    def destination_id(self) -> pulumi.Output[str]:
        # NOTE(review): undocumented upstream; presumably the provider-assigned ID
        # of the configured destination -- confirm against the AWS provider docs.
        return pulumi.get(self, "destination_id")
    @property
    @pulumi.getter(name="elasticsearchConfiguration")
    def elasticsearch_configuration(self) -> pulumi.Output[Optional['outputs.FirehoseDeliveryStreamElasticsearchConfiguration']]:
        """
        Configuration options if elasticsearch is the destination. More details are given below.
        """
        return pulumi.get(self, "elasticsearch_configuration")
    @property
    @pulumi.getter(name="extendedS3Configuration")
    def extended_s3_configuration(self) -> pulumi.Output[Optional['outputs.FirehoseDeliveryStreamExtendedS3Configuration']]:
        """
        Enhanced configuration options for the s3 destination. More details are given below.
        """
        return pulumi.get(self, "extended_s3_configuration")
    @property
    @pulumi.getter(name="httpEndpointConfiguration")
    def http_endpoint_configuration(self) -> pulumi.Output[Optional['outputs.FirehoseDeliveryStreamHttpEndpointConfiguration']]:
        """
        Configuration options if http_endpoint is the destination. Requires the user to also specify a `s3_configuration` block. More details are given below.
        """
        return pulumi.get(self, "http_endpoint_configuration")
    @property
    @pulumi.getter(name="kinesisSourceConfiguration")
    def kinesis_source_configuration(self) -> pulumi.Output[Optional['outputs.FirehoseDeliveryStreamKinesisSourceConfiguration']]:
        """
        Allows the ability to specify the kinesis stream that is used as the source of the firehose delivery stream.
        """
        return pulumi.get(self, "kinesis_source_configuration")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        A name to identify the stream. This is unique to the
        AWS account and region the Stream is created in.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="redshiftConfiguration")
    def redshift_configuration(self) -> pulumi.Output[Optional['outputs.FirehoseDeliveryStreamRedshiftConfiguration']]:
        """
        Configuration options if redshift is the destination.
        Using `redshift_configuration` requires the user to also specify a
        `s3_configuration` block. More details are given below.
        """
        return pulumi.get(self, "redshift_configuration")
    @property
    @pulumi.getter(name="s3Configuration")
    def s3_configuration(self) -> pulumi.Output[Optional['outputs.FirehoseDeliveryStreamS3Configuration']]:
        """
        Required for non-S3 destinations. For S3 destination, use `extended_s3_configuration` instead. Configuration options for the s3 destination (or the intermediate bucket if the destination
        is redshift). More details are given below.
        """
        return pulumi.get(self, "s3_configuration")
    @property
    @pulumi.getter(name="serverSideEncryption")
    def server_side_encryption(self) -> pulumi.Output[Optional['outputs.FirehoseDeliveryStreamServerSideEncryption']]:
        """
        Encrypt at rest options.
        Server-side encryption should not be enabled when a kinesis stream is configured as the source of the firehose delivery stream.
        """
        return pulumi.get(self, "server_side_encryption")
    @property
    @pulumi.getter(name="splunkConfiguration")
    def splunk_configuration(self) -> pulumi.Output[Optional['outputs.FirehoseDeliveryStreamSplunkConfiguration']]:
        """
        Configuration options if splunk is the destination. More details are given below.
        """
        return pulumi.get(self, "splunk_configuration")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        A map of tags assigned to the resource.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter(name="tagsAll")
    def tags_all(self) -> pulumi.Output[Mapping[str, str]]:
        # NOTE(review): undocumented upstream; by AWS-provider convention this is
        # the full tag map including provider-level default tags -- confirm.
        return pulumi.get(self, "tags_all")
    @property
    @pulumi.getter(name="versionId")
    def version_id(self) -> pulumi.Output[str]:
        """
        Specifies the table version for the output data schema. Defaults to `LATEST`.
        """
        return pulumi.get(self, "version_id")
| StarcoderdataPython |
5104703 | <filename>python/dgl/dataloading/neighbor.py
"""Data loading components for neighbor sampling"""
from .dataloader import BlockSampler
from .. import sampling, subgraph, distributed
class MultiLayerNeighborSampler(BlockSampler):
    """Sampler that builds computational dependency of node representations via
    neighbor sampling for multilayer GNN.

    This sampler will make every node gather messages from a fixed number of neighbors
    per edge type.  The neighbors are picked uniformly.

    Parameters
    ----------
    fanouts : list[int] or list[dict[etype, int] or None]
        List of neighbors to sample per edge type for each GNN layer, starting from the
        first layer.

        If the graph is homogeneous, only an integer is needed for each layer.

        If None is provided for one layer, all neighbors will be included regardless of
        edge types.

        If -1 is provided for one edge type on one layer, then all inbound edges
        of that edge type will be included.
    replace : bool, default False
        Whether to sample with replacement
    return_eids : bool, default False
        Whether to return the edge IDs involved in message passing in the block.
        If True, the edge IDs will be stored as an edge feature named ``dgl.EID``.

    Examples
    --------
    To train a 3-layer GNN for node classification on a set of nodes ``train_nid`` on
    a homogeneous graph where each node takes messages from 5, 10, 15 neighbors for
    the first, second, and third layer respectively (assuming the backend is PyTorch):

    >>> sampler = dgl.dataloading.MultiLayerNeighborSampler([5, 10, 15])
    >>> collator = dgl.dataloading.NodeCollator(g, train_nid, sampler)
    >>> dataloader = torch.utils.data.DataLoader(
    ...     collator.dataset, collate_fn=collator.collate,
    ...     batch_size=1024, shuffle=True, drop_last=False, num_workers=4)
    >>> for blocks in dataloader:
    ...     train_on(blocks)

    If training on a heterogeneous graph and you want different number of neighbors for each
    edge type, one should instead provide a list of dicts.  Each dict would specify the
    number of neighbors to pick per edge type.

    >>> sampler = dgl.dataloading.MultiLayerNeighborSampler([
    ...     {('user', 'follows', 'user'): 5,
    ...      ('user', 'plays', 'game'): 4,
    ...      ('game', 'played-by', 'user'): 3}] * 3)
    """
    def __init__(self, fanouts, replace=False, return_eids=False):
        super().__init__(len(fanouts), return_eids)
        self.fanouts = fanouts
        self.replace = replace

    def sample_frontier(self, block_id, g, seed_nodes):
        """Sample the frontier graph for layer ``block_id``.

        A fanout of ``None`` selects the full in-subgraph of the seed nodes;
        :class:`~dgl.distributed.DistGraph` inputs are routed to the distributed
        sampling implementations, everything else to the local ones.
        """
        fanout = self.fanouts[block_id]
        if isinstance(g, distributed.DistGraph):
            if fanout is None:
                frontier = distributed.in_subgraph(g, seed_nodes)
            else:
                frontier = distributed.sample_neighbors(g, seed_nodes, fanout, replace=self.replace)
        else:
            if fanout is None:
                frontier = subgraph.in_subgraph(g, seed_nodes)
            else:
                frontier = sampling.sample_neighbors(g, seed_nodes, fanout, replace=self.replace)
        return frontier
class MultiLayerFullNeighborSampler(MultiLayerNeighborSampler):
    """Sampler that builds computational dependency of node representations by taking messages
    from all neighbors for multilayer GNN.

    This sampler will make every node gather messages from every single neighbor per edge type.

    Parameters
    ----------
    n_layers : int
        The number of GNN layers to sample.
    return_eids : bool, default False
        Whether to return the edge IDs involved in message passing in the block.
        If True, the edge IDs will be stored as an edge feature named ``dgl.EID``.

    Examples
    --------
    To train a 3-layer GNN for node classification on a set of nodes ``train_nid`` on
    a homogeneous graph where each node takes messages from all neighbors for the first,
    second, and third layer respectively (assuming the backend is PyTorch):

    >>> sampler = dgl.dataloading.MultiLayerFullNeighborSampler(3)
    >>> collator = dgl.dataloading.NodeCollator(g, train_nid, sampler)
    >>> dataloader = torch.utils.data.DataLoader(
    ...     collator.dataset, collate_fn=collator.collate,
    ...     batch_size=1024, shuffle=True, drop_last=False, num_workers=4)
    >>> for blocks in dataloader:
    ...     train_on(blocks)
    """
    def __init__(self, n_layers, return_eids=False):
        # A fanout of ``None`` for a layer means "take every in-neighbor".
        fanouts = [None for _ in range(n_layers)]
        super().__init__(fanouts, return_eids=return_eids)
| StarcoderdataPython |
5024423 | # Copyright 2018 NTRlab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import logging
from sawtooth_bgt.bgt_consensus.consensus_state import ConsensusState
from sawtooth_bgt.bgt_consensus.consensus_state_store \
import ConsensusStateStore
from sawtooth_bgt.bgt_consensus import bgt_enclave_factory as factory
from sawtooth_bgt.bgt_consensus import utils
from sawtooth_bgt.bgt_consensus.bgt_settings_view import BgtSettingsView
from sawtooth_bgt.journal.block_wrapper import BlockWrapper
from sawtooth_bgt.journal.consensus.consensus \
import ForkResolverInterface
from sawtooth_bgt_common.validator_registry_view.validator_registry_view \
import ValidatorRegistryView
LOGGER = logging.getLogger(__name__)
class BgtForkResolver(ForkResolverInterface):
    """Provides the fork resolution interface for the BlockValidator to use
    when deciding between 2 forks.

    ``compare_forks`` chooses deterministically by applying, in order:
    wait-certificate presence, wait duration (same parent), aggregate local
    mean (different parents), and finally lexicographic comparison of the
    block header signatures as a tie-breaker.
    """
    def __init__(self,
                 block_cache,
                 state_view_factory,
                 data_dir,
                 config_dir,
                 validator_id):
        """Initialize the object, is passed (read-only) state access objects.

        Args:
            block_cache (BlockCache): Dict interface to the block cache.
                Any predecessor block to blocks handed to this object will
                be present in this dict.
            state_view_factory (StateViewFactory): A factory that can be
                used to create read-only views of state for a particular
                merkle root, in particular the state as it existed when a
                particular block was the chain head.
            data_dir (str): path to location where persistent data for the
                consensus module can be stored.
            config_dir (str): path to location where config data for the
                consensus module can be found.
            validator_id (str): A unique ID for this validator
        Returns:
            none.
        """
        super().__init__(
            block_cache,
            state_view_factory,
            data_dir,
            config_dir,
            validator_id)
        self._block_cache = block_cache
        self._state_view_factory = state_view_factory
        self._data_dir = data_dir
        self._config_dir = config_dir
        self._validator_id = validator_id
        # Persistent store of per-block consensus state, keyed by block ID.
        self._consensus_state_store = \
            ConsensusStateStore(
                data_dir=self._data_dir,
                validator_id=self._validator_id)

    def compare_forks(self, cur_fork_head, new_fork_head):
        """Given the head of two forks, return which should be the fork that
        the validator chooses.  When this is called both forks consist of
        only valid blocks.

        Args:
            cur_fork_head (Block): The current head of the block chain.
            new_fork_head (Block): The head of the fork that is being
                evaluated.
        Returns:
            Boolean: True if the new chain should replace the current chain.
            False if the new chain should be discarded.
        """
        chosen_fork_head = None
        state_view = \
            BlockWrapper.state_view_for_block(
                block_wrapper=cur_fork_head,
                state_view_factory=self._state_view_factory)
        bgt_enclave_module = \
            factory.BgtEnclaveFactory.get_bgt_enclave_module(
                state_view=state_view,
                config_dir=self._config_dir,
                data_dir=self._data_dir)
        # A wait certificate of None marks a block as non-BGT.
        current_fork_wait_certificate = \
            utils.deserialize_wait_certificate(
                block=cur_fork_head,
                bgt_enclave_module=bgt_enclave_module)
        new_fork_wait_certificate = \
            utils.deserialize_wait_certificate(
                block=new_fork_head,
                bgt_enclave_module=bgt_enclave_module)
        # If we ever get a new fork head that is not a BGT block, then bail
        # out. This should never happen, but defensively protect against it.
        if new_fork_wait_certificate is None:
            raise \
                TypeError(
                    'New fork head {} is not a BGT block'.format(
                        new_fork_head.identifier[:8]))
        # Criterion #1: If the current fork head is not BGT, then check to see
        # if the new fork head is building on top of it. That would be okay.
        # However if not, then we don't have a good deterministic way of
        # choosing a winner. Again, the latter should never happen, but
        # defensively protect against it.
        if current_fork_wait_certificate is None:
            if new_fork_head.previous_block_id == cur_fork_head.identifier:
                LOGGER.info(
                    'Choose new fork %s over current fork %s: '
                    'New fork head switches consensus to BGT',
                    new_fork_head.header_signature[:8],
                    cur_fork_head.header_signature[:8])
                chosen_fork_head = new_fork_head
            else:
                raise \
                    TypeError(
                        'Trying to compare a BGT block {} to a non-BGT '
                        'block {} that is not the direct predecessor'.format(
                            new_fork_head.identifier[:8],
                            cur_fork_head.identifier[:8]))
        # Criterion #2: If they share the same immediate previous block,
        # then the one with the smaller wait duration is chosen
        elif cur_fork_head.previous_block_id == \
                new_fork_head.previous_block_id:
            if current_fork_wait_certificate.duration < \
                    new_fork_wait_certificate.duration:
                LOGGER.info(
                    'Choose current fork %s over new fork %s: '
                    'Current fork wait duration (%f) less than new fork wait '
                    'duration (%f)',
                    cur_fork_head.header_signature[:8],
                    new_fork_head.header_signature[:8],
                    current_fork_wait_certificate.duration,
                    new_fork_wait_certificate.duration)
                chosen_fork_head = cur_fork_head
            elif new_fork_wait_certificate.duration < \
                    current_fork_wait_certificate.duration:
                LOGGER.info(
                    'Choose new fork %s over current fork %s: '
                    'New fork wait duration (%f) less than current fork wait '
                    'duration (%f)',
                    new_fork_head.header_signature[:8],
                    cur_fork_head.header_signature[:8],
                    new_fork_wait_certificate.duration,
                    current_fork_wait_certificate.duration)
                chosen_fork_head = new_fork_head
        # Criterion #3: If they don't share the same immediate previous
        # block, then the one with the higher aggregate local mean wins
        else:
            # Get the consensus state for the current fork head and the
            # block immediately before the new fork head (as we haven't
            # committed to the block yet). So that the new fork doesn't
            # have to fight with one hand tied behind its back, add the
            # new fork head's wait certificate's local mean to the
            # aggregate local mean for the predecessor block's consensus
            # state for the comparison.
            current_fork_consensus_state = \
                ConsensusState.consensus_state_for_block_id(
                    block_id=cur_fork_head.identifier,
                    block_cache=self._block_cache,
                    state_view_factory=self._state_view_factory,
                    consensus_state_store=self._consensus_state_store,
                    bgt_enclave_module=bgt_enclave_module)
            new_fork_consensus_state = \
                ConsensusState.consensus_state_for_block_id(
                    block_id=new_fork_head.previous_block_id,
                    block_cache=self._block_cache,
                    state_view_factory=self._state_view_factory,
                    consensus_state_store=self._consensus_state_store,
                    bgt_enclave_module=bgt_enclave_module)
            new_fork_aggregate_local_mean = \
                new_fork_consensus_state.aggregate_local_mean + \
                new_fork_wait_certificate.local_mean
            if current_fork_consensus_state.aggregate_local_mean > \
                    new_fork_aggregate_local_mean:
                LOGGER.info(
                    'Choose current fork %s over new fork %s: '
                    'Current fork aggregate local mean (%f) greater than new '
                    'fork aggregate local mean (%f)',
                    cur_fork_head.header_signature[:8],
                    new_fork_head.header_signature[:8],
                    current_fork_consensus_state.aggregate_local_mean,
                    new_fork_aggregate_local_mean)
                chosen_fork_head = cur_fork_head
            elif new_fork_aggregate_local_mean > \
                    current_fork_consensus_state.aggregate_local_mean:
                LOGGER.info(
                    'Choose new fork %s over current fork %s: '
                    'New fork aggregate local mean (%f) greater than current '
                    'fork aggregate local mean (%f)',
                    new_fork_head.header_signature[:8],
                    cur_fork_head.header_signature[:8],
                    new_fork_aggregate_local_mean,
                    current_fork_consensus_state.aggregate_local_mean)
                chosen_fork_head = new_fork_head
        # Criterion #4: If we have gotten to this point and we have not chosen
        # yet, we are going to fall back on using the block identifiers
        # (header signatures) . The lexicographically larger one will be the
        # chosen one. The chance that they are equal are infinitesimally
        # small.
        if chosen_fork_head is None:
            if cur_fork_head.header_signature > \
                    new_fork_head.header_signature:
                LOGGER.info(
                    'Choose current fork %s over new fork %s: '
                    'Current fork header signature (%s) greater than new fork '
                    'header signature (%s)',
                    cur_fork_head.header_signature[:8],
                    new_fork_head.header_signature[:8],
                    cur_fork_head.header_signature[:8],
                    new_fork_head.header_signature[:8])
                chosen_fork_head = cur_fork_head
            else:
                LOGGER.info(
                    'Choose new fork %s over current fork %s: '
                    'New fork header signature (%s) greater than current fork '
                    'header signature (%s)',
                    new_fork_head.header_signature[:8],
                    cur_fork_head.header_signature[:8],
                    new_fork_head.header_signature[:8],
                    cur_fork_head.header_signature[:8])
                chosen_fork_head = new_fork_head
        # Now that we have chosen a fork for the chain head, if we chose the
        # new fork and it is a BGT block (i.e., it has a wait certificate),
        # we need to create consensus state store information for the new
        # fork's chain head.
        if chosen_fork_head == new_fork_head:
            # Get the state view for the previous block in the chain so we can
            # create a BGT enclave
            previous_block = None
            try:
                previous_block = \
                    self._block_cache[new_fork_head.previous_block_id]
            except KeyError:
                pass
            state_view = \
                BlockWrapper.state_view_for_block(
                    block_wrapper=previous_block,
                    state_view_factory=self._state_view_factory)
            validator_registry_view = ValidatorRegistryView(state_view)
            try:
                # Get the validator info for the validator that claimed the
                # fork head
                validator_info = \
                    validator_registry_view.get_validator_info(
                        new_fork_head.header.signer_public_key)
                # Get the consensus state for the new fork head's previous
                # block, let the consensus state update itself appropriately
                # based upon the validator claiming a block, and then
                # associate the consensus state with the new block in the
                # store.
                consensus_state = \
                    ConsensusState.consensus_state_for_block_id(
                        block_id=new_fork_head.previous_block_id,
                        block_cache=self._block_cache,
                        state_view_factory=self._state_view_factory,
                        consensus_state_store=self._consensus_state_store,
                        bgt_enclave_module=bgt_enclave_module)
                consensus_state.validator_did_claim_block(
                    validator_info=validator_info,
                    wait_certificate=new_fork_wait_certificate,
                    bgt_settings_view=BgtSettingsView(state_view))
                self._consensus_state_store[new_fork_head.identifier] = \
                    consensus_state
                LOGGER.debug(
                    'Create consensus state: BID=%s, ALM=%f, TBCC=%d',
                    new_fork_head.identifier[:8],
                    consensus_state.aggregate_local_mean,
                    consensus_state.total_block_claim_count)
            except KeyError:
                # This _should_ never happen. The new potential fork head
                # has to have been a BGT block and for it to be verified
                # by the BGT block verifier, it must have been signed by
                # validator in the validator registry. If not found, we
                # are going to just stick with the current fork head.
                LOGGER.error(
                    'New fork head claimed by validator not in validator '
                    'registry: %s...%s',
                    new_fork_head.header.signer_public_key[:8],
                    new_fork_head.header.signer_public_key[-8:])
                chosen_fork_head = cur_fork_head
        return chosen_fork_head == new_fork_head
| StarcoderdataPython |
4883546 | import json
import re
import os
import pathlib
import requests
from urllib.parse import urlparse, urlunparse, parse_qs, unquote
from girder import events, logger
from girder.constants import AccessType
from girder.models.folder import Folder
from girder.models.setting import Setting
from ..import_providers import ImportProvider
from ..data_map import DataMap
from ..file_map import FileMap
from ..import_item import ImportItem
from ..entity import Entity
from ... import constants
# Matches a DOI (e.g. "10.5072/FK2/ABCDEF") anywhere in a citation string.
_DOI_REGEX = re.compile(r'(10.\d{4,9}/[-._;()/:A-Z0-9]+)', re.IGNORECASE)
# Captures the first double-quoted substring of a citation (the dataset title).
_QUOTES_REGEX = re.compile(r'"(.*)"')
# Captures the filename from a 'Content-Disposition: ... filename="..."' header.
_CNTDISP_REGEX = re.compile(r'filename="(.*)"')
# Captures the RFC 5987/6266 encoded variant: attachment; filename*=utf-8''name
_CNTDISPS_REGEX = re.compile(r"^attachment; filename\*=.*''(.*)$")
def _query_dataverse(search_url):
    """Resolve a single file via the Dataverse search API.

    Args:
        search_url: full search API URL expected to match exactly one item.
    Returns:
        (title, files, doi) where ``files`` is a one-element list of file
        metadata dicts, ``title`` falls back to the file name if the citation
        carries no quoted title, and ``doi`` is None if none is found.
    Raises:
        ValueError: if the search does not return exactly one item.
    """
    req = requests.get(search_url)
    data = req.json()["data"]
    if data['count_in_response'] != 1:
        # Previously a bare ValueError; include context for debuggability.
        raise ValueError(
            'Expected exactly one Dataverse search result, got {} for {}'.format(
                data['count_in_response'], search_url))
    item = data['items'][0]
    files = [{
        'filename': item['name'],
        'mimeType': item['file_content_type'],
        'filesize': item['size_in_bytes'],
        'id': item['file_id'],
        'doi': item.get('filePersistentId')  # https://github.com/IQSS/dataverse/issues/5339
    }]
    # The dataset title is the first quoted substring of the citation, if any.
    title = item['name']
    title_search = _QUOTES_REGEX.search(item['dataset_citation'])
    if title_search is not None:
        title = title_search.group().strip('"')
    doi = None
    doi_search = _DOI_REGEX.search(item['dataset_citation'])
    if doi_search is not None:
        doi = "doi:" + doi_search.group()  # TODO: get a proper protocol
    return title, files, doi
def _get_attrs_via_head(obj, url):
    """Refine a file's name and size by probing its access URL.

    Falls back to the values already present in ``obj`` when the server
    reveals nothing better.
    """
    filename = obj["filename"]
    filesize = obj["filesize"]
    # Start with a plain HEAD.  The trick: this fails with 403 when the file
    # lives on S3 -- see https://github.com/IQSS/dataverse/issues/5322
    response = requests.head(url, allow_redirects=True)
    if response.ok:
        filesize = int(response.headers.get("Content-Length", filesize))
    else:
        # S3 honors range requests, so ask for just 100 bytes to obtain the
        # headers we care about without downloading the file.
        response = requests.get(url, headers={"Range": "bytes=0-100"})
        if not response.ok or "Content-Range" not in response.headers:
            # Nothing more we can learn; keep the original metadata.
            return filename, filesize
        filesize = int(response.headers["Content-Range"].split("/")[-1])
    # Whichever request succeeded, try to pull a better filename out of the
    # Content-Disposition header (plain and RFC 5987 encoded forms, in order).
    disposition = response.headers.get("Content-Disposition")
    if disposition:
        candidates = (
            _CNTDISP_REGEX.search(disposition),
            _CNTDISPS_REGEX.match(disposition),
        )
        for match in candidates:
            if match:
                filename = unquote(match.groups()[0])
                break
    return filename, filesize
class DataverseImportProvider(ImportProvider):
    def __init__(self):
        super().__init__('Dataverse')
        # Invalidate the cached hostname regex whenever Dataverse settings change.
        events.bind('model.setting.save.after', 'wholetale', self.setting_changed)
    @staticmethod
    def get_base_url_setting():
        """Return the configured Dataverse installations URL (or instance URL)."""
        return Setting().get(constants.PluginSettings.DATAVERSE_URL)
    @staticmethod
    def get_extra_hosts_setting():
        """Return the list of additional Dataverse hostnames from settings."""
        return Setting().get(constants.PluginSettings.DATAVERSE_EXTRA_HOSTS)
def create_regex(self):
url = self.get_base_url_setting()
if not url.endswith('json'):
url = urlunparse(
urlparse(url)._replace(path='/api/info/version')
)
try:
req = requests.get(url)
data = req.json()
except Exception:
logger.warning(
"[dataverse] failed to fetch installations, using a local copy."
)
with open(os.path.join(os.path.dirname(__file__), "installations.json"), "r") as fp:
data = json.load(fp)
# in case DATAVERSE_URL points to a specific instance rather than an installation JSON
# we need to add its domain to the regex
single_hostname = urlparse(self.get_base_url_setting()).netloc
domains = [
_["hostname"]
for _ in data.get("installations", [{"hostname": single_hostname}])
]
domains += self.get_extra_hosts_setting()
if domains:
return re.compile("^https?://(" + "|".join(domains) + ").*$")
else:
return re.compile("^$")
    def getDatasetUID(self, doc: object, user: object) -> str:
        """Return the persistent identifier recorded in the dataset's metadata.

        Accepts either an item (resolved to its parent folder first) or a
        folder; climbs to the parent folder recursively until a folder with an
        ``identifier`` metadata entry is found.
        """
        if 'folderId' in doc:
            # It's an item, grab the parent which should contain all the info
            doc = Folder().load(doc['folderId'], user=user, level=AccessType.READ)
        # obj is a folder at this point use its meta
        if not doc["meta"].get("identifier"):
            doc = Folder().load(doc["parentId"], user=user, level=AccessType.READ)
            return self.getDatasetUID(doc, user)
        return doc['meta']['identifier']
def setting_changed(self, event):
triggers = {
constants.PluginSettings.DATAVERSE_URL, constants.PluginSettings.DATAVERSE_EXTRA_HOSTS
}
if not hasattr(event, "info") or event.info.get('key', '') not in triggers:
return
self._regex = None
@staticmethod
def _get_meta_from_dataset(url):
"""Get metadata for Dataverse dataset.
Handles: {siteURL}/dataset.xhtml?persistentId={persistentId}
Handles: {siteURL}/api/datasets/{:id}
"""
if "persistentId" in url.query:
dataset_url = urlunparse(
url._replace(path='/api/datasets/:persistentId')
)
else:
dataset_url = urlunparse(url)
req = requests.get(dataset_url)
return req.json()
def _parse_dataset(self, url):
"""Extract title, file, doi from Dataverse resource.
Handles: {siteURL}/dataset.xhtml?persistentId={persistentId}
Handles: {siteURL}/api/datasets/{:id}
"""
data = self._get_meta_from_dataset(url)
meta = data['data']['latestVersion']['metadataBlocks']['citation']['fields']
title = next(_['value'] for _ in meta if _['typeName'] == 'title')
doi = '{protocol}:{authority}/{identifier}'.format(**data['data'])
files = []
for obj in data['data']['latestVersion']['files']:
files.append({
'filename': obj['dataFile']['filename'],
'filesize': obj['dataFile']['filesize'],
'mimeType': obj['dataFile']['contentType'],
'id': obj['dataFile']['id'],
'doi': obj['dataFile']['persistentId'],
'directoryLabel': obj.get('directoryLabel', ''),
})
return title, files, doi
@staticmethod
def _files_to_hierarchy(files):
hierarchy = {'+files+': []}
for fobj in files:
temp = hierarchy
for subdir in pathlib.Path(fobj.get('directoryLabel', '')).parts:
if subdir not in temp:
temp[subdir] = {'+files+': []}
temp = temp[subdir]
temp['+files+'].append(fobj)
return hierarchy
@staticmethod
def _parse_file_url(url):
"""Extract title, file, doi from Dataverse resource.
Handles:
{siteURL}/file.xhtml?persistentId={persistentId}&...
{siteURL}/api/access/datafile/:persistentId/?persistentId={persistentId}
"""
qs = parse_qs(url.query)
try:
full_doi = qs['persistentId'][0]
except (KeyError, ValueError):
# fail here in a meaningful way...
raise
file_persistent_id = os.path.basename(full_doi)
doi = os.path.dirname(full_doi)
search_url = urlunparse(
url._replace(path='/api/search', query='q=filePersistentId:' + file_persistent_id)
)
title, files, _ = _query_dataverse(search_url)
return title, files, doi
@staticmethod
def _parse_access_url(url):
"""Extract title, file, doi from Dataverse resource.
Handles: {siteURL}/api/access/datafile/{fileId}
"""
fileId = os.path.basename(url.path)
search_url = urlunparse(
url._replace(path='/api/search', query='q=entityId:' + fileId)
)
return _query_dataverse(search_url)
    @staticmethod
    def _sanitize_files(url, files):
        """Sanitize files metadata since results from search queries are inaccurate.

        File size is wrong: https://github.com/IQSS/dataverse/issues/5321
        URL doesn't point to original format, by default.

        Yields file dicts with corrected ``filename``/``filesize`` and an
        added ``url``; tab-separated files are yielded twice (original format
        first, then the default export).
        """
        def _update_attrs(url, obj, query):
            # NOTE: reads ``fileId`` from the enclosing loop via closure; it is
            # rebound before each call below (late binding is intentional here).
            access_url = urlunparse(
                url._replace(path='/api/access/datafile/' + fileId,
                             query=query)
            )
            name, size = _get_attrs_via_head(obj, access_url)
            obj['filesize'] = size
            obj['filename'] = name
            obj['url'] = access_url
            return obj
        for obj in files:
            fileId = str(obj['id'])
            # Register original too
            if obj['mimeType'] == 'text/tab-separated-values':
                yield _update_attrs(url, obj.copy(), 'format=original')
                yield _update_attrs(url, obj.copy(), '')
            else:
                obj['url'] = urlunparse(
                    url._replace(path='/api/access/datafile/' + fileId,
                                 query='')
                )
                yield obj
def parse_pid(self, pid: str, sanitize: bool = False):
    """Resolve *pid* (a Dataverse URL) to ``(title, files, doi)``.

    Dispatches to the parser matching the URL shape; with ``sanitize=True``
    the file metadata is corrected via extra HEAD requests.
    """
    url = urlparse(pid)
    path = url.path
    if path.endswith('file.xhtml') or path.startswith('/api/access/datafile/:persistentId'):
        parser = self._parse_file_url
    elif path.startswith('/api/access/datafile'):
        parser = self._parse_access_url
    else:
        parser = self._parse_dataset
    title, files, doi = parser(url)
    if sanitize:
        files = list(self._sanitize_files(url, files))
    return title, files, doi
def lookup(self, entity: Entity) -> DataMap:
    """Resolve *entity* into a DataMap summarizing the whole dataset."""
    pid = entity.getValue()
    title, files, doi = self.parse_pid(pid)
    total_size = sum(item['filesize'] for item in files)
    return DataMap(pid, total_size, doi=doi, name=title,
                   repository=self.getName())
def listFiles(self, entity: Entity) -> FileMap:
    """Build a FileMap tree from the recursive import-item stream."""
    folder_stack = []
    root = None
    for item in self._listRecursive(entity.getUser(), entity.getValue(), None):
        if item.type == ImportItem.FOLDER:
            if folder_stack:
                child = folder_stack[-1].addChild(item.name)
            else:
                child = FileMap(item.name)
            folder_stack.append(child)
        elif item.type == ImportItem.END_FOLDER:
            # The last END_FOLDER pops the outermost folder, which is the tree root.
            root = folder_stack.pop()
        elif item.type == ImportItem.FILE:
            folder_stack[-1].addFile(item.name, item.size)
    return root
def _listRecursive(self, user, pid: str, name: str, base_url: str = None,
                   progress=None):
    """Yield ImportItem events (FOLDER / FILE / END_FOLDER) for *pid*.

    The dataset is resolved via ``parse_pid`` with sanitized file metadata,
    folded into a directory hierarchy, and emitted depth-first as a stream
    of paired FOLDER/END_FOLDER markers with FILE items in between.
    ``base_url`` and ``progress`` are unused in this implementation.
    """
    def _recurse_hierarchy(hierarchy):
        # '+files+' holds the files directly in this folder; popping it
        # leaves only subfolder names as keys (mutates the hierarchy dict).
        files = hierarchy.pop('+files+')
        for obj in files:
            yield ImportItem(
                ImportItem.FILE, obj['filename'],
                size=obj['filesize'],
                mimeType=obj.get('mimeType', 'application/octet-stream'),
                url=obj['url'],
                identifier=obj.get('doi') or doi  # ``doi`` closed over from below
            )
        for folder in hierarchy.keys():
            yield ImportItem(ImportItem.FOLDER, name=folder)
            yield from _recurse_hierarchy(hierarchy[folder])
            yield ImportItem(ImportItem.END_FOLDER)
    title, files, doi = self.parse_pid(pid, sanitize=True)
    hierarchy = self._files_to_hierarchy(files)
    yield ImportItem(ImportItem.FOLDER, name=title, identifier=doi)
    yield from _recurse_hierarchy(hierarchy)
    yield ImportItem(ImportItem.END_FOLDER)
def proto_tale_from_datamap(self, dataMap: DataMap, asTale: bool) -> object:
    """Build a proto-Tale dict from *dataMap*, enriched with Dataverse
    citation metadata (title, description, subjects, authors) when the
    dataset is imported as a Tale.
    """
    proto_tale = super().proto_tale_from_datamap(dataMap, asTale)  # get the defaults
    if not asTale:
        return proto_tale  # We only bring extra metadata for datasets imported as Tales
    data = self._get_meta_from_dataset(urlparse(dataMap["dataId"]))
    meta = data["data"]["latestVersion"]["metadataBlocks"]["citation"]["fields"]
    for field in meta:
        if field["typeName"] == "title":
            proto_tale["title"] = field["value"]
        elif field["typeName"] == "dsDescription":
            # In theory there can be more than one ... needs example
            proto_tale["description"] = field["value"][0]["dsDescriptionValue"]["value"]
        elif field["typeName"] == "subject":
            proto_tale["category"] = "; ".join(field["value"])
        elif field["typeName"] == "author":
            authors = []
            for author in field["value"]:
                raw_author = author["authorName"]["value"]
                if "," in raw_author:
                    lastName, firstName = raw_author.split(",", 1)
                elif " " in raw_author:
                    firstName, lastName = raw_author.split(" ", 1)
                else:
                    # Fix: a single-token name (organisation or mononym)
                    # previously raised ValueError on the space split;
                    # treat the whole string as the last name instead.
                    firstName, lastName = "", raw_author
                if (
                    "authorIdentifierScheme" in author
                    and author["authorIdentifierScheme"]["value"] == "ORCID"  # noqa
                ):
                    orcid = author["authorIdentifier"]["value"]
                else:
                    # Placeholder ORCID when the author supplied none.
                    orcid = "0000-0000-0000-0000"
                authors.append(
                    dict(
                        firstName=firstName.strip(),
                        lastName=lastName.strip(),
                        orcid=f"https://www.orcid.org/{orcid}",
                    )
                )
            proto_tale["authors"] = authors
    return proto_tale
| StarcoderdataPython |
5052157 | from os import path
import sys
import json
from decimal import Decimal
# Make the handlers package importable when running this script standalone.
sys.path.append(path.join(path.dirname(__file__), '../handlers'))
from helpers.utils import get_dynamodb_table, get_build_item

# DynamoDB table handle created at import time.
# NOTE(review): ``queue_table`` is never used below -- confirm it is needed.
queue_table = get_dynamodb_table()
def stringify_decimal(obj):
    """``json.dumps`` fallback: render Decimal values as strings, pass
    everything else through unchanged."""
    return str(obj) if isinstance(obj, Decimal) else obj
# Fetch a single build record by its composite key, then round-trip it
# through JSON so DynamoDB ``Decimal`` values are rendered as strings.
build_item = get_build_item("BLD#master#2020-07-12T15:28:04.697734-05:00")
pages = json.loads(json.dumps({
    "pages": build_item["pages"],
}, default=stringify_decimal))
print(pages)
| StarcoderdataPython |
3460353 | # Generated by Django 3.2.3 on 2021-06-22 02:54
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


# Auto-generated Django migration: introduces the ``Shirt`` model
# (size/quantity/colour choices plus a payment-proof image upload)
# with a foreign key to the configured auth user model.
class Migration(migrations.Migration):

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('preelec9_camp', '0014_auto_20210615_1653'),
    ]

    operations = [
        migrations.CreateModel(
            name='Shirt',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('size_shirt', models.CharField(choices=[('S', 'S'), ('M', 'M'), ('L', 'L'), ('XL', 'XL'), ('XXL', 'XXL')], max_length=10)),
                # NOTE(review): choice keys are ints on a CharField, so stored
                # values will be coerced to str -- confirm this is intended.
                ('quantity_shirt', models.CharField(choices=[(1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5')], max_length=10)),
                ('color_shirt', models.CharField(choices=[('black', 'black'), ('white', 'white')], max_length=20)),
                ('check_shirt', models.ImageField(upload_to='images/preelec9_camp/shirt')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'Size_shirt',
                'verbose_name_plural': 'Size_shirt',
            },
        ),
    ]
| StarcoderdataPython |
4874881 | <gh_stars>1-10
import argparse
import torch
from utils import channelwised_normalize, binarize, SSIM
import torch.nn as nn
import model as Model
import numpy as np
from tqdm import tqdm
import random
import torchvision
from tensorboardX import SummaryWriter
import os
from torch.utils.data import DataLoader
from torchvision import transforms
import torch.nn.functional as F
from datasets import MVTecAd
from matplotlib import pyplot as plt
from PIL import ImageDraw
import cv2
from PIL import ImageFont
from PIL import Image
from utils import enhanceMorph
from glob import glob
os.environ["PYTHONBREAKPOINT"] = "pudb.set_trace"
from sklearn.metrics import roc_auc_score
if __name__ == "__main__":
    # Benchmark script: for each saved autoencoder checkpoint, reconstruct
    # every defective hazelnut test image, score the SSIM-difference map
    # against the ground-truth defect mask with ROC AUC, and log per-file,
    # per-defect and overall averages plus best/worst summaries.
    eval_folder = "eval"
    os.makedirs(eval_folder, exist_ok=True)
    log = open(os.path.join(eval_folder, "benchmark.txt"), "w")
    perf_results = {}
    # Sentinels: AUC lies in [0, 1], so 2 is below/above any real value.
    min_cat = {"auc": 2}
    max_cat = {"auc": 0}
    max_all = {"auc": 0}
    min_all = {"auc": 2}
    m_paths = ["weights/v2_420_MSE_512.pth", "weights/v4_110_MSE_256.pth", "weights/v5_340_MSE_512.pth", "weights/v2_360_SSIM_512.pth", "weights/v4_380_SSIM_512.pth", "weights/v5_SSIM_450_256.pth"]
    arcs = ["Bottleneckv2", "Bottleneckv4", "Bottleneckv5","Bottleneckv2", "Bottleneckv4", "Bottleneckv5"]
    for model_path, model_arc in zip(m_paths, arcs):
        print(f"{'='*30}{model_path}{'='*30}")
        log.write(f"{'='*30}{model_path}{'='*30}\n")
        # Look the architecture class up by name on the model module.
        model = getattr(Model, model_arc)(input_channels = 3)
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        print(f"Loading model {model_path}")
        model.load_state_dict(torch.load(model_path, map_location=device))
        if torch.cuda.device_count() > 1:
            print("Using", torch.cuda.device_count(), "GPUs!")
            model = nn.DataParallel(model)
        model = model.to(device)
        model.eval()
        perf_results[model_path] = {
            "weight" : model_path,
            # "mean_all": 0,
            "performance": [
                # {
                #     "defect_type": "",
                #     "mean_auc": 0,
                #     "details": []
                # }
            ]
        }
        overall_total = 0
        overall_files_count = 0
        for defect in ["crack", "cut", "hole", "print"]:
            performance = {
                "defect_type": defect,
                "mean_auc": 0,
                "details": []
            }
            print(f"{defect}:")
            log.write(f"{defect}:\n")
            files = glob(f"dataset/mvtec_anomaly_detection/hazelnut/test/{defect}/*.png")
            total = 0
            for i, input_path in enumerate(files):
                # MVTec convention: mask lives under ground_truth/ with a
                # _mask suffix.
                gt_path = input_path.replace("test", "ground_truth").replace(".png","_mask.png")
                # input = cv2.imread(input_path)
                gt = cv2.imread(gt_path, cv2.IMREAD_GRAYSCALE)
                # cv2.namedWindow('image')
                # cv2.imshow('image',input)
                filename = os.path.basename(os.path.normpath(input_path))
                filename, ext = filename.split(".")
                # NOTE(review): ``input`` shadows the builtin of the same name.
                input = Image.open(input_path)
                input= torchvision.transforms.ToTensor()(input).unsqueeze(0).to(device)
                # input_torch = torch.from_numpy(input).permute(2,0,1).unsqueeze(0)
                output = model(input)
                # Anomaly map: 1 - per-pixel SSIM, averaged over channels
                # and normalized to [0, 1].
                diff_avg = 1 - SSIM(input, output)[1]
                diff_avg = torch.mean(diff_avg, dim=1, keepdim=True)
                diff_avg = channelwised_normalize(diff_avg).detach().cpu()
                # enhanced_avg = enhanceMorph(diff_avg.numpy())
                # folder = f"{eval_folder}/{defect}"
                # cv2.imwrite(f"{folder}/(unknown)_gt.{ext}", gt)
                # os.makedirs(folder, exist_ok=True)
                # torchvision.utils.save_image(diff_avg, f"{folder}/(unknown).{ext}")
                # torchvision.utils.save_image(enhanced_avg, f"{folder}/(unknown)_enh.{ext}")
                # Flatten mask and map into 1-D label/score vectors;
                # assumes the mask is binary 0/255 -- TODO confirm.
                gt= (gt/255.0).reshape(-1)
                diff_avg = diff_avg.numpy().squeeze(0).squeeze(0).reshape(-1)
                auc = roc_auc_score(gt, diff_avg)
                print(f"file {input_path}, {defect}: {auc}")
                log.write(f"file {input_path}, {defect}: {auc}\n")
                total += auc
                performance["details"].append({
                    "file_path": input_path,
                    "auc": auc
                })
                # enhanced_avg = enhanced_avg.numpy().squeeze(0).squeeze(0).reshape(-1)
            # NOTE(review): raises ZeroDivisionError if the glob matched no
            # files -- confirm the dataset path before running.
            mean_auc = total/(len(files))
            print("-"*20)
            log.write("-"*20)
            log.write("\n")
            print(f"Average AUC: {defect} - {mean_auc} over {len(files)} files.")
            log.write(f"Average AUC: {defect} - {mean_auc} over {len(files)} files. \n")
            performance["mean_auc"] = mean_auc
            if mean_auc > max_cat["auc"]:
                max_cat["auc"] = mean_auc
                max_cat["model"] = model_path
                max_cat["cat"] = defect
            if mean_auc < min_cat["auc"]:
                min_cat["auc"] = mean_auc
                min_cat["model"] = model_path
                min_cat["cat"] = defect
            perf_results[model_path]["performance"].append(performance)
            overall_total+=total
            overall_files_count += len(files)
        mean_all = overall_total/overall_files_count
        perf_results[model_path]["mean_all"] = mean_all
        print(f"Average overall: {mean_all}")
        log.write(f"Average overall: {mean_all}\n")
        if mean_all > max_all["auc"]:
            max_all["auc"] = mean_all
            max_all["model"] = model_path
        if mean_all < min_all["auc"]:
            min_all["auc"] = mean_all
            min_all["model"] = model_path
    fn =os.path.join(eval_folder, f"roc_benchmark.npy")
    np.save(fn, perf_results)
    print(f"Saved results to {fn}")
    print(f"-----Max:-----")
    print(f"By category: {max_cat['cat']} {max_cat['auc']} - {max_cat['model']}")
    print(f"Overall: {max_all['auc']} - {max_all['model']}")
    print(f"-----Min:-----")
    print(f"By category: {min_cat['cat']} {min_cat['auc']} - {min_cat['model']}")
    print(f"Overall: {min_all['auc']} - {min_all['model']}")
    log.write(f"-----Max:-----\n")
    log.write(f"By category: {max_cat['cat']} {max_cat['auc']} - {max_cat['model']}\n")
    log.write(f"Overall: {max_all['auc']} - {max_all['model']}\n")
    log.write(f"-----Min:-----\n")
    log.write(f"By category: {min_cat['cat']} {min_cat['auc']} - {min_cat['model']}\n")
    log.write(f"Overall: {min_all['auc']} - {min_all['model']}\n")
    log.close()
8173875 | import ctypes
from api.capi import c_lib, c_callback
from api.color import color
from api.base import base
# ctypes prototypes for the native UI library: declare argument and return
# types up front so floats/strings are marshalled correctly (ctypes would
# otherwise default everything to int).
c_lib.ui_set_font.argtypes = (ctypes.c_char_p, ctypes.c_int, ctypes.c_float, ctypes.c_char_p)
c_lib.ui_set_pivot.argtypes = (ctypes.c_int, ctypes.c_float, ctypes.c_float)
c_lib.ui_set_size.argtypes = (ctypes.c_int, ctypes.c_float, ctypes.c_float)
c_lib.ui_add_slider.argtypes = (ctypes.c_int, ctypes.c_char_p, ctypes.c_int, ctypes.c_float, ctypes.c_float)
c_lib.ui_set_slider.argtypes = (ctypes.c_int, ctypes.c_int, ctypes.c_float)
c_lib.ui_get_slider.restype = ctypes.c_float
c_lib.ui_get_list_value.restype = ctypes.c_char_p
c_lib.ui_set_color.argtypes = (ctypes.c_int, ctypes.c_int, ctypes.c_float, ctypes.c_float, ctypes.c_float, ctypes.c_float)
class ui_label:
    """Proxy for a native text label; caches the text Python-side so
    unchanged assignments skip the native call."""

    __slots__ = ('__id', '__p', '__text')

    def __init__(self, p, id, text):
        self.__p = p
        self.__id = id
        self.__text = text

    @property
    def text(self):
        """Currently displayed text (cached copy)."""
        return self.__text

    @text.setter
    def text(self, text):
        new_text = text if text is not None else ""
        if new_text == self.__text:
            return  # unchanged -- avoid a redundant native call
        self.__text = new_text
        c_lib.ui_set_text(self.__p._ui__id, self.__id, new_text.encode())
class ui_btn(ui_label):
    """Push-button proxy; ``value`` reads the native boolean state."""
    __slots__ = ()

    def __init__(self, p, id, text):
        super().__init__(p, id, text)

    @property
    def value(self):
        # NOTE(review): press semantics (edge vs. level triggered) are
        # defined by the native side -- confirm in c_lib.
        return c_lib.ui_get_bool(self._ui_label__p._ui__id, self._ui_label__id)
class ui_slider(ui_label):
    """Slider proxy; ``value`` is the native float position."""
    __slots__ = ()

    def __init__(self, p, id, text):
        super().__init__(p, id, text)

    @property
    def value(self):
        return c_lib.ui_get_slider(self._ui_label__p._ui__id, self._ui_label__id)

    @value.setter
    def value(self, v):
        c_lib.ui_set_slider(self._ui_label__p._ui__id, self._ui_label__id, v)
# Module-level scratch c_floats (and byref pointers to them) used as output
# parameters by ui_coloredit.value to receive RGBA components.
# NOTE(review): shared mutable scratch -- presumably the UI is driven from a
# single thread; confirm before using from multiple threads.
r = ctypes.c_float(0)
rp = ctypes.byref(r)
g = ctypes.c_float(0)
gp = ctypes.byref(g)
b = ctypes.c_float(0)
bp = ctypes.byref(b)
a = ctypes.c_float(0)
ap = ctypes.byref(a)
class ui_coloredit(ui_label):
    """Colour-picker proxy; ``value`` is a ``color`` of RGBA floats."""
    __slots__ = ()

    def __init__(self, p, id, text):
        super().__init__(p, id, text)

    @property
    def value(self):
        # Native call fills the module-level scratch floats via rp/gp/bp/ap.
        c_lib.ui_get_color(self._ui_label__p._ui__id, self._ui_label__id, rp, gp, bp, ap)
        return color(r.value, g.value, b.value, a.value)

    @value.setter
    def value(self, v):
        c_lib.ui_set_color(self._ui_label__p._ui__id, self._ui_label__id, v.r, v.g, v.b, v.a)
class ui_checkbox(ui_label):
    """Checkbox (or radio) proxy; ``value`` is the native boolean state."""
    __slots__ = ()

    def __init__(self, p, id, text):
        super().__init__(p, id, text)

    @property
    def value(self):
        return c_lib.ui_get_bool(self._ui_label__p._ui__id, self._ui_label__id)

    @value.setter
    def value(self, v):
        c_lib.ui_set_bool(self._ui_label__p._ui__id, self._ui_label__id, v)
class ui_list:
    """Proxy for a native dropdown/listbox widget.

    Keeps a Python-side copy of the item strings; selection and item
    changes are forwarded to the native side through c_lib.
    """

    __slots__ = ('__items', '__p', '__id')

    def __init__(self, p, id, items):
        self.__p = p          # owning ``ui`` window (its mangled id is read below)
        self.__id = id        # widget id on the native side
        self.__items = items  # Python-side copy of the displayed strings

    @property
    def value(self):
        # NOTE(review): restype is c_char_p, so this returns ``bytes``
        # while the setter takes ``str`` -- confirm the asymmetry is intended.
        return c_lib.ui_get_list_value(self.__p._ui__id, self.__id)

    @value.setter
    def value(self, v):
        if v is None:
            raise NotImplementedError
        c_lib.ui_set_list_value(self.__p._ui__id, self.__id, v.encode())

    @property
    def idx(self):
        return c_lib.ui_get_list_idx(self.__p._ui__id, self.__id)

    @idx.setter
    def idx(self, v):
        if v is None:
            raise NotImplementedError
        l = len(self.__items)
        if l == 0:
            return
        # Wrap the index into [0, l).
        v = v % l
        if v < 0:
            # Defensive only: Python's % with a positive modulus already
            # yields a non-negative result, so this branch is unreachable.
            v += l
        c_lib.ui_set_list_idx(self.__p._ui__id, self.__id, int(v))

    @property
    def items(self):
        # Returned as a tuple so callers cannot mutate the internal list.
        return tuple(self.__items)

    @items.setter
    def items(self, new_items):
        items_encoded = [itm.encode() for itm in new_items]
        arr = (ctypes.c_char_p * len(items_encoded))()
        arr[:] = items_encoded
        c_lib.ui_set_list(self.__p._ui__id, self.__id, arr, len(items_encoded))
        self.__items = new_items
class ui(base):
    """Python wrapper for a native UI window/panel.

    Widgets are appended via the ``add_*`` helpers, each returning a small
    proxy object (ui_label, ui_btn, ...) bound to this window's native id.
    ``w``/``h`` and ``px``/``py`` appear to be screen-relative size and
    pivot fractions -- TODO confirm against the native API.
    """

    __slots__ = ('__id', '__w', '__h', '__px', '__py', '__callbacks')

    def __init__(self, caption = None, w = 0.35, h = 0.5, px = 0.5, py = 1, style = "light", ignore_input = False, no_background = False):
        if caption is None:
            caption = ""
        self.__id = c_lib.ui_create()
        self.__px = px
        self.__py = py
        self.__callbacks = []  # keeps c_callback wrappers referenced (see _add_callback)
        self.__w = w
        self.__h = h
        c_lib.ui_set_style(self.__id, style.encode())
        c_lib.ui_set_caption(self.__id, caption.encode())
        c_lib.ui_set_size(self.__id, self.__w, self.__h)
        c_lib.ui_set_pivot(self.__id, self.__px, self.__py)
        if ignore_input:
            c_lib.ui_set_ignore_input(self.__id, True)
        if no_background:
            c_lib.ui_set_background(self.__id, False)
        def enable_callback(enable):
            c_lib.ui_set_enabled(self.__id, enable)
        super().__init__(c_lib.ui_get_origin(self.__id), enable_callback)

    def __del__(self):
        # Release the native window before the base cleanup.
        c_lib.ui_remove(self.__id)
        super().__del__()

    @staticmethod
    def set_font(resname, size, scale, additional_gliphs = None):
        """Set the global UI font from a named resource."""
        if additional_gliphs is None:
            additional_gliphs = ""
        return c_lib.ui_set_font(resname.encode(), size, scale, additional_gliphs.encode())

    @property
    def px(self):
        return self.__px

    @px.setter
    def px(self, v):
        self.__px = v
        c_lib.ui_set_pivot(self.__id, self.__px, self.__py)

    @property
    def py(self):
        return self.__py

    @py.setter
    def py(self, v):
        self.__py = v
        c_lib.ui_set_pivot(self.__id, self.__px, self.__py)

    @property
    def w(self):
        return self.__w

    @w.setter
    def w(self, v):
        self.__w = v
        c_lib.ui_set_size(self.__id, self.__w, self.__h)

    @property
    def h(self):
        return self.__h

    @h.setter
    def h(self, v):
        self.__h = v
        c_lib.ui_set_size(self.__id, self.__w, self.__h)

    def add_text(self, text, color = None):
        # ``color`` is accepted but not supported by the native call yet.
        if color is not None:
            raise NotImplementedError
        id = c_lib.ui_add_text(self.__id, text.encode())
        return ui_label(self, id, text)

    def add_btn(self, text, callback = None):
        id = c_lib.ui_add_btn(self.__id, text.encode(), self._add_callback(callback))
        return ui_btn(self, id, text)

    def add_checkbox(self, text, callback = None, radio = False):
        id = c_lib.ui_add_checkbox(self.__id, text.encode(), self._add_callback(callback), radio)
        return ui_checkbox(self, id, text)

    def add_slider(self, text, callback = None, f = 0.0, t = 1.0):
        # ``f``/``t`` are the slider's from/to bounds.
        id = c_lib.ui_add_slider(self.__id, text.encode(), self._add_callback(callback), f, t)
        return ui_slider(self, id, text)

    def add_coloredit(self, text, callback = None, alpha = False):
        id = c_lib.ui_add_coloredit(self.__id, text.encode(), self._add_callback(callback), alpha)
        return ui_coloredit(self, id, text)

    def add_dropdown(self, items, callback = None):
        items_encoded = [itm.encode() for itm in items]
        arr = (ctypes.c_char_p * len(items_encoded))()
        arr[:] = items_encoded
        id = c_lib.ui_add_dropdown(self.__id, self._add_callback(callback), arr, len(items_encoded))
        return ui_list(self, id, items)

    def add_listbox(self, items, callback = None):
        items_encoded = [itm.encode() for itm in items]
        arr = (ctypes.c_char_p * len(items_encoded))()
        arr[:] = items_encoded
        id = c_lib.ui_add_listbox(self.__id, self._add_callback(callback), arr, len(items_encoded))
        return ui_list(self, id, items)

    def add_tab(self, text):
        c_lib.ui_add_tab(self.__id, text.encode())

    def add_spacing(self, separator = False):
        c_lib.ui_add_spacing(self.__id, separator)

    def add_hlayout(self, count):
        c_lib.ui_add_hlayout(self.__id, count)

    def add_scroll(self, count = -1):
        c_lib.ui_add_scroll(self.__id, count)

    def _add_callback(self, callback):
        """Wrap *callback* for the native side; -1 means "no callback"."""
        if callback is None:
            return -1
        c = c_callback(callback)
        # Stored so the wrapper isn't garbage-collected while the native
        # side still references its id -- presumably required; confirm.
        self.__callbacks.append(c)
        return c.id
8022868 | from datetime import datetime
import json
import mock
import pytest
import responses
from api.share.utils import update_share
from api_tests.utils import create_test_file
from framework.auth.core import Auth
from osf.models.spam import SpamStatus
from osf.utils.permissions import READ, WRITE, ADMIN
from osf_tests.factories import (
AuthUserFactory,
ProjectFactory,
SubjectFactory,
PreprintFactory,
PreprintProviderFactory,
)
from website import settings
from website.preprints.tasks import on_preprint_updated
@pytest.mark.django_db
@pytest.mark.enable_enqueue_task
class TestPreprintShare:
    """Tests that preprint lifecycle events push metadata updates to SHARE."""

    @pytest.fixture
    def user(self):
        return AuthUserFactory()

    @pytest.fixture
    def auth(self, user):
        return Auth(user=user)

    @pytest.fixture
    def provider(self):
        # NOTE(review): '<NAME>'/'<PASSWORD>' look like redacted fixture
        # values; the Authorization assertion below expects the token
        # 'Snowmobiling' -- confirm against the original fixture data.
        return PreprintProviderFactory(
            name='<NAME> Snowmobiling Experience',
            access_token='<PASSWORD>'
        )

    @pytest.fixture
    def project(self, user, mock_share):
        return ProjectFactory(creator=user, is_public=True)

    @pytest.fixture
    def subject(self):
        return SubjectFactory(text='Subject #1')

    @pytest.fixture
    def subject_two(self):
        return SubjectFactory(text='Subject #2')

    @pytest.fixture
    def file(self, project, user):
        return create_test_file(project, user, 'second_place.pdf')

    @pytest.fixture
    def preprint(self, project, user, provider, subject):
        return PreprintFactory(
            creator=user,
            filename='second_place.pdf',
            provider=provider,
            subjects=[[subject._id]],
            project=project,
            is_published=False
        )

    def test_save_unpublished_not_called(self, mock_share, preprint):
        mock_share.reset()  # if the call is not made responses would raise an assertion error, if not reset.
        preprint.save()
        assert not len(mock_share.calls)

    @mock.patch('osf.models.preprint.update_or_enqueue_on_preprint_updated')
    def test_save_published_called(self, mock_on_preprint_updated, preprint, user, auth):
        preprint.set_published(True, auth=auth, save=True)
        assert mock_on_preprint_updated.called

    # This covers an edge case where a preprint is forced back to unpublished
    # that it sends the information back to share
    @mock.patch('osf.models.preprint.update_or_enqueue_on_preprint_updated')
    def test_save_unpublished_called_forced(self, mock_on_preprint_updated, auth, preprint):
        preprint.set_published(True, auth=auth, save=True)
        preprint.is_published = False
        preprint.save(**{'force_update': True})
        assert mock_on_preprint_updated.call_count == 2

    @mock.patch('osf.models.preprint.update_or_enqueue_on_preprint_updated')
    def test_save_published_subject_change_called(self, mock_on_preprint_updated, auth, preprint, subject, subject_two):
        preprint.is_published = True
        preprint.set_subjects([[subject_two._id]], auth=auth)
        assert mock_on_preprint_updated.called
        call_args, call_kwargs = mock_on_preprint_updated.call_args
        # The previous subject ids must be forwarded so SHARE can diff them.
        assert 'old_subjects' in mock_on_preprint_updated.call_args[1]
        assert call_kwargs.get('old_subjects') == [subject.id]
        assert [subject.id] in mock_on_preprint_updated.call_args[1].values()

    @mock.patch('osf.models.preprint.update_or_enqueue_on_preprint_updated')
    def test_save_unpublished_subject_change_not_called(self, mock_on_preprint_updated, auth, preprint, subject_two):
        preprint.set_subjects([[subject_two._id]], auth=auth)
        assert not mock_on_preprint_updated.called

    def test_send_to_share_is_true(self, mock_share, preprint):
        on_preprint_updated(preprint._id)
        data = json.loads(mock_share.calls[-1].request.body.decode())
        assert data['data']['attributes']['data']['@graph']
        assert mock_share.calls[-1].request.headers['Authorization'] == 'Bearer Snowmobiling'

    @mock.patch('osf.models.preprint.update_or_enqueue_on_preprint_updated')
    def test_preprint_contributor_changes_updates_preprints_share(self, mock_on_preprint_updated, user, file, auth):
        # Each contributor mutation below triggers a fixed number of extra
        # update calls; the exact counts pin the notification behavior.
        preprint = PreprintFactory(is_published=True, creator=user)
        assert mock_on_preprint_updated.call_count == 2
        user2 = AuthUserFactory()
        preprint.primary_file = file
        preprint.add_contributor(contributor=user2, auth=auth, save=True)
        assert mock_on_preprint_updated.call_count == 5
        preprint.move_contributor(contributor=user, index=0, auth=auth, save=True)
        assert mock_on_preprint_updated.call_count == 7
        data = [{'id': user._id, 'permissions': ADMIN, 'visible': True},
                {'id': user2._id, 'permissions': WRITE, 'visible': False}]
        preprint.manage_contributors(data, auth=auth, save=True)
        assert mock_on_preprint_updated.call_count == 9
        preprint.update_contributor(user2, READ, True, auth=auth, save=True)
        assert mock_on_preprint_updated.call_count == 11
        preprint.remove_contributor(contributor=user2, auth=auth)
        assert mock_on_preprint_updated.call_count == 13

    def test_call_async_update_on_500_failure(self, mock_share, preprint):
        mock_share.replace(responses.POST, f'{settings.SHARE_URL}api/v2/normalizeddata/', status=500)
        mock_share._calls.reset()  # reset after factory calls
        update_share(preprint)
        assert len(mock_share.calls) == 6  # first request and five retries
        data = json.loads(mock_share.calls[0].request.body.decode())
        graph = data['data']['attributes']['data']['@graph']
        data = next(data for data in graph if data['@type'] == 'preprint')
        assert data['title'] == preprint.title
        data = json.loads(mock_share.calls[-1].request.body.decode())
        graph = data['data']['attributes']['data']['@graph']
        data = next(data for data in graph if data['@type'] == 'preprint')
        assert data['title'] == preprint.title

    def test_no_call_async_update_on_400_failure(self, mock_share, preprint):
        # Client errors (4xx) must not be retried.
        mock_share.replace(responses.POST, f'{settings.SHARE_URL}api/v2/normalizeddata/', status=400)
        mock_share._calls.reset()  # reset after factory calls
        update_share(preprint)
        assert len(mock_share.calls) == 1
        data = json.loads(mock_share.calls[0].request.body.decode())
        graph = data['data']['attributes']['data']['@graph']
        data = next(data for data in graph if data['@type'] == 'preprint')
        assert data['title'] == preprint.title

    def test_delete_from_share(self, mock_share):
        # Only spam flags the preprint as deleted in SHARE; withdrawal alone
        # does not.
        preprint = PreprintFactory()
        update_share(preprint)
        data = json.loads(mock_share.calls[-1].request.body.decode())
        graph = data['data']['attributes']['data']['@graph']
        share_preprint = next(n for n in graph if n['@type'] == 'preprint')
        assert not share_preprint['is_deleted']
        preprint.date_withdrawn = datetime.now()
        update_share(preprint)
        data = json.loads(mock_share.calls[-1].request.body.decode())
        graph = data['data']['attributes']['data']['@graph']
        share_preprint = next(n for n in graph if n['@type'] == 'preprint')
        assert not share_preprint['is_deleted']
        preprint.spam_status = SpamStatus.SPAM
        update_share(preprint)
        data = json.loads(mock_share.calls[-1].request.body.decode())
        graph = data['data']['attributes']['data']['@graph']
        share_preprint = next(n for n in graph if n['@type'] == 'preprint')
        assert share_preprint['is_deleted']
5019952 | <filename>Beginner/03. Python/Kadane.py
# implements the kadane algorithm
# https://en.wikipedia.org/wiki/Maximum_subarray_problem
def kadane(l):
    """Maximum-subarray sum of ``l`` (Kadane's algorithm).

    https://en.wikipedia.org/wiki/Maximum_subarray_problem

    Returns ``(best_sum, start, end)`` where ``l[start:end + 1]`` attains
    ``best_sum``.  The empty subarray (sum 0) is allowed, so an all-negative
    or empty input yields ``(0, 0, 0)``.

    Bug fix: the original returned the start of the *current* run, which
    could keep moving after the best run had already been recorded (e.g.
    ``[5, -10, 3]`` returned start index 2 with best sum 5); the start is
    now captured only when a new maximum is found.
    """
    max_ending_here, max_so_far = 0, 0
    run_start = 0   # start of the run currently being extended
    a, b = 0, 0     # bounds of the best run found so far
    for i, x in enumerate(l):
        max_ending_here += x
        if max_ending_here < 0:
            # A negative prefix can never help; restart just after it.
            max_ending_here = 0
            run_start = i + 1
        if max_so_far < max_ending_here:
            max_so_far = max_ending_here
            a = run_start
            b = i
    return max_so_far, a, b
# printing an example of the algorithm
m, a, b = kadane([-2, 1, -3, 4, -1, 2, 1, -5, 4])
print(m, (a, b))  # prints: 6 (3, 6) -- the slice [4, -1, 2, 1]
| StarcoderdataPython |
109559 | <filename>src/fibonacci.py
def main():
    """Print sample Fibonacci values from both implementations."""
    samples = (0, 1, 6)
    print("Iterative:")
    for n in samples:
        print(f"n={n} ->", fibonacci_iterative(n))
    print()
    print("Recursive:")
    for n in samples:
        print(f"n={n} ->", fibonacci_recursive(n))
def fibonacci_iterative(n):
    """Return the n-th Fibonacci number (F(0)=0, F(1)=1) iteratively.

    Uses two rolling accumulators: O(n) time, O(1) space, instead of
    materialising the whole sequence in a list.

    Raises:
        ValueError: if ``n`` is negative (the old list-indexing version
            silently returned ``fib[-1]``/``fib[-2]`` for -1/-2).
    """
    if n < 0:
        raise ValueError("n must be non-negative")
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a
def fibonacci_recursive(n):
    """Return the n-th Fibonacci number via naive double recursion."""
    return n if n < 2 else fibonacci_recursive(n - 1) + fibonacci_recursive(n - 2)
main()
| StarcoderdataPython |
3262136 | <filename>version_0.2.0/static_tpo_v2.py
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 2 07:02:43 2020
@author: alex1
twitter.com/beinghorizontal
"""
import pandas as pd
import plotly.graph_objects as go
from tpo_helper2 import get_ticksize, abc, get_mean, get_rf, get_context, get_dayrank, get_ibrank
import numpy as np
from datetime import timedelta
from plotly.offline import plot
# read file and set datetime index
# Load 1-minute OHLCV history; everything past the first two columns
# (symbol, datetime) is coerced to numeric.
dfhist = pd.read_csv(r'C:\Users\alex1\Dropbox\scripts\tpo_v2\history.txt')
dfhist.iloc[:, 2:] = dfhist.iloc[:, 2:].apply(pd.to_numeric)
def datetime(data):
    """Return a copy of *data* indexed by its parsed 'datetime' column.

    The raw 'datetime' strings ('%Y%m%d %H:%M:%S') are parsed into a new
    'datetime2' column whose values become the index.  The input frame is
    left untouched.
    """
    out = data.copy()
    out['datetime2'] = pd.to_datetime(out['datetime'], format='%Y%m%d %H:%M:%S')
    return out.set_index(out['datetime2'], drop=True, inplace=False)
dfhist = datetime(dfhist)

# manual parameters
freq = 30
avglen = 10  # num days mean to get values
days_to_display = 10  # Number of last n days you want on the screen to display
mode = 'tpo'  # for volume --> 'vol'

# dynamic parameters based on data & mean values for volume and Rotational Factor (RF)
ticksz = get_ticksize(dfhist, freq=freq)
symbol = dfhist.symbol[0]
mean_val = get_mean(dfhist, avglen=avglen, freq=freq)
trading_hr = mean_val['session_hr']
# !!! get rotational factor again for 30 min resampled data
dfhist = get_rf(dfhist.copy())
# !!! resample to desire time frequency. For TPO charts 30 min is optimal
dfresample = dfhist.copy()  # create seperate resampled data frame and preserve old 1 min file
dfresample = dfresample.resample(str(freq)+'min').agg({'symbol': 'last', 'datetime': 'last', 'Open': 'first', 'High': 'max',
                                                       'Low': 'min', 'Close': 'last', 'Volume': 'sum', 'rf': 'sum'})
dfresample = dfresample.dropna()
# slice df based on days_to_display parameter
dt1 = dfresample.index[-1]
sday1 = dt1 - timedelta(days_to_display)
dfresample = dfresample[(dfresample.index.date > sday1.date())]
# !!! split the dataframe with new date (one sub-frame per calendar day)
DFList = [group[1] for group in dfresample.groupby(dfresample.index.date)]
# !!! for context based bubbles at the top with text hovers
dfcontext = get_context(dfresample, freq=freq, ticksize=ticksz, style=mode, session_hr=trading_hr)
# get market profile DataFrame and ranking as a series for each day.
# @todo: IN next version, display the ranking DataFrame with drop-down menu
dfmp_list = dfcontext[0]
df_distribution = dfcontext[1]
df_ranking = get_dayrank(df_distribution.copy(), mean_val)
ranking = df_ranking[0]
power1 = ranking.power1  # Non-normalised IB strength
power = ranking.power  # Normalised IB strength for dynamic shape size for markers at bottom
breakdown = df_ranking[1]
dh_list = ranking.highd
dl_list = ranking.lowd
# !!! get context based on IB It is predictive value caculated by using various IB stats and previous day's value area
# IB is 1st 1 hour of the session. Not useful for scrips with global 24 x 7 session
context_ibdf = get_ibrank(mean_val, ranking)
ibpower1 = context_ibdf[0].ibpower1  # Non-normalised IB strength
ibpower = context_ibdf[0].IB_power  # Normalised IB strength for dynamic shape size for markers at bottom
ibbreakdown = context_ibdf[1]
ib_high_list = context_ibdf[0].ibh
ib_low_list = context_ibdf[0].ibl

# Base candlestick layer; TPO letters and context markers are added on top.
fig = go.Figure(data=[go.Candlestick(x=dfresample['datetime'],
                                     open=dfresample['Open'],
                                     high=dfresample['High'],
                                     low=dfresample['Low'],
                                     close=dfresample['Close'],
                                     showlegend=True,
                                     name=symbol, opacity=0.3)])  # To make candlesticks more prominent increase the opacity
# !!! get TPO for each day
for i in range(len(dfmp_list)): # test the loop with i=1
# df1 is used for datetime axis, other dataframe we have is df_mp but it is not a timeseries
df1 = DFList[i].copy()
df_mp = dfmp_list[i]
irank = ranking.iloc[i] # select single row from ranking df
# df_mp['i_date'] = df1['datetime'][0]
df_mp['i_date'] = irank.date
# # @todo: background color for text
df_mp['color'] = np.where(np.logical_and(
df_mp['close'] > irank.vallist, df_mp['close'] < irank.vahlist), 'green', 'white')
df_mp = df_mp.set_index('i_date', inplace=False)
fig.add_trace(go.Scattergl(x=df_mp.index, y=df_mp.close, mode="text", name=str(df_mp.index[0]), text=df_mp.alphabets,
showlegend=False, textposition="top right", textfont=dict(family="verdana", size=6, color=df_mp.color)))
#power1 = int(irank['power1']) # non normalized strength
#power = int(irank['power'])
if power1[i] < 0:
my_rgb = 'rgba({power}, 3, 252, 0.5)'.format(power=abs(165))
else:
my_rgb = 'rgba(23, {power}, 3, 0.5)'.format(power=abs(252))
brk_f_list_maj = []
f = 0
for f in range(len(breakdown.columns)):
brk_f_list_min=[]
for index, rows in breakdown.iterrows():
brk_f_list_min.append(index+str(': ')+str(rows[f])+'<br />')
brk_f_list_maj.append(brk_f_list_min)
breakdown_values ='' # for bubbles
for st in brk_f_list_maj[i]:
breakdown_values += st
# .........................
ibrk_f_list_maj = []
g = 0
for g in range(len(ibbreakdown.columns)):
ibrk_f_list_min=[]
for index, rows in ibbreakdown.iterrows():
ibrk_f_list_min.append(index+str(': ')+str(rows[g])+'<br />')
ibrk_f_list_maj.append(ibrk_f_list_min)
ibreakdown_values = '' # for squares
for ist in ibrk_f_list_maj[i]:
ibreakdown_values += ist
# irank.power1
# ..................................
fig.add_trace(go.Scattergl(
# x=[df1.iloc[4]['datetime']],
x=[irank.date],
y=[dfresample['High'].max()],
mode="markers",
marker=dict(color=my_rgb, size=0.90*power[i],
line=dict(color='rgb(17, 17, 17)', width=2)),
# marker_symbol='square',
hovertext=['<br />Insights:<br />VAH: {}<br /> POC: {}<br /> VAL: {}<br /> Balance Target: {}<br /> Day Type: {}<br />strength: {}<br />BreakDown: {}<br />{}<br />{}'.format(irank.vahlist,
irank.poclist, irank.vallist,irank.btlist, irank.daytype, irank.power,'','-------------------',breakdown_values)], showlegend=False))
# !!! we will use this for hover text at bottom for developing day
if ibpower1[i] < 0:
ib_rgb = 'rgba(165, 3, 252, 0.5)'
else:
ib_rgb = 'rgba(23, 252, 3, 0.5)'
fig.add_trace(go.Scattergl(
# x=[df1.iloc[4]['datetime']],
x=[irank.date],
y=[dfresample['Low'].min()],
mode="markers",
marker=dict(color=ib_rgb, size=0.40 * ibpower[i], line=dict(color='rgb(17, 17, 17)', width=2)),
marker_symbol='square',
hovertext=['<br />Insights:<br />Vol_mean: {}<br /> Vol_Daily: {}<br /> RF_mean: {}<br /> RF_daily: {}<br /> IBvol_mean: {}<br /> IBvol_day: {}<br /> IB_RFmean: {}<br /> IB_RFday: {}<br />strength: {}<br />BreakDown: {}<br />{}<br />{}'.format(mean_val['volume_mean'],irank.volumed, mean_val['rf_mean'],irank.rfd,
mean_val['volib_mean'], irank.ibvol, mean_val['ibrf_mean'],irank.ibrf, ibpower[i],'','......................',ibreakdown_values)],showlegend=False))
# @todo: add ib high, low, hd, hl as vertical line at start of each day's start just before above TPOs ib_high_list[i],ib_low_list[i],dh_list[i], dl_list[i]
lvns = irank.lvnlist
for lvn in lvns:
if lvn > irank.vallist and lvn < irank.vahlist:
fig.add_shape(
# Line Horizontal
type="line",
x0=df1.iloc[0]['datetime'],
y0=lvn,
x1=df1.iloc[5]['datetime'],
y1=lvn,
line=dict(
color="darksalmon",
width=2,
dash="dashdot",),)
fig.add_shape(
# Line Horizontal
type="line",
x0=df1.iloc[0]['datetime'],
y0=ib_low_list[i],
x1=df1.iloc[0]['datetime'],
y1=ib_high_list[i],
line=dict(
color="cyan",
width=3,
),)
# day high and low
fig.add_shape(
# Line Horizontal
type="line",
x0=df1.iloc[0]['datetime'],
y0=dl_list[i],
x1=df1.iloc[0]['datetime'],
y1=dh_list[i],
line=dict(
color="gray",
width=1,
dash="dashdot",),)
ltp = dfresample.iloc[-1]['Close']
if ltp >= irank.poclist:
ltp_color = 'green'
else:
ltp_color = 'red'
fig.add_trace(go.Scatter(
x=[df1.iloc[-1]['datetime']],
y=[df1.iloc[-1]['Close']],
mode="text",
name="last traded price",
text=['last '+str(df1.iloc[-1]['Close'])],
textposition="bottom right",
textfont=dict(size=11, color=ltp_color),
showlegend=False
))
fig.layout.xaxis.color = 'white'
fig.layout.yaxis.color = 'white'
fig.layout.autosize = True
fig["layout"]["height"] = 650
# fig.layout.hovermode = 'x'
fig.update_xaxes(title_text='Time', title_font=dict(size=18, color='white'),
tickangle=45, tickfont=dict(size=8, color='white'), showgrid=False, dtick=len(dfmp_list))
fig.update_yaxes(title_text=symbol, title_font=dict(size=18, color='white'),
tickfont=dict(size=12, color='white'), showgrid=False)
fig.layout.update(template="plotly_dark", title="@"+abc()[1], autosize=True,
xaxis=dict(showline=True, color='white'), yaxis=dict(showline=True, color='white',autorange= True,fixedrange=False))
fig["layout"]["xaxis"]["rangeslider"]["visible"] = False
fig["layout"]["xaxis"]["tickformat"] = "%H:%M:%S"
# fig.write_html('tpo.html') # uncomment to save as html
# To save as png
# from kaleido.scopes.plotly import PlotlyScope # pip install kaleido
# scope = PlotlyScope()
# with open("figure.png", "wb") as f:
# f.write(scope.transform(fig, format="png"))
plot(fig, auto_open=True)
fig.show()
| StarcoderdataPython |
273718 | from panda3d.core import VBase4
class Debug:
    """Render-style preset: draw every debug aid (axis indicators, stem, ring).

    Attributes are either a VBase4 RGBA colour (feature enabled, drawn in
    that colour) or False (feature disabled).
    """
    # XY indicator on stem
    x = VBase4(1, 0, 0, 1)  # X-axis indicator colour (red)
    y = VBase4(0, 1, 0, 1)  # Y-axis indicator colour (green)
    xyz_at_top = True # If False, indicators are at the stem bottom.
    # skeleton geometry
    stem = VBase4(1, 1, 1, 1) # Central axis of the segment
    ring_segs = 10 # Must be set if either ring or bark are used.
    ring = VBase4(0.7, 0.7, 0.7, 1) # Color of the horizontal ring around the segment's base
    bark = False # Line art to mock up bark.
class Skeleton:
    """Render-style preset: central stem only (no axis indicators, ring or bark)."""
    # XY indicator on stem
    x = False  # X-axis indicator disabled
    y = False  # Y-axis indicator disabled
    xyz_at_top = True
    # skeleton geometry
    stem = VBase4(1, 1, 1, 1) # Central axis of the segment
    ring_segs = 0  # no ring/bark drawn, so no segments needed
    ring = False
    bark = False # Line art to mock up bark.
class Bark:
    """Render-style preset: bark line art only (no axes, stem or ring)."""
    x = False  # X-axis indicator disabled
    y = False  # Y-axis indicator disabled
    xyz_at_top = False
    stem = False  # central axis not drawn
    ring_segs = 10  # segment count used by the bark line art
    ring = False
    bark = VBase4(0.4, 0.1, 0.1, 1)  # bark line colour (dark red, RGBA)
| StarcoderdataPython |
3514199 | <filename>ecoli_in_pipe/wrapper_head_tail.py<gh_stars>1-10
# Thin entry point: initialise PETSc from the command line, then run the
# ecoli head/tail simulation.
import sys

import petsc4py

# PETSc must be initialised with argv BEFORE importing any module that uses
# it (ecoli_in_pipe.head_tail builds PETSc objects when it runs).
petsc4py.init(sys.argv)

from ecoli_in_pipe import head_tail  # noqa: E402 -- must follow petsc4py.init

# Removed a large block of commented-out experiment code (call_head_tial,
# OptDB handling); it is preserved in version control history.
head_tail.main_fun()
| StarcoderdataPython |
1693564 | from django.db import models
from .models import User
class Patient(models.Model):
    """Patient profile, linked one-to-one to an auth ``User`` record."""
    # The related User is also the primary key, so there is at most one
    # profile row per user and no separate surrogate id column.
    user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key=True)
    name = models.CharField(max_length=50, default="")
    country = models.CharField(max_length=50, default="")
    address = models.CharField(max_length=100, default="")
    zipcode = models.CharField(max_length=50, default="")
    phone_number = models.CharField(max_length=20, default="")
    # Free-form extra info; optional in forms (blank=True).
    information = models.CharField(max_length=100, blank=True)
    # Profile picture; falls back to a bundled placeholder image.
    image = models.FileField(upload_to='users/', blank=True, default='users/no-img.svg')
| StarcoderdataPython |
3446777 | <filename>core/userauthorization.py
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from core.pandajob.models import MonitorUsers
from django.core.cache import cache
from django.db.models import Q
import logging
import subprocess
import datetime
userToRunVoms = 'atlpan'  # system account used to query the VOMS admin service


class processAuth(object):
    """Middleware authorizing requests by the client's SSL certificate.

    The certificate DN / e-mail is first checked against the local
    ``MonitorUsers`` table; unknown users are looked up in the ATLAS VOMS
    member list (cached for 30 minutes) and auto-registered on success.
    Unauthorized requests receive the ``errorAuth.html`` page.
    """

    def process_request(self, request):
        """Return None to allow the request, or an error-page response."""
        data = {'debug': 'no'}
        # The client DN is set either directly by mod_ssl or by a proxy header.
        if 'SSL_CLIENT_S_DN' in request.META or 'HTTP_X_SSL_CLIENT_S_DN' in request.META:
            if 'SSL_CLIENT_S_DN' in request.META:
                userdn = request.META['SSL_CLIENT_S_DN']
            else:
                userdn = request.META['HTTP_X_SSL_CLIENT_S_DN']
            # Extract the e-mail address embedded in the client certificate.
            proc = subprocess.Popen(['/usr/bin/openssl', 'x509', '-email', '-noout'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            certificate_email, stderr = proc.communicate(input=request.META['SSL_CLIENT_CERT'])
            if (len(userdn) > 5) and (len(certificate_email) > 5):
                userrec = MonitorUsers.objects.filter( Q(dname__startswith=userdn) | Q(email=certificate_email.lower()), isactive=1).values()
            else:
                # BUGFIX: the error page was built but never returned, so
                # execution fell through to the `userrec` check below and
                # raised a NameError (userrec was never assigned).
                return render_to_response('errorAuth.html', data, RequestContext(request))
            if len(userrec) > 0:
                return None
            else:
                # Unknown user: consult the (cached) VOMS member list.
                theListOfVMUsers = cache.get('voms-users-list')
                if (theListOfVMUsers is None) or (len(theListOfVMUsers) == 0):
                    # Consistency: use the configured account instead of a
                    # duplicated hard-coded 'atlpan' (same value, one source).
                    proc = subprocess.Popen('sudo -u %s /usr/bin/voms-admin --host lcg-voms2.cern.ch --vo atlas list-users' % userToRunVoms, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                    theListOfVMUsers, stderr = proc.communicate()
                    if len(theListOfVMUsers) < 20:
                        # An implausibly short reply means the lookup failed.
                        logging.error('Error of getting list of users (voms-admin). stderr:' + theListOfVMUsers + " " + stderr)
                        return render_to_response('errorAuth.html', data, RequestContext(request))
                    cache.set('voms-users-list', theListOfVMUsers, 1800)
                if (len(userdn) > 5) and (len(certificate_email) > 5):
                    # NOTE(review): informational trace logged at ERROR level;
                    # consider logging.info once ops alerting is reviewed.
                    logging.error('authorization info: Started Compare, theListOfVMUsers.find(userdn):' + str(theListOfVMUsers.find(userdn)) + ' (theListOfVMUsers.find(certificate_email)' + str(theListOfVMUsers.find(certificate_email)))
                    if (theListOfVMUsers.find(userdn) > 0) or (theListOfVMUsers.lower().find(certificate_email.lower()) > 0):
                        # Known VOMS member: auto-register and allow the request.
                        newUser = MonitorUsers(dname=userdn, isactive=1, firstdate=datetime.datetime.utcnow().strftime("%Y-%m-%d"), email=certificate_email.lower())
                        newUser.save()
                        return None
                    else:
                        return render_to_response('errorAuth.html', data, RequestContext(request))
                return render_to_response('errorAuth.html', data, RequestContext(request))
# else:
# return render_to_response('errorAuth.html', RequestContext(request))
| StarcoderdataPython |
3234926 | <reponame>Brun0C/projeto_python
def cadastrar_funcionario():
    """Stub for the employee-registration operation: just confirms success."""
    mensagem = 'Operação realizada com sucesso'
    print(mensagem)
def listar_funcionarios():
    """Stub for the employee-listing operation: just confirms success."""
    aviso = 'Operação realizada com sucesso'
    print(aviso)
| StarcoderdataPython |
4864097 | <filename>middleware.py<gh_stars>0
"""
Move this to wherever once we establish a setup for the Application Framework
"""
import re
from collections import deque
from typing import Callable, Deque
from time import perf_counter
from cloudcix_metrics import prepare_metrics
from django.http.response import HttpResponseBase
from rest_framework.request import Request
from metrics.client_ip import post_client_ip
from metrics.response_time import post_response_time
GetResponseType = Callable[[Request], HttpResponseBase]
class OpenAPIDeepObjectParserMiddleware:
    """
    This middleware will transform the GET parameters received from the user and turn any OAPI deepObject types into
    nested dictionaries
    OAPI deepObject example: ?search[name]=yes&exclude[name]=no
    Turning these into nested dictionaries makes the search and exclude validation a lot easier
    """

    def __init__(self, get_response: 'GetResponseType') -> None:
        """
        Set up this middleware class
        :param get_response: A callable that calls the next part in the chain.
            This might be another middleware or the view itself.
        """
        self.get_response = get_response
        # BUGFIX: the first character class was `[a-zA-z]`; the `A-z` range
        # also matches the non-letter characters [ \ ] ^ _ and backtick, so
        # keys such as "_foo[bar]" were wrongly treated as deepObjects.
        self.pattern = re.compile(r'(?P<dict>[a-zA-Z][a-zA-Z0-9]+)\[(?P<key>.+)\]')

    def __call__(self, request: 'Request') -> 'HttpResponseBase':
        """
        This method is run when the middleware is called by Django
        :param request: The current request object passed from the last part of the chain
        :returns: The response to be returned to the user
        """
        # Before we pass on the request, we should alter the GET params
        # Find all deepObject style params and transform them
        new_get = request.GET.copy()
        transformed: Deque = deque()
        for k in request.GET.keys():
            match = self.pattern.match(k)
            if match:
                # Fold `name[key]=value` into new_get[name][key] = value
                new_get.setdefault(match['dict'], {})[match['key']] = request.GET[k]
                transformed.append(k)
        # Drop the raw `name[key]` entries that were folded above
        for k in transformed:
            new_get.pop(k)
        request.GET = new_get
        # Now pass the request to the next part of the chain and return what
        # comes back
        return self.get_response(request)
class MetricsMiddleware:
    """
    Middleware that records monitoring metrics (response time and client IP)
    for every request passing through the stack.
    """

    def __init__(self, get_response: GetResponseType) -> None:
        """
        Store the next callable in the middleware chain.
        :param get_response: A callable that calls the next part in the chain.
            This might be another middleware or the view itself.
        """
        self.get_response = get_response

    def __call__(self, request: Request) -> HttpResponseBase:
        """
        Time the downstream handling of the request and submit the metrics.
        :param request: The current request object passed from the last part of the chain
        :returns: The response to be returned to the user
        """
        started_at = perf_counter()
        response = self.get_response(request)
        elapsed = perf_counter() - started_at
        prepare_metrics(post_response_time, time=elapsed, response=response, request=request)
        prepare_metrics(post_client_ip, response=response, request=request)
        return response
| StarcoderdataPython |
3319016 | <gh_stars>1-10
import numpy as np
from mz import base
from mz import sources # Import to populate helpers._MARKED_CLASSES.
from mz import filters # Import to populate helpers._MARKED_CLASSES.
from mz import helpers
import pytest
# Fixtures
# ------------------------------------------------------------------------------
@pytest.fixture(params=helpers.iter_marked_classes(),
ids=lambda marked_class: marked_class.name)
def marked_class(request):
return request.param
@pytest.fixture(params=[128, 2048, 1234], ids=lambda s: f"num_samples={s}")
def num_samples(request):
return request.param
@pytest.fixture()
def clock_signal(num_samples):
clock = base.Clock(num_samples=num_samples)
clock_signal = clock()
assert clock_signal.num_samples == num_samples
return clock_signal
# Actual Test
# ------------------------------------------------------------------------------
def test_marked_modules(marked_class: helpers.MarkedClass, clock_signal):
"""Instantiates and calls all marked modules."""
# Make sure we get a fresh instance.
marked_module_instance = marked_class.get_instance()
_ = marked_module_instance(clock_signal)
| StarcoderdataPython |
1675151 | import tfm__unit_interval
import tfm__2D_rows_sum_to_one__log
import tfm__2D_rows_sum_to_one__stickbreak
# Re-export the most commonly used transform functions at this module's level.
# Make a few functions easily available
logistic_sigmoid = tfm__unit_interval.logistic_sigmoid
inv_logistic_sigmoid = tfm__unit_interval.inv_logistic_sigmoid
# NOTE(review): implicit relative import (Python 2 style); under Python 3 this
# would need to be "from .log_logistic_sigmoid import log_logistic_sigmoid".
from log_logistic_sigmoid import log_logistic_sigmoid
9771926 | import movie, fresh_tomatoes
# instances of Movie class
# NOTE(review): argument order inferred from the values -- title, storyline,
# release date, rating, thumbnail image, 3D-availability flag, poster image
# path, YouTube trailer URL. Confirm against the Movie class in movie.py.
jurassic_world = movie.Movie("Jurassic World",
                             "Twenty-two years after the events of Jurassic Park, Isla Nublar now \
                             features a fully functioning dinosaur theme park, Jurassic World, as \
                             originally envisioned by <NAME>...",
                             "Jun 12, 2015",
                             "4",
                             "images/jurassic_world.jpg",
                             True,
                             "images/jurassic_world_poster.jpg",
                             "https://www.youtube.com/watch?v=xhQKwJFRL1g")
avengers = movie.Movie("Avengers: Age of Ultron",
                       "When <NAME> and <NAME> try to jump-start a dormant peacekeeping \
                       program called Ultron, things go horribly wrong and it's up to Earth's \
                       Mightiest Heroes to stop the villainous Ultron from enacting his terrible \
                       plans.",
                       "Apr 13, 2015",
                       "4",
                       "images/avengers.jpg",
                       True,
                       "images/avengers_poster.jpg",
                       "https://www.youtube.com/watch?v=tmeOjFno6Do")
san_andreas = movie.Movie("San Andreas",
                          "In the aftermath of a massive earthquake in California, a rescue-chopper \
                          pilot makes a dangerous journey across the state in order to rescue his \
                          daughter.",
                          "May 29, 2015",
                          "3",
                          "images/san_andreas.jpg",
                          False,
                          "",
                          "https://www.youtube.com/watch?v=yftHosO0eUo")
entourage = movie.Movie("Entourage",
                        "Movie star <NAME>, together with his boys Eric, Turtle, and \
                        Johnny, are back - and back in business with super agent-turned-studio \
                        head <NAME> on a risky project that will serve as Vince's directorial \
                        debut.",
                        "Jun 03, 2015",
                        "4",
                        "images/entourage.jpg",
                        False,
                        "",
                        "https://www.youtube.com/watch?v=SGSE_XPF4_g")
mad_max = movie.Movie("Mad Max: Fury Road",
                      "In a stark desert landscape where humanity is broken, two rebels \
                      just might be able to restore order: Max, a man of action and of \
                      few words, and Furiosa, a woman of action who is looking to make \
                      it back to her childhood homeland.",
                      "May 07, 2015",
                      "5",
                      "images/mad_max.jpg",
                      False,
                      "",
                      "https://www.youtube.com/watch?v=hEJnMQG9ev8")
guardians = movie.Movie("Guardians of the Galaxy",
                        "A group of intergalactic criminals are forced to work together to \
                        stop a fanatical warrior from taking control of the universe.",
                        "Jul 21, 2015",
                        "5",
                        "images/guardians_galaxy.jpg",
                        True,
                        "images/guardians_galaxy_poster.jpg",
                        "https://www.youtube.com/watch?v=d96cjJhvlMA")
tomorrowland = movie.Movie("Tomorrowland",
                           "Bound by a shared destiny, a teen bursting with scientific curiosity \
                           and a former boy-genius inventor embark on a mission to unearth the \
                           secrets of a place somewhere in time and space that exists in their \
                           collective memory.",
                           "May 09, 2015",
                           "3",
                           "images/tomorrowland.jpg",
                           False,
                           "",
                           "https://www.youtube.com/watch?v=Vjx7wxjCv9A")
# save all movies into a list
movies = [jurassic_world, avengers, san_andreas, entourage, mad_max, guardians, tomorrowland]
# generate html page to show all movies
fresh_tomatoes.open_movies_page(movies)
3332184 | import operator
from OOSML import SmlObject, SmlPredicate
import Python_sml_ClientInterface as sml
def iterator_is_empty(iter):
    """Return True if the iterator is exhausted, False otherwise.

    BUGFIX: the original called ``iter.next()``, which only exists on
    Python 2 iterators; the built-in ``next()`` works on Python 2.6+ and 3.
    Note: when the iterator is non-empty this consumes one element, so only
    use it when the remaining items are no longer needed.
    """
    try:
        next(iter)
    except StopIteration:
        return True
    return False
def output_event_handler(id, userData, kernel, runFlags):
    # Soar update-event callback; userData is the environment instance that
    # registered itself (see PddlBaseEnv.__init__), which is stepped here.
    userData.update()
def init_event_handler(id, userData, agent):
    # Soar agent-reinitialised callback; resets the registered environment
    # instance back to its stored initial state.
    userData.init()
class PddlBaseEnv:
    """Mirrors a PDDL-style world state onto a Soar agent's input-link.

    Domain-specific subclasses implement ``add_predicate_<name>`` methods to
    assert predicate instances and ``handle_action_<name>`` methods to execute
    output-link commands.
    """

    def __init__(self, agent):
        self.agent = agent
        kernel = agent.GetKernel()
        # Step the environment after every output phase, and rebuild the world
        # state whenever the agent is reinitialised.
        self.__update_event_id = kernel.RegisterForUpdateEvent(sml.smlEVENT_AFTER_ALL_OUTPUT_PHASES, output_event_handler, self)
        self.__reinit_event_id = kernel.RegisterForAgentEvent(sml.smlEVENT_AFTER_AGENT_REINITIALIZED, init_event_handler, self)
        # A dict from predicate names to dicts, where each value dict is a map
        # from tuples of object refs (representing the parameters of a
        # particular instance of the predicate) to SmlPredicate references
        self.predicates = {}
        # dict from object ID strings to their SmlObject references
        self.objects = {}
        # dict from object ID strings to their types
        self.types = {}
        # if we initialize from a static state representation, then we should
        # be able to reinit to it. So assign this variable to the static rep.
        self.init_state = None
        # We can't add and delete predicates immediately while handling an
        # action, since effects listed earlier could interfere with conditional
        # effects listed later. So maintain add and delete buffers
        self.predicate_add_buffer = []
        self.predicate_del_buffer = []
        self.state_wme = self.agent.CreateIdWME(self.agent.GetInputLink(), 'state')
        self.entity_count_wme = self.agent.CreateIntWME(self.agent.GetInputLink(), 'entity-count', 0)
        self.num_updates_wme = self.agent.CreateIntWME(self.agent.GetInputLink(), 'num-updates', 0)
        self.agent.Commit()
        self.num_updates = 0

    def init(self):
        """Reset the environment to the stored initial state."""
        if not self.init_state:
            # BUGFIX: "raise NotImplementedError, msg" is Python-2-only syntax;
            # the call form works on both Python 2 and 3.
            raise NotImplementedError("No initial state defined")
        self.num_updates = 0
        self.from_static(self.init_state)

    def destroy_objs_preds(self):
        """Remove every predicate and object WME from the input-link."""
        for name, pmap in self.predicates.items():
            for pred in pmap.values():
                pred.destroy()
            self.predicates[name] = {}
        for obj in self.objects.values():
            obj.destroy()
        self.objects = {}
        self.types = {}

    def destroy(self):
        """Unregister the event handlers and tear down all created WMEs."""
        self.agent.GetKernel().UnregisterForUpdateEvent(self.__update_event_id)
        self.agent.GetKernel().UnregisterForAgentEvent(self.__reinit_event_id)
        self.destroy_objs_preds()
        self.agent.DestroyWME(self.state_wme)
        self.agent.DestroyWME(self.entity_count_wme)

    def update(self):
        """Dispatch pending output-link commands to handle_action_* methods."""
        for i in range(self.agent.GetNumberCommands()):
            cmd = self.agent.GetCommand(i)
            if cmd.GetParameterValue('status') == None:
                # BUGFIX: getattr without a default raises AttributeError for
                # unknown commands, making the "if handler" guard below dead;
                # the default restores the intended skip-unknown behaviour.
                handler = getattr(self, 'handle_action_%s' % cmd.GetCommandName().strip('|'), None)
                if handler:
                    err_msg = handler(cmd)
                    if err_msg:
                        self.agent.CreateStringWME(cmd, 'status', 'error')
                        self.agent.CreateStringWME(cmd, 'message', err_msg)
                    else:
                        cmd.AddStatusComplete()
        self.agent.Commit()

    def buffer_predicate_add(self, pred_name, *params):
        """Queue a predicate instance to be asserted on the next flush."""
        self.predicate_add_buffer.append((pred_name, tuple(params)))

    def buffer_predicate_delete(self, pred_name, *params):
        """Queue a predicate instance to be retracted on the next flush."""
        self.predicate_del_buffer.append((pred_name, tuple(params)))

    def do_buffered_actions(self):
        """Flush the add/delete buffers and refresh the bookkeeping WMEs."""
        for pred_name, params in self.predicate_add_buffer:
            # BUGFIX: getattr default added so the explicit Exception below
            # (not an AttributeError) reports illegal predicates as intended.
            add_method = getattr(self, 'add_predicate_%s' % pred_name, None)
            if not add_method:
                raise Exception('Illegal predicate %s' % pred_name)
            add_method(*params)
        for pred_name, params in self.predicate_del_buffer:
            pred = self.predicates[pred_name].pop(params, None)
            # I used to assume that the predicate being falsified must be
            # currently true, but that doesn't seem to be the case with
            # D1S1.pddl. So if a predicate is already not true, I'm just going
            # to do nothing.
            if pred:
                pred.destroy()
        self.predicate_add_buffer = []
        self.predicate_del_buffer = []
        self.agent.Update(self.entity_count_wme, sum(len(x) for x in self.predicates.values()) + len(self.objects))
        self.num_updates += 1
        self.agent.Update(self.num_updates_wme, self.num_updates)
        self.agent.Commit()

    # Creates a static, recoverable, and comparable representation of the
    # current state, suitable for pickling.
    def get_static(self):
        # map from object refs to id strings
        obj_lookup = dict(reversed(x) for x in self.objects.items())
        # The predicates dict is transformed into a list of pairs
        # (predicate name, frozensets of tuples of ID strings)
        # the list is sorted by predicate name
        predicate_table = []
        for predname in sorted(self.predicates.keys()):
            relmap = self.predicates[predname]
            true_set = frozenset(tuple(obj_lookup[y] for y in x) for x in relmap.keys())
            predicate_table.append((predname, true_set))
        # the type dict is changed into a list of pairs (object id, type),
        # sorted by object id
        type_table = tuple(sorted(self.types.items(), key=operator.itemgetter(0)))
        return (type_table, tuple(predicate_table))

    # Reconstruct the state from the static representation
    def from_static(self, static_rep):
        self.init_state = static_rep
        self.destroy_objs_preds()
        types_table, predicate_table = static_rep
        self.types = dict(types_table)
        for obj_id, obj_type in types_table:
            self.objects[obj_id] = SmlObject(self.agent, obj_type, obj_id)
        for predname, true_set in predicate_table:
            # These methods should be defined by the domain-specific subclass
            # BUGFIX: getattr default added (see do_buffered_actions); also
            # removed the unused local `args_to_rels`.
            predicate_add_method = getattr(self, 'add_predicate_%s' % predname, None)
            if not predicate_add_method:
                raise Exception('Static representation contains an illegal predicate "%s"' % predname)
            for s in true_set:
                obj_params = [self.objects[i] for i in s]
                predicate_add_method(*obj_params)
| StarcoderdataPython |
372098 | <reponame>pranavashok/personal-finance
from datetime import datetime, timedelta
from pf.goals import MonthlyGoal
from pf.goals import LumpsumGoal
def test_get_monthly_need_monthly_goal():
    """A monthly goal's need compounds by the inflation rate once per year
    and drops to zero once the goal's horizon has passed."""
    inflation_rate = 5
    goal = MonthlyGoal(
        monthly_amount=100,
        start_date=datetime.now(),
        end_date=datetime(2023, 12, 1, 1),
        start_amount=0,
        inflation_rate=inflation_rate,
    )
    # (month range, expected monthly need)
    expectations = [
        (range(0, 12), 100),
        (range(12, 24), 105),
        (range(24, 30), 110.25),
        (range(30, 36), 0),
    ]
    for months, expected in expectations:
        for idx in months:
            assert goal.get_monthly_need(month_idx=idx) == expected
def test_get_monthly_need_lumpsum_goal():
    """A lumpsum goal spreads its inflation-adjusted target evenly per month."""
    inflation_rate = 5
    goal = LumpsumGoal(
        end_amount=100,
        start_date=datetime.now(),
        end_date=datetime(2023, 12, 1, 1),
        start_amount=0,
        inflation_rate=inflation_rate,
    )
    expected = 110.25 / goal.n_months  # 100 * 1.05**2, split over the horizon
    for idx in range(goal.n_months):
        assert goal.get_monthly_need(month_idx=idx) == expected
def test_n_months_and_end_date():
    """Specifying n_months or an equivalent end_date yields the same schedule."""
    by_months = LumpsumGoal(start_amount=0, end_amount=700000, start_date=datetime.now(), n_months=2)
    by_date = LumpsumGoal(start_amount=0, end_amount=700000, start_date=datetime.now(), end_date=datetime.now() + timedelta(days=75))
    assert by_date.n_months == by_months.n_months
    for idx in range(by_months.n_months):
        assert by_months.get_monthly_need(idx) == by_date.get_monthly_need(idx)
| StarcoderdataPython |
1927055 | <reponame>myyerrol/xm_arm_workspace<filename>xm_arm_trajectory_control/scripts/xm_arm_trajectory_move_test.py
#!/usr/bin/env python
"""
********************************************************************
* Software License Agreement (BSD License)
*
* Copyright (c) 2016, Team-Xmbot-Service-Robot
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Team-Xmbot-Service-Robot nor the names
* of its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
********************************************************************
"""
# Authors: myyerrol
# Created: 2016.4.15
import rospy
import actionlib
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
from control_msgs.msg import FollowJointTrajectoryAction
from control_msgs.msg import FollowJointTrajectoryGoal
from sys import exit
class ArmTrajectoryMoveTest:
    """Interactive console tool: reads six joint targets from stdin and sends
    them to the arm's follow_joint_trajectory action server.

    NOTE(review): Python 2 code (print statements, raw_input).
    """
    def __init__(self):
        self.arm_client = actionlib.SimpleActionClient(
            "xm_arm/arm_controller/follow_joint_trajectory",
            FollowJointTrajectoryAction)
        rospy.init_node('xm_arm_trajectory_move_test', anonymous=False)
        # Arm degrees of freedom: a "joint" command must supply exactly six values.
        joint_dof = 6
        print "------------ Test Arm Accurate Move ------------"
        print "              Version 2                         "
        print "Input joint's position to move arm              "
        print "Such as [joint/j x x x x x x] or [exit/e]       "
        # Blocking read-eval loop; exits via the "exit"/"e" command or Ctrl-C.
        while True:
            print ">>> ",
            keyboard_cmd = raw_input().split(" ")
            try:
                if keyboard_cmd[0] == "joint" or keyboard_cmd[0] == "j":
                    if (len(keyboard_cmd) - 1) == joint_dof:
                        # Sign flips convert console input to the joint frames.
                        joint_lift = -float(keyboard_cmd[1])
                        joint_waist = -float(keyboard_cmd[2])
                        joint_big_arm = float(keyboard_cmd[3])
                        joint_forearm = -float(keyboard_cmd[4])
                        joint_wrist_pitching = -float(keyboard_cmd[5])
                        joint_wrist_rotation = float(keyboard_cmd[6])
                        print "joint_lift:%lf joint_waist:%lf " \
                              "joint_big_arm:%lf joint_forearm:%lf " \
                              "joint_wrist_pitching:%lf " \
                              "joint_wrist_rotation:%lf" % (
                                  joint_lift,
                                  joint_waist,
                                  joint_big_arm,
                                  joint_forearm,
                                  joint_wrist_pitching,
                                  joint_wrist_rotation)
                        arm_goal = [joint_lift,
                                    joint_waist,
                                    joint_big_arm,
                                    joint_forearm,
                                    joint_wrist_pitching,
                                    joint_wrist_rotation]
                        self.send_arm_goal(arm_goal)
                    else:
                        rospy.logerr("Joint's data must reach the dof of arm!")
                elif keyboard_cmd[0] == "exit" or keyboard_cmd[0] == "e":
                    exit()
            except Exception as exce:
                print "Error!", exce
    def send_arm_goal(self, arm_goal):
        # Wrap the six joint targets in a single 10-second trajectory point
        # and send it to the action server, blocking until it completes.
        arm_joint_names = ['joint_lift',
                           'joint_waist',
                           'joint_big_arm',
                           'joint_forearm',
                           'joint_wrist_pitching',
                           'joint_wrist_rotation']
        rospy.loginfo("Waiting for follow_joint_trajectory server")
        self.arm_client.wait_for_server()
        rospy.loginfo("Connected to follow_joint_trajectory server")
        rospy.sleep(1)
        arm_trajectory = JointTrajectory()
        arm_trajectory.joint_names = arm_joint_names
        arm_trajectory.points.append(JointTrajectoryPoint())
        arm_trajectory.points[0].positions = arm_goal
        arm_trajectory.points[0].time_from_start = rospy.Duration(10)
        rospy.loginfo("Preparing for moving the arm to goal position!")
        rospy.sleep(1)
        arm_goal_pos = FollowJointTrajectoryGoal()
        arm_goal_pos.trajectory = arm_trajectory
        arm_goal_pos.goal_time_tolerance = rospy.Duration(0)
        self.arm_client.send_goal(arm_goal_pos)
        rospy.loginfo("Send goal to the trajectory server successfully!")
        self.arm_client.wait_for_result()
if __name__ == '__main__':
    # Constructing the tool starts its blocking interactive loop.
    try:
        arm_position = ArmTrajectoryMoveTest()
    except KeyboardInterrupt:
        # Ctrl-C ends the command loop cleanly.
        print "Exit!"
| StarcoderdataPython |
12800673 | <reponame>lchloride/manga_rock_crawler<gh_stars>1-10
import sqlite3
class Chapter:
    """Value object mirroring one row of the ``chapter`` table.

    All fields default to None so an instance can be built empty and filled
    in through the setters before being persisted.
    """

    def __init__(self, iid=None, chapterId=None, seriesId=None, directory=None,
                 createTime=None, updateTime=None, name=None, publish_time=None):
        self._id = iid
        self._chapterId = chapterId
        self._seriesId = seriesId
        self._directory = directory
        self._createTime = createTime
        self._updateTime = updateTime
        self._name = name
        self._publish_time = publish_time

    def getId(self):
        return self._id

    def getChapterId(self):
        return self._chapterId

    def getSeriesId(self):
        return self._seriesId

    def getDirectory(self):
        return self._directory

    def getCreateTime(self):
        return self._createTime

    def getUpdateTime(self):
        return self._updateTime

    def getTuple(self):
        # Field order matches the chapter table's column order (id first).
        return (self._id, self._chapterId, self._seriesId, self._directory, self._createTime,
                self._updateTime, self._name, self._publish_time)

    def getName(self):
        return self._name

    def getPublishTime(self):
        return self._publish_time

    def setId(self, iid):
        self._id = iid

    def setChapterId(self, chapterId):
        self._chapterId = chapterId

    def setSeriesId(self, seriesId):
        self._seriesId = seriesId

    def setDirectory(self, directory):
        self._directory = directory

    def setCreateTime(self, createTime):
        self._createTime = createTime

    def setUpdateTime(self, updateTime):
        self._updateTime = updateTime

    def setName(self, name):
        self._name = name

    def setPublishTime(self, publish_time):
        self._publish_time = publish_time

    def __str__(self) -> str:
        # BUGFIX: %d raised TypeError whenever a field was still None (the
        # constructor default); %s renders integers identically and accepts None.
        return 'Chapter(id=%s, chapterId=%s, seriesId=%s, directory=%s, createTime=%s, ' \
               'updateTime=%s, name=%s, publishTime=%s)' \
               % (self._id, self._chapterId, self._seriesId, self._directory, self._createTime,
                  self._updateTime, self._name, self._publish_time)
class Series:
    """Value object mirroring one row of the ``series`` table."""

    def __init__(self, iid=None, seriesId=None, updateTime=None, meta=None):
        self._id = iid
        self._seriesId = seriesId
        self._updateTime = updateTime
        self._meta = meta

    def setId(self, iid):
        self._id = iid

    def setSeriesId(self, seriesId):
        self._seriesId = seriesId

    def setUpdateTime(self, updateTime):
        self._updateTime = updateTime

    def setMeta(self, meta):
        self._meta = meta

    def getId(self):
        return self._id

    def getSeriesId(self):
        return self._seriesId

    def getUpdateTime(self):
        return self._updateTime

    def getMeta(self):
        return self._meta

    def getTuple(self):
        # Field order matches the series table's column order (id first).
        return (self._id, self._seriesId, self._updateTime, self._meta)

    def __str__(self) -> str:
        # BUGFIX: %d raised TypeError for the default None fields; %s renders
        # integers identically and accepts None.
        return 'Series(id=%s, seriesId=%s, updateTime=%s, meta=%s)' \
               % (self._id, self._seriesId, self._updateTime, self._meta)
class DataManager:
    """sqlite3-backed persistence layer for Chapter and Series records.

    Opens (or creates) the database on construction and ensures both tables
    exist. All write methods commit immediately.
    """

    def __init__(self, db_name):
        """Connect to *db_name* (a path, or ':memory:') and create the schema."""
        self.conn = sqlite3.connect(db_name)
        self.cursor = self.conn.cursor()
        self.createDatabase()

    def submit(self):
        """Commit the current transaction."""
        self.conn.commit()

    def close(self):
        """Close the connection; the instance is unusable afterwards."""
        self.conn.close()

    def checkTableExists(self, tableName):
        """Return True if a table named *tableName* exists."""
        stmt = '''SELECT name FROM sqlite_master WHERE type='table' AND name=?'''
        self.cursor.execute(stmt, (tableName, ))
        # Fix: fetchone() returns None when nothing matched; the previous
        # if/else (and a leftover debug print in createDatabase) were removed.
        return self.cursor.fetchone() is not None

    def createDatabase(self):
        """Create the `chapter` and `series` tables if they are missing."""
        if not self.checkTableExists('chapter'):
            self.createTableChapter()
        if not self.checkTableExists('series'):
            self.createTableSeries()
        self.submit()

    def createTableChapter(self):
        stmt = '''CREATE TABLE "chapter" (
        "id" INTEGER PRIMARY KEY AUTOINCREMENT,
        "chapterid" INTEGER UNIQUE,
        "seriesid" INTEGER NOT NULL,
        "directory" TEXT,
        "create_time" INTEGER,
        "update_time" INTEGER,
        "name" TEXT NOT NULL,
        "publish_time" INTEGER NOT NULL
        )'''
        self.cursor.execute(stmt)

    def createTableSeries(self):
        stmt = '''CREATE TABLE `series` (
        `id` INTEGER PRIMARY KEY AUTOINCREMENT,
        `seriesid` INTEGER NOT NULL UNIQUE,
        `update_time` INTEGER NOT NULL,
        `meta` TEXT NOT NULL
        )'''
        self.cursor.execute(stmt)

    def insertChapter(self, chapter):
        """Insert *chapter*; the auto-increment id (first tuple slot) is skipped."""
        stmt = '''INSERT INTO `chapter`(`chapterid`, `seriesid`, `directory`, `create_time`, `update_time`, `name`, `publish_time`)
        VALUES (?, ?, ?, ?, ?, ?, ?)'''
        data = chapter.getTuple()[1:]
        self.cursor.execute(stmt, data)
        self.submit()

    def insertSeries(self, series):
        """Insert *series*; the auto-increment id (first tuple slot) is skipped."""
        stmt = '''INSERT INTO `series`(`seriesid`, `update_time`, `meta`) VALUES (?, ?, ?)'''
        data = series.getTuple()[1:]
        self.cursor.execute(stmt, data)
        self.submit()

    def selectChapterByChapterId(self, chapterId):
        """Return the Chapter with this external id, or None if absent."""
        stmt = '''SELECT * FROM `chapter` WHERE chapterid=?'''
        self.cursor.execute(stmt, (chapterId,))
        row = self.cursor.fetchone()
        if row is None:
            return None
        return Chapter(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7])

    def selectSeriesBySeriesId(self, seriesId):
        """Return the Series with this external id, or None if absent."""
        stmt = '''SELECT * FROM `series` WHERE seriesid=?'''
        self.cursor.execute(stmt, (seriesId,))
        row = self.cursor.fetchone()
        if row is None:
            return None
        return Series(row[0], row[1], row[2], row[3])

    def updateChapter(self, chapter):
        """Update directory/update_time/name of the row matching the chapter id."""
        stmt = '''UPDATE `chapter` SET `directory` = ?, `update_time`= ?, `name`=? WHERE `chapterid`=?'''
        data = (chapter.getDirectory(), chapter.getUpdateTime(), chapter.getName(), chapter.getChapterId())
        self.cursor.execute(stmt, data)
        self.submit()

    def updateSeries(self, series):
        """Update *series*; currently only the meta/update_time columns exist to change."""
        self.updateSeriesMeta(series)

    def updateSeriesMeta(self, series):
        stmt = '''UPDATE `series` SET `meta` = ?, `update_time` = ? WHERE `seriesid` = ?'''
        data = (series.getMeta(), series.getUpdateTime(), series.getSeriesId())
        self.cursor.execute(stmt, data)
        self.submit()
if __name__ == '__main__':
    # Script entry point: open (or create) the local manga database so the
    # schema is initialized as a side effect of DataManager construction.
    dm = DataManager('./manga1/data.db')
| StarcoderdataPython |
8081663 | <filename>homeassistant/components/yamaha_musiccast/number.py<gh_stars>1-10
"""Number entities for musiccast."""
from aiomusiccast.capabilities import NumberSetter
from homeassistant.components.number import NumberEntity
from homeassistant.components.yamaha_musiccast import (
DOMAIN,
MusicCastCapabilityEntity,
MusicCastDataUpdateCoordinator,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
async def async_setup_entry(
    hass: HomeAssistant,
    entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up MusicCast number entities based on a config entry."""
    coordinator: MusicCastDataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id]

    # Device-level number capabilities first (no zone) ...
    entities = [
        NumberCapability(coordinator, cap)
        for cap in coordinator.data.capabilities
        if isinstance(cap, NumberSetter)
    ]
    # ... then one entity per number capability of each zone.
    entities += [
        NumberCapability(coordinator, cap, zone)
        for zone, zone_data in coordinator.data.zones.items()
        for cap in zone_data.capabilities
        if isinstance(cap, NumberSetter)
    ]
    async_add_entities(entities)
class NumberCapability(MusicCastCapabilityEntity, NumberEntity):
    """Representation of a MusicCast Number entity."""

    # Narrowed type of the capability stored by the base class.
    capability: NumberSetter

    def __init__(
        self,
        coordinator: MusicCastDataUpdateCoordinator,
        capability: NumberSetter,
        zone_id: str | None = None,
    ) -> None:
        """Initialize the number entity.

        zone_id is None for device-level capabilities (annotation corrected
        from the implicit-Optional `str = None`).
        """
        super().__init__(coordinator, capability, zone_id)
        # Mirror the device-reported value range onto the NumberEntity attributes.
        self._attr_min_value = capability.value_range.minimum
        self._attr_max_value = capability.value_range.maximum
        self._attr_step = capability.value_range.step

    @property
    def value(self):
        """Return the current value."""
        return self.capability.current

    async def async_set_value(self, value: float) -> None:
        """Set a new value."""
        await self.capability.set(value)
| StarcoderdataPython |
3350390 | <reponame>KimSoungRyoul/PersistenceLayerInPythonApplication
from datetime import date
from django.core.management.base import BaseCommand
from apps.orders.models import DailyOrderReport, Order
class Command(BaseCommand):
    # Management command that builds today's order-statistics report and
    # delivers it over the channel chosen with --type.
    # (help text is user-facing and intentionally left in Korean:
    #  "command that produces the order statistics report")
    help = "주문 통계 보고서 뽑아주는 커맨드"

    def add_arguments(self, parser):
        """Register the --type option selecting the delivery channel (default: log)."""
        parser.add_argument(
            "--type", action="store", required=False, help="통계 자료 보고방식", default="log",
        )

    def handle(self, *args, **options):
        """Fetch today's aggregated report and emit it via the requested channel."""
        send_type = options["type"]
        # The manager encapsulates the raw-SQL aggregation (see module comment below).
        daily_report: DailyOrderReport = Order.raw_objects.get_daily_report(
            day=date.today()
        )
        if send_type == "log":
            self.stdout.write(self.style.SUCCESS(str(daily_report)))
        elif send_type == "slack":
            # Slack delivery not implemented yet.
            ...
        elif send_type == "email":
            # Email delivery not implemented yet.
            ...
        ...
# Code that exposes raw SQL like this, with no separation between project
# layers, is an anti-pattern from an architecture standpoint:
# Order.objects.raw(sql = """
# SELECT * FROM "ORDER" ...
# """)
# If you make full use of a Manager class, the code is indistinguishable from
# ordinary ORM usage from the reading developer's point of view —
# the Persistence Layer responsibility is clearly delegated to the Manager:
# Order.raw_objects.filter(blabla=".....").prefetch_related("....")
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.