max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
uttut/pipeline/ops/tokenizers/tests/test_whitespace_tokenizer.py | Yoctol/uttut | 2 | 6620051 | <gh_stars>1-10
import pytest
from ...tests.common_tests import OperatorTestTemplate, ParamTuple
from ..whitespace_tokenizer import WhiteSpaceTokenizer
class TestWhiteSpaceTokenizer(OperatorTestTemplate):
    """Whitespace-tokenizer cases driven through the shared operator test template."""
    # Each ParamTuple appears to be (input text, per-character label alignment,
    # expected tokens, expected token labels, case id) — confirm against the
    # ParamTuple definition in common_tests before relying on this reading.
    params = [
        ParamTuple(
            "a \t \t \nb c",
            [1, 0, 0, 0, 0, 0, 0, 2, 0, 3],
            ["a", "b", "c"],
            [1, 2, 3],
            id='eng',
        ),
        ParamTuple(
            " a \t \t \nb c\n\r",
            [0, 0, 1, 0, 0, 0, 0, 0, 0, 2, 0, 3, 0, 0],
            ["a", "b", "c"],
            [1, 2, 3],
            id='eng with whitespace at head and tail',
        ),
        ParamTuple(
            # No whitespace at all: the whole string is a single token.
            "GB亂入",
            [2, 2, 2, 2],
            ["GB亂入"],
            [2],
            id='zh',
        ),
        ParamTuple(
            "",
            [],
            [],
            [],
            id='empty string',
        ),
    ]
    @pytest.fixture(scope='class')
    def op(self):
        """One tokenizer instance shared by every case in this class."""
        return WhiteSpaceTokenizer()
    def test_equal(self, op):
        """The tokenizer carries no state, so two fresh instances compare equal."""
        assert WhiteSpaceTokenizer() == op
| import pytest
from ...tests.common_tests import OperatorTestTemplate, ParamTuple
from ..whitespace_tokenizer import WhiteSpaceTokenizer
class TestWhiteSpaceTokenizer(OperatorTestTemplate):
params = [
ParamTuple(
"a \t \t \nb c",
[1, 0, 0, 0, 0, 0, 0, 2, 0, 3],
["a", "b", "c"],
[1, 2, 3],
id='eng',
),
ParamTuple(
" a \t \t \nb c\n\r",
[0, 0, 1, 0, 0, 0, 0, 0, 0, 2, 0, 3, 0, 0],
["a", "b", "c"],
[1, 2, 3],
id='eng with whitespace at head and tail',
),
ParamTuple(
"GB亂入",
[2, 2, 2, 2],
["GB亂入"],
[2],
id='zh',
),
ParamTuple(
"",
[],
[],
[],
id='empty string',
),
]
@pytest.fixture(scope='class')
def op(self):
return WhiteSpaceTokenizer()
def test_equal(self, op):
assert WhiteSpaceTokenizer() == op | none | 1 | 2.367054 | 2 | |
pymydump/cmd/main.py | aakso/pymydump | 0 | 6620052 | from __future__ import print_function, unicode_literals
import argparse
import logging
import os
import re
import signal
import sys
import time
from pymydump.dumper import MySQLDumper
from pymydump.errors import PyMyDumpError
from pymydump.expire import ExpireDirectoryNumFiles
from pymydump.log import set_debug, setup_logging
from pymydump.output import FileOutput
from pymydump.stream import DBStream
DEFAULT_DB_PATTERN = r'^(?!(information_schema|performance_schema|sys)$)'
def run_tool(args):
    """Run the dump described by parsed CLI *args*.

    Writes either one combined dump (``--out-file``, '-' meaning stdout) or
    one timestamped file per database (``--out-dir``), optionally expiring
    old per-database dumps when ``--keep`` is positive.

    Raises:
        PyMyDumpError: if both out_file and out_dir are given.
    """
    # Default to streaming everything to stdout when no destination is given.
    if not args.out_file and not args.out_dir:
        args.out_file = '-'
    if args.out_file and args.out_dir:
        raise PyMyDumpError('cannot have both out_file and out_dir')
    dumper = MySQLDumper(
        host=args.host,
        username=args.username,
        # fix: was the placeholder `<PASSWORD>` (invalid Python); the parsed
        # --password / PYMYDUMP_PASSWORD value is what belongs here.
        password=args.password,
        opts=args.mysqldump_opts)
    # A single output file wants one combined stream; a directory wants one
    # stream per database.
    single_stream = bool(args.out_file)
    stream = DBStream(
        dumper,
        pattern=args.db_pattern,
        compressor_name=args.compress,
        single_stream=single_stream)
    out = FileOutput(stream.stream())
    if args.out_file:
        out.write_to_file(args.out_file)
    if args.out_dir:
        type_suffix = '.sql'
        if args.compress == 'bz2':
            type_suffix += '.bz2'
        if args.keep > 0:
            expire = ExpireDirectoryNumFiles(args.out_dir, args.keep)
        suffix = '-{}{}'.format(time.strftime('%Y%m%d%H%M%S'), type_suffix)
        for name, db in out.write_to_dir(args.out_dir, suffix):
            print(name)
            if args.keep > 0:
                # Expire only older dumps of this particular database.
                expire_pat = re.compile(r'^{}-[0-9]+{}$'.format(db, type_suffix))
                expire.expire(expire_pat)
def main():
    """CLI entry point: parse options, normalize mysqldump opts, run the dump.

    Every option defaults from a PYMYDUMP_* environment variable so the tool
    can be configured entirely from the environment (e.g. under cron).
    Returns a process exit code: 0 on success, 1 on error or interrupt.
    """
    setup_logging()
    parser = argparse.ArgumentParser(
        description='Tool to do sensible MySQL dumps with mysqldump')
    # NOTE(review): string env-var defaults are passed through the argument's
    # `type` callable by argparse (so PYMYDUMP_KEEP becomes an int) — confirm
    # this holds for the minimum supported Python version.
    parser.add_argument(
        '--keep',
        type=int,
        metavar='NUM',
        default=os.environ.get('PYMYDUMP_KEEP', -1),
        help='Keep num amount of dumps, makes only sense with --outdir')
    parser.add_argument(
        '--username',
        metavar='STRING',
        default=os.environ.get('PYMYDUMP_USERNAME', os.environ.get('USER')),
        help='Username to use to connect to database')
    parser.add_argument(
        '--compress',
        choices=['none', 'bz2'],
        default=os.environ.get('PYMYDUMP_COMPRESS', 'none'),
        help='Dump compression method')
    parser.add_argument(
        '--password',
        metavar='STRING',
        default=os.environ.get('PYMYDUMP_PASSWORD'),
        help='Password to use to connect to database')
    parser.add_argument(
        '--host',
        metavar='HOSTNAME',
        default=os.environ.get('PYMYDUMP_HOST', 'localhost'),
        help='Host to connect to')
    parser.add_argument(
        '--db-pattern',
        metavar='REGEXP',
        type=re.compile,
        default=os.environ.get('PYMYDUMP_DB_PATTERN', DEFAULT_DB_PATTERN),
        help='Databases to be dumped')
    parser.add_argument(
        '--mysqldump-opts',
        metavar='KEY1=VAL,KEY2=VAL,...',
        default=os.environ.get('PYMYDUMP_MYSQLDUMP_OPTS'),
        help='Additional options to pass to mysqldump')
    parser.add_argument(
        '--out-file',
        metavar='FILE',
        default=os.environ.get('PYMYDUMP_OUTFILE'),
        help='File to write dumps to. Use - for stdout')
    parser.add_argument(
        '--out-dir',
        metavar='PATH',
        default=os.environ.get('PYMYDUMP_OUTDIR'),
        help='Path to write dumps in individual files')
    parser.add_argument(
        '--debug',
        action='store_true',
        default=parse_bool(os.environ.get('PYMYDUMP_DEBUG')),
        help='Enable debug logging to STDERR')
    args = parser.parse_args()
    try:
        if args.debug:
            set_debug()
        # Turn the raw "K1=V1,K2=V2" string into a list of (key, value) tuples
        # before handing it to the dumper.
        if args.mysqldump_opts:
            props = args.mysqldump_opts[:]
            args.mysqldump_opts = [parse_kvs(item)
                                   for item in parse_list(props)]
        run_tool(args)
    except PyMyDumpError as e:
        print('ERROR: {}'.format(e), file=sys.stderr)
        return 1
    except KeyboardInterrupt:
        print('User interrupt')
        return 1
    return 0
def parse_bool(val):
    """Interpret *val* as a boolean flag.

    'true', 't' or '1' (any case) mean True; None, empty strings and any
    other value mean False.
    """
    truthy = ('true', 't', '1')
    return bool(val) and val.lower() in truthy
def parse_list(val):
    """Split a comma-separated string into a list; None or '' yields []."""
    return val.split(',') if val else []
def parse_kvs(val):
    """Parse a KEY or KEY=VALUE string into a (key, value) tuple.

    Splits on the first '=' only, so values may themselves contain '='
    (e.g. ``where=id=5``) — previously such inputs raised an error.
    A bare KEY yields (KEY, None); key and value are whitespace-stripped.
    """
    key, sep, value = val.partition('=')
    if not sep:
        return (key.strip(), None)
    return (key.strip(), value.strip())
if __name__ == '__main__':
sys.exit(main())
| from __future__ import print_function, unicode_literals
import argparse
import logging
import os
import re
import signal
import sys
import time
from pymydump.dumper import MySQLDumper
from pymydump.errors import PyMyDumpError
from pymydump.expire import ExpireDirectoryNumFiles
from pymydump.log import set_debug, setup_logging
from pymydump.output import FileOutput
from pymydump.stream import DBStream
DEFAULT_DB_PATTERN = r'^(?!(information_schema|performance_schema|sys)$)'
def run_tool(args):
if not args.out_file and not args.out_dir:
args.out_file = '-'
if args.out_file and args.out_dir:
raise PyMyDumpError('cannot have both out_file and out_dir')
dumper = MySQLDumper(
host=args.host,
username=args.username,
password=<PASSWORD>,
opts=args.mysqldump_opts)
single_stream = True if args.out_file else False
stream = DBStream(
dumper,
pattern=args.db_pattern,
compressor_name=args.compress,
single_stream=single_stream)
out = FileOutput(stream.stream())
if args.out_file:
out.write_to_file(args.out_file)
if args.out_dir:
type_suffix = '.sql'
if args.compress == 'bz2':
type_suffix += '.bz2'
if args.keep > 0:
expire = ExpireDirectoryNumFiles(args.out_dir, args.keep)
suffix = '-{}{}'.format(time.strftime('%Y%m%d%H%M%S'), type_suffix)
for name, db in out.write_to_dir(args.out_dir, suffix):
print(name)
if args.keep > 0:
expire_pat = re.compile(r'^{}-[0-9]+{}$'.\
format(db, type_suffix))
expire.expire(expire_pat)
def main():
setup_logging()
parser = argparse.ArgumentParser(
description='Tool to do sensible MySQL dumps with mysqldump')
parser.add_argument(
'--keep',
type=int,
metavar='NUM',
default=os.environ.get('PYMYDUMP_KEEP', -1),
help='Keep num amount of dumps, makes only sense with --outdir')
parser.add_argument(
'--username',
metavar='STRING',
default=os.environ.get('PYMYDUMP_USERNAME', os.environ.get('USER')),
help='Username to use to connect to database')
parser.add_argument(
'--compress',
choices=['none', 'bz2'],
default=os.environ.get('PYMYDUMP_COMPRESS', 'none'),
help='Dump compression method')
parser.add_argument(
'--password',
metavar='STRING',
default=os.environ.get('PYMYDUMP_PASSWORD'),
help='Password to use to connect to database')
parser.add_argument(
'--host',
metavar='HOSTNAME',
default=os.environ.get('PYMYDUMP_HOST', 'localhost'),
help='Host to connect to')
parser.add_argument(
'--db-pattern',
metavar='REGEXP',
type=re.compile,
default=os.environ.get('PYMYDUMP_DB_PATTERN', DEFAULT_DB_PATTERN),
help='Databases to be dumped')
parser.add_argument(
'--mysqldump-opts',
metavar='KEY1=VAL,KEY2=VAL,...',
default=os.environ.get('PYMYDUMP_MYSQLDUMP_OPTS'),
help='Additional options to pass to mysqldump')
parser.add_argument(
'--out-file',
metavar='FILE',
default=os.environ.get('PYMYDUMP_OUTFILE'),
help='File to write dumps to. Use - for stdout')
parser.add_argument(
'--out-dir',
metavar='PATH',
default=os.environ.get('PYMYDUMP_OUTDIR'),
help='Path to write dumps in individual files')
parser.add_argument(
'--debug',
action='store_true',
default=parse_bool(os.environ.get('PYMYDUMP_DEBUG')),
help='Enable debug logging to STDERR')
args = parser.parse_args()
try:
if args.debug:
set_debug()
if args.mysqldump_opts:
props = args.mysqldump_opts[:]
args.mysqldump_opts = [parse_kvs(item)
for item in parse_list(props)]
run_tool(args)
except PyMyDumpError as e:
print('ERROR: {}'.format(e), file=sys.stderr)
return 1
except KeyboardInterrupt:
print('User interrupt')
return 1
return 0
def parse_bool(val):
if val and val.lower() in ['true', 't', '1']:
return True
else:
return False
def parse_list(val):
if val:
return val.split(',')
else:
return []
def parse_kvs(val):
p = val.split('=')
if len(p) == 1:
return (p[0].strip(), None)
elif len(p) == 2:
return (p[0].strip(), p[1].strip())
else:
raise PyMyDumpError('cannot parse: {}'.format(val))
if __name__ == '__main__':
sys.exit(main())
| none | 1 | 2.183977 | 2 | |
roster/migrations/0006_auto_20170806_0035.py | ankanb240/otis-web | 15 | 6620053 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2017-08-06 00:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add a display-name field to Student and TA.

    Do not hand-edit the operations; applied state is tracked by Django.
    """
    dependencies = [
        ('roster', '0005_auto_20170806_0031'),
    ]
    operations = [
        migrations.AddField(
            model_name='student',
            name='name',
            field=models.CharField(default='Nameless Student', help_text='The display name for this student (e.g. a nickname)', max_length=80),
            # The default exists only to back-fill existing rows; new rows
            # must supply a name explicitly.
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='ta',
            name='name',
            field=models.CharField(default='Nameless TA', help_text='The display name for this TA (e.g. a nickname)', max_length=80),
            preserve_default=False,
        ),
    ]
| # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2017-08-06 00:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('roster', '0005_auto_20170806_0031'),
]
operations = [
migrations.AddField(
model_name='student',
name='name',
field=models.CharField(default='Nameless Student', help_text='The display name for this student (e.g. a nickname)', max_length=80),
preserve_default=False,
),
migrations.AddField(
model_name='ta',
name='name',
field=models.CharField(default='Nameless TA', help_text='The display name for this TA (e.g. a nickname)', max_length=80),
preserve_default=False,
),
]
| en | 0.818947 | # -*- coding: utf-8 -*- # Generated by Django 1.9.6 on 2017-08-06 00:35 | 1.705636 | 2 |
exercises1-115/d077 - Contando vogais em tupla.py | renankalfa/Curso_em_Video | 3 | 6620054 | palavras = ('aprender', 'programar', 'linguaguem', 'python',
'curso', 'gratis')
vogais = ('a', 'e', 'i', 'o', 'u')
for palavra in palavras:
print(f'\nNa palavra {palavra.upper()} temos as vogais: ', end='')
for silaba in palavra:
if silaba in vogais:
print(silaba, end=' ')
| palavras = ('aprender', 'programar', 'linguaguem', 'python',
'curso', 'gratis')
vogais = ('a', 'e', 'i', 'o', 'u')
for palavra in palavras:
print(f'\nNa palavra {palavra.upper()} temos as vogais: ', end='')
for silaba in palavra:
if silaba in vogais:
print(silaba, end=' ')
| none | 1 | 3.927614 | 4 | |
tppm/ui/busy_manager.py | timtumturutumtum/TraktPlaybackProgressManager | 36 | 6620055 | # coding: utf-8
from __future__ import absolute_import
from __future__ import unicode_literals
from six import itervalues
from six.moves.tkinter import TclError
class BusyManager(object):
    """Switch a Tk widget tree to the busy ('watch') cursor and back.

    Saves each widget's original cursor in ``self.widgets`` (keyed by the
    widget's Tk path string) so :meth:`unbusy` can restore it later.
    """
    # Based on http://effbot.org/zone/tkinter-busy.htm
    def __init__(self, widget):
        self.toplevel = widget.winfo_toplevel()
        # str(widget) -> (widget, original cursor) for every widget we changed.
        self.widgets = {}
    def busy(self, widget=None):
        """Recursively apply the 'watch' cursor to *widget* (default: toplevel)."""
        # attach busy cursor to toplevel, plus all windows
        # that define their own cursor.
        if widget is None:
            w = self.toplevel # myself
        else:
            w = widget
        if str(w) not in self.widgets:
            # attach cursor to this widget (skip if it is already 'watch' or
            # has no cursor option at all)
            cursor = self._get_cursor(w)
            if cursor is not None and cursor != 'watch':
                self.widgets[str(w)] = (w, cursor)
                self._set_cursor(w, 'watch')
        for w in itervalues(w.children):
            self.busy(w)
    def unbusy(self, widget=None):
        """Restore saved cursors for *widget*'s subtree, or everything by default."""
        # restore cursors
        if widget is not None and str(widget) in self.widgets:
            w, cursor = self.widgets[str(widget)]
            self._set_cursor(w, cursor)
            del self.widgets[str(widget)]
            # NOTE(review): children are only recursed when *widget* itself was
            # tracked; an untracked widget's tracked children stay busy —
            # confirm that is the intended behavior.
            for w in itervalues(w.children):
                self.unbusy(w)
        else:
            for w, cursor in itervalues(self.widgets):
                self._set_cursor(w, cursor)
            self.widgets = {}
    @staticmethod
    def _get_cursor(widget):
        """Return the widget's cursor option, or None if it has no such option."""
        try:
            return widget.cget('cursor')
        except TclError:
            return None
    @staticmethod
    def _set_cursor(widget, cursor):
        """Set the widget's cursor, silently skipping widgets without one."""
        try:
            widget.config(cursor=cursor)
        except TclError:
            pass
| # coding: utf-8
from __future__ import absolute_import
from __future__ import unicode_literals
from six import itervalues
from six.moves.tkinter import TclError
class BusyManager(object):
# Based on http://effbot.org/zone/tkinter-busy.htm
def __init__(self, widget):
self.toplevel = widget.winfo_toplevel()
self.widgets = {}
def busy(self, widget=None):
# attach busy cursor to toplevel, plus all windows
# that define their own cursor.
if widget is None:
w = self.toplevel # myself
else:
w = widget
if str(w) not in self.widgets:
# attach cursor to this widget
cursor = self._get_cursor(w)
if cursor is not None and cursor != 'watch':
self.widgets[str(w)] = (w, cursor)
self._set_cursor(w, 'watch')
for w in itervalues(w.children):
self.busy(w)
def unbusy(self, widget=None):
# restore cursors
if widget is not None and str(widget) in self.widgets:
w, cursor = self.widgets[str(widget)]
self._set_cursor(w, cursor)
del self.widgets[str(widget)]
for w in itervalues(w.children):
self.unbusy(w)
else:
for w, cursor in itervalues(self.widgets):
self._set_cursor(w, cursor)
self.widgets = {}
@staticmethod
def _get_cursor(widget):
try:
return widget.cget('cursor')
except TclError:
return None
@staticmethod
def _set_cursor(widget, cursor):
try:
widget.config(cursor=cursor)
except TclError:
pass
| en | 0.770895 | # coding: utf-8 # Based on http://effbot.org/zone/tkinter-busy.htm # attach busy cursor to toplevel, plus all windows # that define their own cursor. # myself # attach cursor to this widget # restore cursors | 2.191652 | 2 |
parsl/tests/test_error_handling/test_fail.py | cylondata/parsl | 323 | 6620056 | import pytest
from parsl.app.app import python_app
@python_app
def always_fail():
    """App that always raises, to test exception propagation through futures."""
    raise ValueError("This ValueError should propagate to the app caller in fut.result()")
def test_simple():
    """The app's ValueError must surface when the future's result is fetched."""
    with pytest.raises(ValueError):
        fut = always_fail()
        fut.result()
| import pytest
from parsl.app.app import python_app
@python_app
def always_fail():
raise ValueError("This ValueError should propagate to the app caller in fut.result()")
def test_simple():
with pytest.raises(ValueError):
fut = always_fail()
fut.result()
| none | 1 | 2.357033 | 2 | |
apps/fyle/utils.py | fylein/fyle-xero-api | 0 | 6620057 | from typing import List
from django.conf import settings
from fylesdk import FyleSDK
class FyleConnector:
    """
    Fyle utility functions: thin wrapper around an authenticated FyleSDK
    connection for fetching attachments and posting reimbursements.
    """
    def __init__(self, refresh_token):
        """Create an authenticated FyleSDK connection from Django settings."""
        client_id = settings.FYLE_CLIENT_ID
        client_secret = settings.FYLE_CLIENT_SECRET
        base_url = settings.FYLE_BASE_URL
        self.connection = FyleSDK(
            base_url=base_url,
            client_id=client_id,
            client_secret=client_secret,
            refresh_token=refresh_token,
        )

    def get_attachments(self, expense_ids: List[str]):
        """
        Get attachments against expense_ids.

        Returns a flat list of attachment dicts, each tagged with its
        'expense_id'. Duplicate filenames within a single expense are
        dropped (the first occurrence wins).
        """
        attachments = []
        for expense_id in expense_ids or []:
            seen_file_names = set()  # set: O(1) duplicate-filename check
            # fix: the response dict previously shadowed the loop variable
            # `attachment`, which made the loop hard to follow.
            response = self.connection.Expenses.get_attachments(expense_id)
            for attachment in response['data']:
                if attachment['filename'] not in seen_file_names:
                    attachment['expense_id'] = expense_id
                    attachments.append(attachment)
                    seen_file_names.add(attachment['filename'])
        return attachments

    def post_reimbursement(self, reimbursement_ids: list):
        """
        Process Reimbursements in bulk.
        """
        return self.connection.Reimbursements.post(reimbursement_ids)
| from typing import List
from django.conf import settings
from fylesdk import FyleSDK
class FyleConnector:
"""
Fyle utility functions
"""
def __init__(self, refresh_token):
client_id = settings.FYLE_CLIENT_ID
client_secret = settings.FYLE_CLIENT_SECRET
base_url = settings.FYLE_BASE_URL
self.connection = FyleSDK(
base_url=base_url,
client_id=client_id,
client_secret=client_secret,
refresh_token=refresh_token,
)
def get_attachments(self, expense_ids: List[str]):
"""
Get attachments against expense_ids
"""
attachments = []
if expense_ids:
for expense_id in expense_ids:
attachment_file_names = []
attachment = self.connection.Expenses.get_attachments(expense_id)
for attachment in attachment['data']:
if attachment['filename'] not in attachment_file_names:
attachment['expense_id'] = expense_id
attachments.append(attachment)
attachment_file_names.append(attachment['filename'])
return attachments
return []
def post_reimbursement(self, reimbursement_ids: list):
"""
Process Reimbursements in bulk.
"""
return self.connection.Reimbursements.post(reimbursement_ids)
| en | 0.847957 | Fyle utility functions Get attachments against expense_ids Process Reimbursements in bulk. | 2.250561 | 2 |
utils/yaml_utils.py | balansky/pytorch_gan | 20 | 6620058 | # !/usr/bin/env python
# -*- coding: utf-8 -*-
import yaml
# Copy from tgans repo.
# Copy from tgans repo.
class Config(object):
    """Read-only attribute/item view over a configuration dict."""

    def __init__(self, config_dict):
        self.config = config_dict

    def __getattr__(self, key):
        # Only called for names not found normally; missing config keys
        # surface as AttributeError so hasattr()/getattr() behave as usual.
        if key not in self.config:
            raise AttributeError(key)
        return self.config[key]

    def __getitem__(self, key):
        return self.config[key]

    def __repr__(self):
        return yaml.dump(self.config, default_flow_style=False)
| # !/usr/bin/env python
# -*- coding: utf-8 -*-
import yaml
# Copy from tgans repo.
class Config(object):
def __init__(self, config_dict):
self.config = config_dict
def __getattr__(self, key):
if key in self.config:
return self.config[key]
else:
raise AttributeError(key)
def __getitem__(self, key):
return self.config[key]
def __repr__(self):
return yaml.dump(self.config, default_flow_style=False)
| en | 0.555083 | # !/usr/bin/env python # -*- coding: utf-8 -*- # Copy from tgans repo. | 2.432913 | 2 |
client/src/utility/button.py | juan-nunez/Space_combat | 0 | 6620059 | <gh_stars>0
from rectangle import Rectangle
class Button(Rectangle):
def __init__(self,left, top, width, height):
Rectangle.__init__(self,left,top,width,height) | from rectangle import Rectangle
class Button(Rectangle):
def __init__(self,left, top, width, height):
Rectangle.__init__(self,left,top,width,height) | none | 1 | 2.990892 | 3 | |
src/banking.py | GalaxyDigitalLLC/Financial-Industry-Electricity-Balance-Scripts | 3 | 6620060 | import statistics
import helpers
from contribution import Contribution
class Banking:
    """Model of the banking system's total electricity consumption.

    Loads the banking dataset from a YAML file, builds one model per
    subsystem (data centers, branches, ATMs, card networks) and totals
    their estimated annual usage in TWh/yr.
    """

    def __init__(self, file_path):
        # Each subsystem model computes its own `usage` on construction.
        self.data = helpers.read_yaml(file_path)
        self.datacenters = Datacenters(self.data['server'])
        self.branches = Branches(self.data['branch'])
        self.atms = ATMs(self.data['atm'])
        self.cns = CardNetworks(self.data['cn'])
        self.usage_contributions = {
            'DataCenters': self.datacenters.usage,
            'Branches': self.branches.usage,
            'ATMs': self.atms.usage,
            'Card Networks': self.cns.usage,
        }
        self.usage = sum(self.usage_contributions.values())

    def _summary(self, tabs):
        """Build the printable report: total line, blank line, per-subsystem table."""
        header = ('Banking System ...............'
                  ' {:.2f} TWh/yr\n\n'.format(self.usage))
        return header + self.alignment(tabs)

    def __repr__(self):
        return self._summary('\t')

    def __str__(self):
        return self._summary('\t')

    def alignment(self, tabs=''):
        """Return one dot-padded line per subsystem with right-aligned values.

        Each line is `tabs + name + dots + value + ' TWh/yr'`, padded so the
        name/value pair spans 28 characters and the values share a right edge.
        """
        pad_width = 28
        formatted = {name: '{:.2f}'.format(usage)
                     for name, usage in self.usage_contributions.items()}
        widest = max((len(text) for text in formatted.values()), default=0)
        lines = []
        for name, text in formatted.items():
            slack = widest - len(text)  # extra spaces to right-align this value
            dots = pad_width - len(name) - len(text) - slack
            lines.append('{}{} {} {}{} TWh/yr'.format(
                tabs, name, '.' * dots, ' ' * slack, text))
        return ''.join(line + '\n' for line in lines)
class Datacenters(Contribution):
    """Bank data-center usage, scaled up from Bank of America's figures."""

    def get_usage(self):
        # Scale BofA's data-center count by the share of total deposits it holds.
        num_dc = (self.data['total_deposit_100'] * self.data['boa']['num_dc']
                  / self.data['boa']['total_deposit'])
        demand_kw = num_dc * self.data['dc_area'] * self.data['server_demand_per_sq_ft']
        self.usage = helpers.kw_to_tw(demand_kw * self.data['hours'])
class Branches(Contribution):
    """Branch electricity usage, scaled from branches per 100k adults."""

    def get_usage(self):
        branch_count = round(
            helpers.pop() * self.data['num_per_100k_adults'] / 100_000, 0)
        self.usage = helpers.kw_to_tw(branch_count * self.ave_bus_usage())

    def ave_bus_usage(self):
        """Average business usage per branch across US, UK, Mexico and China.

        Mexico and China only report residential figures, so their business
        usage is estimated via the mean US/UK business-to-residential ratio.
        """
        bus = self.data['business']
        res = self.data['residential']
        china_res = statistics.mean(res['china'].values())
        bus_to_res = statistics.mean([bus['us'] / res['us'],
                                      bus['uk'] / res['uk']])
        return statistics.mean([bus['us'], bus['uk'],
                                bus_to_res * res['mexico'],
                                bus_to_res * china_res])
class ATMs(Contribution):
    """ATM fleet usage, scaled from ATMs per 100k adults."""

    def get_usage(self):
        num_atms = round(
            helpers.pop() * self.data['num_per_100k_adults'] / 100_000, 0)
        annual_kwh = num_atms * self.data['demand'] * self.data['hours']
        self.usage = helpers.kw_to_tw(annual_kwh)
class CardNetworks(Contribution):
    """Card-network usage: Visa's data-center usage scaled to all transactions."""

    def get_usage(self):
        visa = self.data['visa']
        visa_area = sum(visa['facility'].values())
        visa_kwh = visa_area * self.data['server_demand_per_sq_ft'] * self.data['hours']
        # Scale Visa's usage by the ratio of total to Visa transaction volume
        # (billions of transactions).
        self.usage = helpers.kw_to_tw(visa_kwh / visa['b_tx'] * self.data['b_tx'])
| import statistics
import helpers
from contribution import Contribution
class Banking:
def __init__(self, file_path):
self.data = helpers.read_yaml(file_path)
self.datacenters = Datacenters(self.data['server'])
self.branches = Branches(self.data['branch'])
self.atms = ATMs(self.data['atm'])
self.cns = CardNetworks(self.data['cn'])
self.usage = self.datacenters.usage
self.usage += self.branches.usage
self.usage += self.atms.usage
self.usage += self.cns.usage
self.usage_contributions = {
'DataCenters': self.datacenters.usage,
'Branches': self.branches.usage,
'ATMs': self.atms.usage,
'Card Networks': self.cns.usage,
}
def __repr__(self):
rep = 'Banking System ...............'
rep += " {:.2f} TWh/yr".format(self.usage)
rep += '\n\n'
rep += self.alignment('\t')
return rep
def __str__(self):
print_str = 'Banking System ...............'
print_str += " {:.2f} TWh/yr".format(self.usage)
print_str += '\n\n'
print_str += self.alignment('\t')
return print_str
def alignment(self, tabs=''):
res = ''
max_pad = 28
max_num_char = 0
# Get max number of characters in each value in order to get proper
# number of '.' and ' ' on value print
for k, v in self.usage_contributions.items():
value = '{:.2f}'.format(v)
value_len = len(value)
if value_len > max_num_char:
max_num_char = value_len
for k, v in self.usage_contributions.items():
# Number of characters in value name
first_len = len(k)
value = '{:.2f}'.format(v)
# Number of characters in value
second_len = len(value)
# Align value wrt char length of longest value
diff_len = max_num_char - second_len
# Number of dots is the dfference of `max_pad` and the combined key
# and value character length
num_dots = max_pad - (first_len + second_len)
# Create resulting string
res += tabs + k
res += ' '
res += '.' * (num_dots - diff_len)
res += ' ' * (diff_len + 1)
res += value
res += ' TWh/yr'
res += '\n'
return res
class Datacenters(Contribution):
def get_usage(self):
op_hours = self.data['hours']
deposits_total = self.data['total_deposit_100']
deposits_boa = self.data['boa']['total_deposit']
num_dc_boa = self.data['boa']['num_dc']
num_dc = deposits_total * num_dc_boa / deposits_boa
area_dc = self.data['dc_area']
demand_per_area = self.data['server_demand_per_sq_ft']
total_dc_demand = num_dc * area_dc * demand_per_area
self.usage = helpers.kw_to_tw(total_dc_demand * op_hours)
class Branches(Contribution):
def get_usage(self):
num_per_100k_adults = self.data['num_per_100k_adults']
bus_usage = self.ave_bus_usage()
num_branches = round(helpers.pop() * num_per_100k_adults / 100_000, 0)
self.usage = helpers.kw_to_tw(num_branches * bus_usage)
def ave_bus_usage(self):
us_bus = self.data['business']['us']
uk_bus = self.data['business']['uk']
us_res = self.data['residential']['us']
uk_res = self.data['residential']['uk']
mexico_res = self.data['residential']['mexico']
china_res = statistics.mean(self.data['residential']['china'].values())
us_ratio = us_bus / us_res
uk_ratio = uk_bus / uk_res
ratio = statistics.mean([us_ratio, uk_ratio])
mexico_bus = ratio * mexico_res
china_bus = ratio * china_res
return statistics.mean([us_bus, uk_bus, mexico_bus, china_bus])
class ATMs(Contribution):
def get_usage(self):
op_hours = self.data['hours']
single_atm_demand = self.data['demand']
num_per_100k_adults = self.data['num_per_100k_adults']
num_atms = round(helpers.pop() * num_per_100k_adults / 100_000, 0)
self.usage = helpers.kw_to_tw(num_atms * single_atm_demand * op_hours)
class CardNetworks(Contribution):
def get_usage(self):
op_hours = self.data['hours']
total_area_visa_dc = sum(self.data['visa']['facility'].values())
server_demand_per_sq_ft = self.data['server_demand_per_sq_ft']
visa_usage = total_area_visa_dc * server_demand_per_sq_ft * op_hours
visa_btx = self.data['visa']['b_tx']
total_btx = self.data['b_tx']
self.usage = helpers.kw_to_tw(visa_usage / visa_btx * total_btx)
| en | 0.779418 | # Get max number of characters in each value in order to get proper # number of '.' and ' ' on value print # Number of characters in value name # Number of characters in value # Align value wrt char length of longest value # Number of dots is the dfference of `max_pad` and the combined key # and value character length # Create resulting string | 2.815871 | 3 |
src/fencex/idps/__init__.py | uc-cdis/fencex | 0 | 6620061 | <gh_stars>0
from authlib.integrations.starlette_client import OAuth
from ..config import config
oauth = OAuth(config)
| from authlib.integrations.starlette_client import OAuth
from ..config import config
oauth = OAuth(config) | none | 1 | 1.292227 | 1 | |
VBF/fitting/makeLimitForest.py | GuillelmoGomezCeballos/PandaAnalysis | 0 | 6620062 | <reponame>GuillelmoGomezCeballos/PandaAnalysis<filename>VBF/fitting/makeLimitForest.py<gh_stars>0
#!/usr/bin/env python
from re import sub
from sys import argv,exit
from os import path,getenv
import argparse
parser = argparse.ArgumentParser(description='make forest')
parser.add_argument('--region',metavar='region',type=str,default=None)
toProcess = parser.parse_args().region
argv=[]
import ROOT as root
from PandaCore.Tools.Misc import *
from PandaCore.Tools.Load import *
import PandaCore.Tools.Functions # kinematics
#import PandaAnalysis.VBF.Selection as sel
#import PandaAnalysis.VBF.MonojetSelection as sel
import PandaAnalysis.VBF.LooseSelection as sel
Load('PandaAnalysisFlat','LimitTreeBuilder')
baseDir = getenv('PANDA_ZEYNEPDIR')+'/merged/'
lumi = 36600
factory = root.LimitTreeBuilder()
if toProcess:
factory.SetOutFile(baseDir+'/limits/limitForest_%s.root'%toProcess)
else:
factory.SetOutFile(baseDir+'/limits/limitForest_all.root')
def dataCut(basecut,trigger):
    """Combine a trigger requirement with the base selection, for data only.

    The commented variants below are previously-used alternatives (MET
    filter, run-number cap) kept for reference.
    """
    # return tAND('metFilter==1',tAND(trigger,basecut))
    return tAND(trigger,basecut)
    #return tAND(tAND(trigger,basecut),'runNum<=276811')
treelist = []
def getTree(fpath):
    """Open `<baseDir>/<fpath>.root` and return its 'events' tree and file.

    The tree is appended to the module-level `treelist`, and the TFile is
    returned so the caller keeps it alive — presumably because the tree is
    invalidated once its file is garbage-collected (verify PyROOT ownership).
    """
    global treelist
    fIn = root.TFile(baseDir+fpath+'.root')
    tIn = fIn.Get('events')
    treelist.append(tIn)
    return tIn,fIn
def enable(regionName):
    """Return True when *regionName* should be processed.

    When no --region was requested (`toProcess` is falsy), every region
    is enabled; otherwise only the requested one.
    """
    return (toProcess == regionName) if toProcess else True
# input
tZll,fZll = getTree('ZJets')
tZvv,fZvv = getTree('ZtoNuNu')
tWlv,fWlv = getTree('WJets')
#tWlv_nlo,fWlv_nlo = getTree('WJets_nlo')
tewkZll,fewkZll = getTree('EWKZJets')
tewkZvv,fewkZvv = getTree('EWKZtoNuNu')
tewkWlv,fewkWlv = getTree('EWKWJets')
tPho,fPho = getTree('GJets')
tTTbar,fTT = getTree('TTbar')
tVV,fVV = getTree('Diboson')
tQCD,fQCD = getTree('QCD')
tST,fST = getTree('SingleTop')
tMET,fMET = getTree('MET')
tSingleEle,fSEle = getTree('SingleElectron')
tSinglePho,fSPho = getTree('SinglePhoton')
tVBF,fVBF = getTree('VBF_H125')
tGGF,fGGF = getTree('GGF_H125')
tAllWlv = root.TChain('events')
for f in ['WJets','EWKWJets']:
tAllWlv.AddFile(baseDir+'/'+f+'.root')
tAllZll = root.TChain('events')
for f in ['ZJets','EWKZJets']:
tAllZll.AddFile(baseDir+'/'+f+'.root')
tAllZvv = root.TChain('events')
for f in ['ZtoNuNu','EWKZtoNuNu']:
tAllZvv.AddFile(baseDir+'/'+f+'.root')
treelist += [tAllWlv,tAllZll,tAllZvv]
factory.cd()
regions = {}
processes = {}
vm = root.VariableMap()
vm.AddVar('met','met')
vm.AddVar('metPhi','metPhi')
vm.AddVar('genBosonPt','genBos_pt')
vm.AddVar('genBosonPhi','genBos_phi')
#for x in ['jjDEta','mjj','jot1Pt','jot2Pt','jot1Eta','jot2Eta','minJetMetDPhi_withendcap']:
for x in ['jjDEta','mjj','minJetMetDPhi_withendcap']:
vm.AddVar(x,x)
vm.AddFormula('jjDPhi','fabs(SignedDeltaPhi(jot1Phi,jot2Phi))')
# test region
if enable('test'):
regions['test'] = root.Region('test')
cut = sel.cuts['signal']
weight = '%f*%s'%(lumi,sel.weights['signal'])
processes['test'] = [
root.Process('Data',tMET,vm,dataCut(cut,sel.triggers['met']),'1'),
root.Process('Diboson',tVV,vm,cut,weight),
]
for p in processes['test']:
regions['test'].AddProcess(p)
factory.AddRegion(regions['test'])
# signal region
if enable('signal'):
regions['signal'] = root.Region('signal')
cut = sel.cuts['signal']
weight = '%f*%s'%(lumi,sel.weights['signal'])
PInfo('makeLimitForest.py',cut)
processes['signal'] = [
root.Process('Data',tMET,vm,dataCut(cut,sel.triggers['met']),'1'),
root.Process('Zvv',tZvv,vm,cut,tTIMES('zkfactor*ewk_z',weight)),
root.Process('Wlv',tWlv,vm,cut,tTIMES('wkfactor*ewk_w',weight)),
root.Process('Zll',tZll,vm,cut,tTIMES('zkfactor*ewk_z',weight)),
root.Process('ewkZvv',tewkZvv,vm,cut,weight),
root.Process('ewkWlv',tewkWlv,vm,cut,weight),
root.Process('ewkZll',tewkZll,vm,cut,weight),
root.Process('ttbar',tTTbar,vm,cut,weight),
root.Process('ST',tST,vm,cut,weight),
root.Process('Diboson',tVV,vm,cut,weight),
root.Process('QCD',tQCD,vm,cut,weight),
root.Process('VBF_H125',tVBF,vm,cut,weight),
root.Process('GGF_H125',tGGF,vm,cut,weight),
root.Process('allWlv',tAllWlv,vm,cut,tTIMES('wkfactor*ewk_w',weight)),
root.Process('allZvv',tAllZvv,vm,cut,tTIMES('zkfactor*ewk_z',weight)),
root.Process('allZll',tAllZll,vm,cut,tTIMES('zkfactor*ewk_z',weight)),
]
for p in processes['signal']:
regions['signal'].AddProcess(p)
factory.AddRegion(regions['signal'])
# wmn
if enable('wmn'):
regions['wmn'] = root.Region('wmn')
cut = sel.cuts['wmn']
weight = '%f*%s'%(lumi,sel.weights['wmn'])
processes['wmn'] = [
root.Process('Data',tMET,vm,dataCut(cut,sel.triggers['met']),'1'),
root.Process('Zvv',tZvv,vm,cut,tTIMES('zkfactor*ewk_z',weight)),
root.Process('Wlv',tWlv,vm,cut,tTIMES('wkfactor*ewk_w',weight)),
root.Process('Zll',tZll,vm,cut,tTIMES('zkfactor*ewk_z',weight)),
root.Process('ewkZvv',tewkZvv,vm,cut,weight),
root.Process('ewkWlv',tewkWlv,vm,cut,weight),
root.Process('ewkZll',tewkZll,vm,cut,weight),
root.Process('ttbar',tTTbar,vm,cut,weight),
root.Process('ST',tST,vm,cut,weight),
root.Process('Diboson',tVV,vm,cut,weight),
root.Process('QCD',tQCD,vm,cut,weight),
root.Process('allWlv',tAllWlv,vm,cut,tTIMES('wkfactor*ewk_w',weight)),
root.Process('allZvv',tAllZvv,vm,cut,tTIMES('zkfactor*ewk_z',weight)),
root.Process('allZll',tAllZll,vm,cut,tTIMES('zkfactor*ewk_z',weight)),
]
for p in processes['wmn']:
regions['wmn'].AddProcess(p)
factory.AddRegion(regions['wmn'])
# wen
if enable('wen'):
regions['wen'] = root.Region('wen')
cut = sel.cuts['wen']
weight = '%f*%s'%(lumi,sel.weights['wen'])
processes['wen'] = [
root.Process('Data',tSingleEle,vm,dataCut(cut,sel.triggers['ele']),'1'),
root.Process('Zvv',tZvv,vm,cut,tTIMES('zkfactor*ewk_z',weight)),
root.Process('Wlv',tWlv,vm,cut,tTIMES('wkfactor*ewk_w',weight)),
root.Process('Zll',tZll,vm,cut,tTIMES('zkfactor*ewk_z',weight)),
root.Process('ewkZvv',tewkZvv,vm,cut,weight),
root.Process('ewkWlv',tewkWlv,vm,cut,weight),
root.Process('ewkZll',tewkZll,vm,cut,weight),
root.Process('ttbar',tTTbar,vm,cut,weight),
root.Process('ST',tST,vm,cut,weight),
root.Process('Diboson',tVV,vm,cut,weight),
root.Process('QCD',tQCD,vm,cut,weight),
root.Process('allWlv',tAllWlv,vm,cut,tTIMES('wkfactor*ewk_w',weight)),
root.Process('allZvv',tAllZvv,vm,cut,tTIMES('zkfactor*ewk_z',weight)),
root.Process('allZll',tAllZll,vm,cut,tTIMES('zkfactor*ewk_z',weight)),
]
for p in processes['wen']:
regions['wen'].AddProcess(p)
factory.AddRegion(regions['wen'])
# zmm
if enable('zmm'):
regions['zmm'] = root.Region('zmm')
cut = sel.cuts['zmm']
weight = '%f*%s'%(lumi,sel.weights['zmm'])
processes['zmm'] = [
root.Process('Data',tMET,vm,dataCut(cut,sel.triggers['met']),'1'),
root.Process('Zvv',tZvv,vm,cut,tTIMES('zkfactor*ewk_z',weight)),
root.Process('Wlv',tWlv,vm,cut,tTIMES('wkfactor*ewk_w',weight)),
root.Process('Zll',tZll,vm,cut,tTIMES('zkfactor*ewk_z',weight)),
root.Process('ewkZvv',tewkZvv,vm,cut,weight),
root.Process('ewkWlv',tewkWlv,vm,cut,weight),
root.Process('ewkZll',tewkZll,vm,cut,weight),
root.Process('ttbar',tTTbar,vm,cut,weight),
root.Process('ST',tST,vm,cut,weight),
root.Process('Diboson',tVV,vm,cut,weight),
root.Process('QCD',tQCD,vm,cut,weight),
root.Process('allWlv',tAllWlv,vm,cut,tTIMES('wkfactor*ewk_w',weight)),
root.Process('allZvv',tAllZvv,vm,cut,tTIMES('zkfactor*ewk_z',weight)),
root.Process('allZll',tAllZll,vm,cut,tTIMES('zkfactor*ewk_z',weight)),
]
for p in processes['zmm']:
regions['zmm'].AddProcess(p)
factory.AddRegion(regions['zmm'])
# zee
if enable('zee'):
regions['zee'] = root.Region('zee')
cut = sel.cuts['zee']
weight = '%f*%s'%(lumi,sel.weights['zee'])
processes['zee'] = [
root.Process('Data',tSingleEle,vm,dataCut(cut,sel.triggers['ele']),'1'),
root.Process('Zvv',tZvv,vm,cut,tTIMES('zkfactor*ewk_z',weight)),
root.Process('Wlv',tWlv,vm,cut,tTIMES('wkfactor*ewk_w',weight)),
root.Process('Zll',tZll,vm,cut,tTIMES('zkfactor*ewk_z',weight)),
root.Process('ewkZvv',tewkZvv,vm,cut,weight),
root.Process('ewkWlv',tewkWlv,vm,cut,weight),
root.Process('ewkZll',tewkZll,vm,cut,weight),
root.Process('ttbar',tTTbar,vm,cut,weight),
root.Process('ST',tST,vm,cut,weight),
root.Process('Diboson',tVV,vm,cut,weight),
root.Process('QCD',tQCD,vm,cut,weight),
root.Process('allWlv',tAllWlv,vm,cut,tTIMES('wkfactor*ewk_w',weight)),
root.Process('allZvv',tAllZvv,vm,cut,tTIMES('zkfactor*ewk_z',weight)),
root.Process('allZll',tAllZll,vm,cut,tTIMES('zkfactor*ewk_z',weight)),
]
for p in processes['zee']:
regions['zee'].AddProcess(p)
factory.AddRegion(regions['zee'])
# photon
if enable('pho'):
regions['pho'] = root.Region('pho')
cut = sel.cuts['pho']
weight = '%f*%s'%(lumi,sel.weights['pho'])
processes['pho'] = [
root.Process('Data',tSinglePho,vm,dataCut(cut,sel.triggers['pho']),'1'),
root.Process('Pho',tPho,vm,cut,tTIMES('akfactor*ewk_a',weight)),
# root.Process('QCD',tSinglePho,vmA,dataCut(cut,phoTrigger),'photonPurityWeight'),
root.Process('QCD',tQCD,vm,cut,weight),
]
for p in processes['pho']:
regions['pho'].AddProcess(p)
factory.AddRegion(regions['pho'])
PInfo('makeLimitForest','Starting '+str(toProcess))
factory.Run()
PInfo('makeLimitForest','Finishing '+str(toProcess))
for t in treelist:
t.SetDirectory(0)
factory.Output()
| #!/usr/bin/env python
from re import sub
from sys import argv,exit
from os import path,getenv
import argparse
parser = argparse.ArgumentParser(description='make forest')
parser.add_argument('--region',metavar='region',type=str,default=None)
toProcess = parser.parse_args().region
argv=[]
import ROOT as root
from PandaCore.Tools.Misc import *
from PandaCore.Tools.Load import *
import PandaCore.Tools.Functions # kinematics
#import PandaAnalysis.VBF.Selection as sel
#import PandaAnalysis.VBF.MonojetSelection as sel
import PandaAnalysis.VBF.LooseSelection as sel
Load('PandaAnalysisFlat','LimitTreeBuilder')
baseDir = getenv('PANDA_ZEYNEPDIR')+'/merged/'
lumi = 36600
factory = root.LimitTreeBuilder()
if toProcess:
factory.SetOutFile(baseDir+'/limits/limitForest_%s.root'%toProcess)
else:
factory.SetOutFile(baseDir+'/limits/limitForest_all.root')
def dataCut(basecut,trigger):
# return tAND('metFilter==1',tAND(trigger,basecut))
return tAND(trigger,basecut)
#return tAND(tAND(trigger,basecut),'runNum<=276811')
treelist = []
def getTree(fpath):
global treelist
fIn = root.TFile(baseDir+fpath+'.root')
tIn = fIn.Get('events')
treelist.append(tIn)
return tIn,fIn
def enable(regionName):
if toProcess:
return (toProcess==regionName)
else:
return True
# input
tZll,fZll = getTree('ZJets')
tZvv,fZvv = getTree('ZtoNuNu')
tWlv,fWlv = getTree('WJets')
#tWlv_nlo,fWlv_nlo = getTree('WJets_nlo')
tewkZll,fewkZll = getTree('EWKZJets')
tewkZvv,fewkZvv = getTree('EWKZtoNuNu')
tewkWlv,fewkWlv = getTree('EWKWJets')
tPho,fPho = getTree('GJets')
tTTbar,fTT = getTree('TTbar')
tVV,fVV = getTree('Diboson')
tQCD,fQCD = getTree('QCD')
tST,fST = getTree('SingleTop')
tMET,fMET = getTree('MET')
tSingleEle,fSEle = getTree('SingleElectron')
tSinglePho,fSPho = getTree('SinglePhoton')
tVBF,fVBF = getTree('VBF_H125')
tGGF,fGGF = getTree('GGF_H125')
tAllWlv = root.TChain('events')
for f in ['WJets','EWKWJets']:
tAllWlv.AddFile(baseDir+'/'+f+'.root')
tAllZll = root.TChain('events')
for f in ['ZJets','EWKZJets']:
tAllZll.AddFile(baseDir+'/'+f+'.root')
tAllZvv = root.TChain('events')
for f in ['ZtoNuNu','EWKZtoNuNu']:
tAllZvv.AddFile(baseDir+'/'+f+'.root')
treelist += [tAllWlv,tAllZll,tAllZvv]
factory.cd()
regions = {}
processes = {}
vm = root.VariableMap()
vm.AddVar('met','met')
vm.AddVar('metPhi','metPhi')
vm.AddVar('genBosonPt','genBos_pt')
vm.AddVar('genBosonPhi','genBos_phi')
#for x in ['jjDEta','mjj','jot1Pt','jot2Pt','jot1Eta','jot2Eta','minJetMetDPhi_withendcap']:
for x in ['jjDEta','mjj','minJetMetDPhi_withendcap']:
vm.AddVar(x,x)
vm.AddFormula('jjDPhi','fabs(SignedDeltaPhi(jot1Phi,jot2Phi))')
# test region
if enable('test'):
regions['test'] = root.Region('test')
cut = sel.cuts['signal']
weight = '%f*%s'%(lumi,sel.weights['signal'])
processes['test'] = [
root.Process('Data',tMET,vm,dataCut(cut,sel.triggers['met']),'1'),
root.Process('Diboson',tVV,vm,cut,weight),
]
for p in processes['test']:
regions['test'].AddProcess(p)
factory.AddRegion(regions['test'])
# signal region
if enable('signal'):
regions['signal'] = root.Region('signal')
cut = sel.cuts['signal']
weight = '%f*%s'%(lumi,sel.weights['signal'])
PInfo('makeLimitForest.py',cut)
processes['signal'] = [
root.Process('Data',tMET,vm,dataCut(cut,sel.triggers['met']),'1'),
root.Process('Zvv',tZvv,vm,cut,tTIMES('zkfactor*ewk_z',weight)),
root.Process('Wlv',tWlv,vm,cut,tTIMES('wkfactor*ewk_w',weight)),
root.Process('Zll',tZll,vm,cut,tTIMES('zkfactor*ewk_z',weight)),
root.Process('ewkZvv',tewkZvv,vm,cut,weight),
root.Process('ewkWlv',tewkWlv,vm,cut,weight),
root.Process('ewkZll',tewkZll,vm,cut,weight),
root.Process('ttbar',tTTbar,vm,cut,weight),
root.Process('ST',tST,vm,cut,weight),
root.Process('Diboson',tVV,vm,cut,weight),
root.Process('QCD',tQCD,vm,cut,weight),
root.Process('VBF_H125',tVBF,vm,cut,weight),
root.Process('GGF_H125',tGGF,vm,cut,weight),
root.Process('allWlv',tAllWlv,vm,cut,tTIMES('wkfactor*ewk_w',weight)),
root.Process('allZvv',tAllZvv,vm,cut,tTIMES('zkfactor*ewk_z',weight)),
root.Process('allZll',tAllZll,vm,cut,tTIMES('zkfactor*ewk_z',weight)),
]
for p in processes['signal']:
regions['signal'].AddProcess(p)
factory.AddRegion(regions['signal'])
# wmn
if enable('wmn'):
regions['wmn'] = root.Region('wmn')
cut = sel.cuts['wmn']
weight = '%f*%s'%(lumi,sel.weights['wmn'])
processes['wmn'] = [
root.Process('Data',tMET,vm,dataCut(cut,sel.triggers['met']),'1'),
root.Process('Zvv',tZvv,vm,cut,tTIMES('zkfactor*ewk_z',weight)),
root.Process('Wlv',tWlv,vm,cut,tTIMES('wkfactor*ewk_w',weight)),
root.Process('Zll',tZll,vm,cut,tTIMES('zkfactor*ewk_z',weight)),
root.Process('ewkZvv',tewkZvv,vm,cut,weight),
root.Process('ewkWlv',tewkWlv,vm,cut,weight),
root.Process('ewkZll',tewkZll,vm,cut,weight),
root.Process('ttbar',tTTbar,vm,cut,weight),
root.Process('ST',tST,vm,cut,weight),
root.Process('Diboson',tVV,vm,cut,weight),
root.Process('QCD',tQCD,vm,cut,weight),
root.Process('allWlv',tAllWlv,vm,cut,tTIMES('wkfactor*ewk_w',weight)),
root.Process('allZvv',tAllZvv,vm,cut,tTIMES('zkfactor*ewk_z',weight)),
root.Process('allZll',tAllZll,vm,cut,tTIMES('zkfactor*ewk_z',weight)),
]
for p in processes['wmn']:
regions['wmn'].AddProcess(p)
factory.AddRegion(regions['wmn'])
# wen
if enable('wen'):
regions['wen'] = root.Region('wen')
cut = sel.cuts['wen']
weight = '%f*%s'%(lumi,sel.weights['wen'])
processes['wen'] = [
root.Process('Data',tSingleEle,vm,dataCut(cut,sel.triggers['ele']),'1'),
root.Process('Zvv',tZvv,vm,cut,tTIMES('zkfactor*ewk_z',weight)),
root.Process('Wlv',tWlv,vm,cut,tTIMES('wkfactor*ewk_w',weight)),
root.Process('Zll',tZll,vm,cut,tTIMES('zkfactor*ewk_z',weight)),
root.Process('ewkZvv',tewkZvv,vm,cut,weight),
root.Process('ewkWlv',tewkWlv,vm,cut,weight),
root.Process('ewkZll',tewkZll,vm,cut,weight),
root.Process('ttbar',tTTbar,vm,cut,weight),
root.Process('ST',tST,vm,cut,weight),
root.Process('Diboson',tVV,vm,cut,weight),
root.Process('QCD',tQCD,vm,cut,weight),
root.Process('allWlv',tAllWlv,vm,cut,tTIMES('wkfactor*ewk_w',weight)),
root.Process('allZvv',tAllZvv,vm,cut,tTIMES('zkfactor*ewk_z',weight)),
root.Process('allZll',tAllZll,vm,cut,tTIMES('zkfactor*ewk_z',weight)),
]
for p in processes['wen']:
regions['wen'].AddProcess(p)
factory.AddRegion(regions['wen'])
# zmm
if enable('zmm'):
regions['zmm'] = root.Region('zmm')
cut = sel.cuts['zmm']
weight = '%f*%s'%(lumi,sel.weights['zmm'])
processes['zmm'] = [
root.Process('Data',tMET,vm,dataCut(cut,sel.triggers['met']),'1'),
root.Process('Zvv',tZvv,vm,cut,tTIMES('zkfactor*ewk_z',weight)),
root.Process('Wlv',tWlv,vm,cut,tTIMES('wkfactor*ewk_w',weight)),
root.Process('Zll',tZll,vm,cut,tTIMES('zkfactor*ewk_z',weight)),
root.Process('ewkZvv',tewkZvv,vm,cut,weight),
root.Process('ewkWlv',tewkWlv,vm,cut,weight),
root.Process('ewkZll',tewkZll,vm,cut,weight),
root.Process('ttbar',tTTbar,vm,cut,weight),
root.Process('ST',tST,vm,cut,weight),
root.Process('Diboson',tVV,vm,cut,weight),
root.Process('QCD',tQCD,vm,cut,weight),
root.Process('allWlv',tAllWlv,vm,cut,tTIMES('wkfactor*ewk_w',weight)),
root.Process('allZvv',tAllZvv,vm,cut,tTIMES('zkfactor*ewk_z',weight)),
root.Process('allZll',tAllZll,vm,cut,tTIMES('zkfactor*ewk_z',weight)),
]
for p in processes['zmm']:
regions['zmm'].AddProcess(p)
factory.AddRegion(regions['zmm'])
# zee
if enable('zee'):
regions['zee'] = root.Region('zee')
cut = sel.cuts['zee']
weight = '%f*%s'%(lumi,sel.weights['zee'])
processes['zee'] = [
root.Process('Data',tSingleEle,vm,dataCut(cut,sel.triggers['ele']),'1'),
root.Process('Zvv',tZvv,vm,cut,tTIMES('zkfactor*ewk_z',weight)),
root.Process('Wlv',tWlv,vm,cut,tTIMES('wkfactor*ewk_w',weight)),
root.Process('Zll',tZll,vm,cut,tTIMES('zkfactor*ewk_z',weight)),
root.Process('ewkZvv',tewkZvv,vm,cut,weight),
root.Process('ewkWlv',tewkWlv,vm,cut,weight),
root.Process('ewkZll',tewkZll,vm,cut,weight),
root.Process('ttbar',tTTbar,vm,cut,weight),
root.Process('ST',tST,vm,cut,weight),
root.Process('Diboson',tVV,vm,cut,weight),
root.Process('QCD',tQCD,vm,cut,weight),
root.Process('allWlv',tAllWlv,vm,cut,tTIMES('wkfactor*ewk_w',weight)),
root.Process('allZvv',tAllZvv,vm,cut,tTIMES('zkfactor*ewk_z',weight)),
root.Process('allZll',tAllZll,vm,cut,tTIMES('zkfactor*ewk_z',weight)),
]
for p in processes['zee']:
regions['zee'].AddProcess(p)
factory.AddRegion(regions['zee'])
# photon
if enable('pho'):
regions['pho'] = root.Region('pho')
cut = sel.cuts['pho']
weight = '%f*%s'%(lumi,sel.weights['pho'])
processes['pho'] = [
root.Process('Data',tSinglePho,vm,dataCut(cut,sel.triggers['pho']),'1'),
root.Process('Pho',tPho,vm,cut,tTIMES('akfactor*ewk_a',weight)),
# root.Process('QCD',tSinglePho,vmA,dataCut(cut,phoTrigger),'photonPurityWeight'),
root.Process('QCD',tQCD,vm,cut,weight),
]
for p in processes['pho']:
regions['pho'].AddProcess(p)
factory.AddRegion(regions['pho'])
PInfo('makeLimitForest','Starting '+str(toProcess))
factory.Run()
PInfo('makeLimitForest','Finishing '+str(toProcess))
for t in treelist:
t.SetDirectory(0)
factory.Output() | en | 0.27045 | #!/usr/bin/env python # kinematics #import PandaAnalysis.VBF.Selection as sel #import PandaAnalysis.VBF.MonojetSelection as sel # return tAND('metFilter==1',tAND(trigger,basecut)) #return tAND(tAND(trigger,basecut),'runNum<=276811') # input #tWlv_nlo,fWlv_nlo = getTree('WJets_nlo') #for x in ['jjDEta','mjj','jot1Pt','jot2Pt','jot1Eta','jot2Eta','minJetMetDPhi_withendcap']: # test region # signal region # wmn # wen # zmm # zee # photon # root.Process('QCD',tSinglePho,vmA,dataCut(cut,phoTrigger),'photonPurityWeight'), | 2.11702 | 2 |
pypesto/optimize/optimize.py | m-philipps/pyPESTO | 0 | 6620063 | <reponame>m-philipps/pyPESTO
import logging
from typing import Callable, Iterable, Union
from ..engine import Engine, SingleCoreEngine
from ..objective import HistoryOptions
from ..problem import Problem
from ..result import Result
from ..startpoint import StartpointMethod, to_startpoint_method, uniform
from ..store import autosave
from .optimizer import Optimizer, ScipyOptimizer
from .options import OptimizeOptions
from .task import OptimizerTask
from .util import (
assign_ids,
bound_n_starts_from_env,
postprocess_hdf5_history,
preprocess_hdf5_history,
)
logger = logging.getLogger(__name__)
def minimize(
problem: Problem,
optimizer: Optimizer = None,
n_starts: int = 100,
ids: Iterable[str] = None,
startpoint_method: Union[StartpointMethod, Callable, bool] = None,
result: Result = None,
engine: Engine = None,
progress_bar: bool = True,
options: OptimizeOptions = None,
history_options: HistoryOptions = None,
filename: Union[str, Callable, None] = "Auto",
) -> Result:
"""
Do multistart optimization.
Parameters
----------
problem:
The problem to be solved.
optimizer:
The optimizer to be used n_starts times.
n_starts:
Number of starts of the optimizer.
ids:
Ids assigned to the startpoints.
startpoint_method:
Method for how to choose start points. False means the optimizer does
not require start points, e.g. for the 'PyswarmOptimizer'.
result:
A result object to append the optimization results to. For example,
one might append more runs to a previous optimization. If None,
a new object is created.
engine:
Parallelization engine. Defaults to sequential execution on a
SingleCoreEngine.
progress_bar:
Whether to display a progress bar.
options:
Various options applied to the multistart optimization.
history_options:
Optimizer history options.
filename:
Name of the hdf5 file, where the result will be saved. Default is
"Auto", in which case it will automatically generate a file named
`year_month_day_optimization_result.hdf5`. Deactivate saving by
setting filename to `None`.
Optionally a method, see docs for `pypesto.store.auto.autosave`.
Returns
-------
result:
Result object containing the results of all multistarts in
`result.optimize_result`.
"""
# optimizer
if optimizer is None:
optimizer = ScipyOptimizer()
# number of starts
n_starts = bound_n_starts_from_env(n_starts)
# startpoint method
if startpoint_method is None:
startpoint_method = uniform
# convert startpoint method to class instance
startpoint_method = to_startpoint_method(startpoint_method)
# check options
if options is None:
options = OptimizeOptions()
options = OptimizeOptions.assert_instance(options)
# history options
if history_options is None:
history_options = HistoryOptions()
history_options = HistoryOptions.assert_instance(history_options)
# assign startpoints
startpoints = startpoint_method(
n_starts=n_starts,
problem=problem,
)
ids = assign_ids(
n_starts=n_starts,
ids=ids,
result=result,
)
# prepare result
if result is None:
result = Result(problem)
# engine
if engine is None:
engine = SingleCoreEngine()
# change to one hdf5 storage file per start if parallel and if hdf5
history_file = history_options.storage_file
history_requires_postprocessing = preprocess_hdf5_history(
history_options, engine
)
# define tasks
tasks = []
for startpoint, id in zip(startpoints, ids):
task = OptimizerTask(
optimizer=optimizer,
problem=problem,
x0=startpoint,
id=id,
history_options=history_options,
optimize_options=options,
)
tasks.append(task)
# perform multistart optimization
ret = engine.execute(tasks, progress_bar=progress_bar)
# merge hdf5 history files
if history_requires_postprocessing:
postprocess_hdf5_history(ret, history_file, history_options)
# aggregate results
for optimizer_result in ret:
result.optimize_result.append(optimizer_result)
# sort by best fval
result.optimize_result.sort()
# if history file provided, set storage file to that one
if filename == "Auto" and history_file is not None:
filename = history_file
autosave(filename=filename, result=result, store_type="optimize")
return result
| import logging
from typing import Callable, Iterable, Union
from ..engine import Engine, SingleCoreEngine
from ..objective import HistoryOptions
from ..problem import Problem
from ..result import Result
from ..startpoint import StartpointMethod, to_startpoint_method, uniform
from ..store import autosave
from .optimizer import Optimizer, ScipyOptimizer
from .options import OptimizeOptions
from .task import OptimizerTask
from .util import (
assign_ids,
bound_n_starts_from_env,
postprocess_hdf5_history,
preprocess_hdf5_history,
)
logger = logging.getLogger(__name__)
def minimize(
problem: Problem,
optimizer: Optimizer = None,
n_starts: int = 100,
ids: Iterable[str] = None,
startpoint_method: Union[StartpointMethod, Callable, bool] = None,
result: Result = None,
engine: Engine = None,
progress_bar: bool = True,
options: OptimizeOptions = None,
history_options: HistoryOptions = None,
filename: Union[str, Callable, None] = "Auto",
) -> Result:
"""
Do multistart optimization.
Parameters
----------
problem:
The problem to be solved.
optimizer:
The optimizer to be used n_starts times.
n_starts:
Number of starts of the optimizer.
ids:
Ids assigned to the startpoints.
startpoint_method:
Method for how to choose start points. False means the optimizer does
not require start points, e.g. for the 'PyswarmOptimizer'.
result:
A result object to append the optimization results to. For example,
one might append more runs to a previous optimization. If None,
a new object is created.
engine:
Parallelization engine. Defaults to sequential execution on a
SingleCoreEngine.
progress_bar:
Whether to display a progress bar.
options:
Various options applied to the multistart optimization.
history_options:
Optimizer history options.
filename:
Name of the hdf5 file, where the result will be saved. Default is
"Auto", in which case it will automatically generate a file named
`year_month_day_optimization_result.hdf5`. Deactivate saving by
setting filename to `None`.
Optionally a method, see docs for `pypesto.store.auto.autosave`.
Returns
-------
result:
Result object containing the results of all multistarts in
`result.optimize_result`.
"""
# optimizer
if optimizer is None:
optimizer = ScipyOptimizer()
# number of starts
n_starts = bound_n_starts_from_env(n_starts)
# startpoint method
if startpoint_method is None:
startpoint_method = uniform
# convert startpoint method to class instance
startpoint_method = to_startpoint_method(startpoint_method)
# check options
if options is None:
options = OptimizeOptions()
options = OptimizeOptions.assert_instance(options)
# history options
if history_options is None:
history_options = HistoryOptions()
history_options = HistoryOptions.assert_instance(history_options)
# assign startpoints
startpoints = startpoint_method(
n_starts=n_starts,
problem=problem,
)
ids = assign_ids(
n_starts=n_starts,
ids=ids,
result=result,
)
# prepare result
if result is None:
result = Result(problem)
# engine
if engine is None:
engine = SingleCoreEngine()
# change to one hdf5 storage file per start if parallel and if hdf5
history_file = history_options.storage_file
history_requires_postprocessing = preprocess_hdf5_history(
history_options, engine
)
# define tasks
tasks = []
for startpoint, id in zip(startpoints, ids):
task = OptimizerTask(
optimizer=optimizer,
problem=problem,
x0=startpoint,
id=id,
history_options=history_options,
optimize_options=options,
)
tasks.append(task)
# perform multistart optimization
ret = engine.execute(tasks, progress_bar=progress_bar)
# merge hdf5 history files
if history_requires_postprocessing:
postprocess_hdf5_history(ret, history_file, history_options)
# aggregate results
for optimizer_result in ret:
result.optimize_result.append(optimizer_result)
# sort by best fval
result.optimize_result.sort()
# if history file provided, set storage file to that one
if filename == "Auto" and history_file is not None:
filename = history_file
autosave(filename=filename, result=result, store_type="optimize")
return result | en | 0.732283 | Do multistart optimization. Parameters ---------- problem: The problem to be solved. optimizer: The optimizer to be used n_starts times. n_starts: Number of starts of the optimizer. ids: Ids assigned to the startpoints. startpoint_method: Method for how to choose start points. False means the optimizer does not require start points, e.g. for the 'PyswarmOptimizer'. result: A result object to append the optimization results to. For example, one might append more runs to a previous optimization. If None, a new object is created. engine: Parallelization engine. Defaults to sequential execution on a SingleCoreEngine. progress_bar: Whether to display a progress bar. options: Various options applied to the multistart optimization. history_options: Optimizer history options. filename: Name of the hdf5 file, where the result will be saved. Default is "Auto", in which case it will automatically generate a file named `year_month_day_optimization_result.hdf5`. Deactivate saving by setting filename to `None`. Optionally a method, see docs for `pypesto.store.auto.autosave`. Returns ------- result: Result object containing the results of all multistarts in `result.optimize_result`. # optimizer # number of starts # startpoint method # convert startpoint method to class instance # check options # history options # assign startpoints # prepare result # engine # change to one hdf5 storage file per start if parallel and if hdf5 # define tasks # perform multistart optimization # merge hdf5 history files # aggregate results # sort by best fval # if history file provided, set storage file to that one | 2.484802 | 2 |
flask_twitts.py | stanmain/flask_twitts | 0 | 6620064 | <gh_stars>0
# Copyright © 2018 <NAME>. All rights reserved.
"""Flask-Twitts application."""
from app import create_app
app = create_app()
| # Copyright © 2018 <NAME>. All rights reserved.
"""Flask-Twitts application."""
from app import create_app
app = create_app() | en | 0.850978 | # Copyright © 2018 <NAME>. All rights reserved. Flask-Twitts application. | 1.286771 | 1 |
bplistlib/functions.py | jaysonlarose/bplistlib | 2 | 6620065 | # encoding: utf-8
"""This file contains private functions for the bplistlib module."""
def get_byte_width(value_to_store, max_byte_width):
"""
Return the minimum number of bytes needed to store a given value as an
unsigned integer. If the byte width needed exceeds max_byte_width, raise
ValueError."""
for byte_width in range(max_byte_width):
if 0x100 ** byte_width <= value_to_store < 0x100 ** (byte_width + 1):
return byte_width + 1
raise ValueError
def find_with_type(value, list_):
"""
Find value in list_, matching both for equality and type, and
return the index it was found at. If not found, raise ValueError.
"""
for index, comparison_value in enumerate(list_):
if (type(value) == type(comparison_value) and
value == comparison_value):
return index
raise ValueError
def flatten_object_list(object_list, objects):
"""Convert a list of objects to a list of references."""
reference_list = []
for object_ in object_list:
reference = find_with_type(object_, objects)
reference_list.append(reference)
return reference_list
def unflatten_reference_list(references, objects, object_handler):
"""Convert a list of references to a list of objects."""
object_list = []
for reference in references:
item = objects[reference]
item = object_handler.unflatten(item, objects)
object_list.append(item)
return object_list
| # encoding: utf-8
"""This file contains private functions for the bplistlib module."""
def get_byte_width(value_to_store, max_byte_width):
"""
Return the minimum number of bytes needed to store a given value as an
unsigned integer. If the byte width needed exceeds max_byte_width, raise
ValueError."""
for byte_width in range(max_byte_width):
if 0x100 ** byte_width <= value_to_store < 0x100 ** (byte_width + 1):
return byte_width + 1
raise ValueError
def find_with_type(value, list_):
"""
Find value in list_, matching both for equality and type, and
return the index it was found at. If not found, raise ValueError.
"""
for index, comparison_value in enumerate(list_):
if (type(value) == type(comparison_value) and
value == comparison_value):
return index
raise ValueError
def flatten_object_list(object_list, objects):
"""Convert a list of objects to a list of references."""
reference_list = []
for object_ in object_list:
reference = find_with_type(object_, objects)
reference_list.append(reference)
return reference_list
def unflatten_reference_list(references, objects, object_handler):
"""Convert a list of references to a list of objects."""
object_list = []
for reference in references:
item = objects[reference]
item = object_handler.unflatten(item, objects)
object_list.append(item)
return object_list
| en | 0.796867 | # encoding: utf-8 This file contains private functions for the bplistlib module. Return the minimum number of bytes needed to store a given value as an unsigned integer. If the byte width needed exceeds max_byte_width, raise ValueError. Find value in list_, matching both for equality and type, and return the index it was found at. If not found, raise ValueError. Convert a list of objects to a list of references. Convert a list of references to a list of objects. | 2.89607 | 3 |
packages/girder_worker/girder_worker/core/utils.py | ShenQianwithC/HistomicsTK | 0 | 6620066 | <gh_stars>0
import contextlib
import errno
import functools
import imp
import json
import os
import girder_worker
import girder_worker.plugins
import select
import shutil
import six
import subprocess
import stat
import sys
import tempfile
import traceback
class TerminalColor(object):
    """
    ANSI escape sequences for coloring text printed to a terminal.

    Each helper wraps the given text in the matching color code and appends
    the reset code so subsequent output is unaffected.
    """
    ERROR = '\033[1;91m'
    SUCCESS = '\033[32m'
    WARNING = '\033[1;33m'
    INFO = '\033[35m'
    ENDC = '\033[0m'

    @staticmethod
    def _color(tag, text):
        # Color code, payload, then reset.
        return tag + text + TerminalColor.ENDC

    @staticmethod
    def error(text):
        """Return *text* colored for error output."""
        return TerminalColor._color(TerminalColor.ERROR, text)

    @staticmethod
    def success(text):
        """Return *text* colored for success output."""
        return TerminalColor._color(TerminalColor.SUCCESS, text)

    @staticmethod
    def warning(text):
        """Return *text* colored for warning output."""
        return TerminalColor._color(TerminalColor.WARNING, text)

    @staticmethod
    def info(text):
        """Return *text* colored for informational output."""
        return TerminalColor._color(TerminalColor.INFO, text)
def toposort(data):
    """
    General-purpose topological sort function. Dependencies are expressed as a
    dictionary whose keys are items and whose values are a set of dependent
    items. Output is a list of sets in topological order. This is a generator
    function that returns a sequence of sets in topological order.

    Note: the caller's dependency sets are mutated (self-dependencies are
    discarded in place).

    :param data: The dependency information.
    :type data: dict
    :returns: Yields a list of sorted sets representing the sorted order.
    :raises Exception: if the dependency graph contains a cycle.
    """
    if not data:
        return

    # Ignore self dependencies (mutates the caller's sets in place).
    for k, v in data.items():
        v.discard(k)

    # Find items that appear only as dependencies, never as keys.
    # Python 3 compatibility fix: use .values()/.keys()/.items() instead of
    # the Python-2-only itervalues()/iterkeys()/iteritems().
    extra = functools.reduce(
        set.union, data.values()) - set(data.keys())
    # Give those items an empty dependency set so they sort first.
    data.update({item: set() for item in extra})

    # Repeatedly emit every item with no remaining dependencies, then strip
    # the emitted items from the remaining dependency sets.
    while True:
        ordered = set(item for item, dep in data.items() if not dep)
        if not ordered:
            break
        yield ordered
        data = {item: (dep - ordered)
                for item, dep in data.items() if item not in ordered}

    # Anything left over participates in a dependency cycle.
    if data:
        raise Exception('Cyclic dependencies detected:\n%s' % '\n'.join(
            repr(x) for x in data.items()))
@contextlib.contextmanager
def tmpdir(cleanup=True):
    """
    Yield the path of a newly created temporary directory.

    The directory is created beneath the ``tmp_root`` configuration value of
    the ``girder_worker`` config section and, when ``cleanup`` is true, is
    removed again when the context exits.
    """
    root = os.path.abspath(girder_worker.config.get(
        'girder_worker', 'tmp_root'))
    # Ensure the root exists; tolerate a concurrent creator.
    if not os.path.isdir(root):
        try:
            os.makedirs(root)
        except OSError:
            if not os.path.isdir(root):
                raise
    temp_path = tempfile.mkdtemp(dir=root)

    try:
        yield temp_path
    finally:
        # Remove the temp dir unless the caller asked to keep it.
        if cleanup and os.path.isdir(temp_path):
            shutil.rmtree(temp_path)
def with_tmpdir(fn):
    """
    Decorator form of :func:`tmpdir`.

    Wraps ``fn`` so that a temporary directory is created for the duration of
    the call and its path is passed in as the special kwarg ``_tempdir``. If
    the caller already supplies ``_tempdir``, no new directory is created.
    A ``cleanup`` kwarg (default True) controls removal of the directory;
    note it is still forwarded to ``fn`` along with the other kwargs.
    """
    @functools.wraps(fn)
    def wrapped(*args, **kwargs):
        # Caller-supplied _tempdir takes precedence; call straight through.
        if '_tempdir' in kwargs:
            return fn(*args, **kwargs)

        with tmpdir(cleanup=kwargs.get('cleanup', True)) as temp_path:
            kwargs['_tempdir'] = temp_path
            return fn(*args, **kwargs)
    return wrapped
class PluginNotFoundException(Exception):
    """Raised by ``load_plugin`` when a plugin is not found on any search path."""
    pass
def load_plugins(plugins, paths, ignore_errors=False, quiet=False):
    """
    Enable a list of plugins.

    :param plugins: The plugins to enable.
    :type plugins: list or tuple of str
    :param paths: Plugin search paths.
    :type paths: list or tuple of str
    :param ignore_errors: If a plugin fails to load, this determines whether to
        raise the exception or simply print an error and keep going.
    :type ignore_errors: bool
    :param quiet: Optionally suppress printing status messages.
    :type quiet: bool
    :return: Set of plugins that were loaded successfully.
    :rtype: set
    """
    successful = set()
    for name in plugins:
        try:
            load_plugin(name, paths)
            successful.add(name)
            if not quiet:
                print(TerminalColor.success('Loaded plugin "%s"' % name))
        except Exception:
            print(TerminalColor.error(
                'ERROR: Failed to load plugin "%s":' % name))
            if not ignore_errors:
                raise
            # Best-effort mode: report the traceback and continue.
            traceback.print_exc()
    return successful
def load_plugin(name, paths):
    """
    Enable a plugin for the worker runtime.

    :param name: The name of the plugin to load, which is also the name of its
        containing directory.
    :type name: str
    :param paths: Plugin search paths.
    :type paths: list or tuple of str
    :raises PluginNotFoundException: if no search path contains a directory
        named ``name``.
    """
    for path in paths:
        plugin_dir = os.path.join(path, name)
        if os.path.isdir(plugin_dir):
            # Import (or reuse) the plugin module under the
            # girder_worker.plugins.<name> namespace.
            module_name = 'girder_worker.plugins.' + name
            if module_name not in sys.modules:
                fp, pathname, description = imp.find_module(name, [path])
                module = imp.load_module(module_name, fp, pathname, description)
                setattr(girder_worker.plugins, name, module)
            else:
                module = sys.modules[module_name]
            # Give the plugin a chance to run its load-time hook, if any.
            if hasattr(module, 'load'):
                module.load({
                    'plugin_dir': plugin_dir,
                    'name': name
                })
            # First matching path wins; stop searching.
            break
    else:
        raise PluginNotFoundException(
            'Plugin "%s" not found. Looked in: \n %s\n' % (
                name, '\n '.join(paths)))
def _close_pipes(rds, wds, input_pipes, output_pipes, close_output_pipe):
    """
    Helper to close remaining input and output adapters after the subprocess
    completes.
    """
    # Close leftover output adapters (and their descriptors when allowed).
    for desc in rds:
        if desc in output_pipes:
            output_pipes[desc].close()
            if close_output_pipe(desc):
                os.close(desc)
    # Close leftover input descriptors.
    for desc in wds:
        if desc in input_pipes:
            os.close(desc)
def _setup_input_pipes(input_pipes):
    """
    Given a mapping of input pipes, return a tuple with 2 elements. The first is
    a list of file descriptors to pass to ``select`` as writeable descriptors.
    The second is a dictionary mapping paths to existing named pipes to their
    adapters.
    """
    wds = []
    fifos = {}
    for pipe, adapter in input_pipes.items():
        if isinstance(pipe, int):
            # Already an open system-level file descriptor.
            wds.append(pipe)
            continue
        # Otherwise the key must name an existing fifo on the filesystem.
        if not os.path.exists(pipe):
            raise Exception('Input pipe does not exist: %s' % pipe)
        if not stat.S_ISFIFO(os.stat(pipe).st_mode):
            raise Exception('Input pipe must be a fifo object: %s' % pipe)
        fifos[pipe] = adapter
    return wds, fifos
def _open_ipipes(wds, fifos, input_pipes):
    """
    This will attempt to open the named pipes in the set of ``fifos`` for
    writing, which will only succeed if the subprocess has opened them for
    reading already. This modifies and returns the list of write descriptors,
    the list of waiting fifo names, and the mapping back to input adapters.
    """
    for path in list(fifos):
        try:
            fd = os.open(path, os.O_WRONLY | os.O_NONBLOCK)
        except OSError as exc:
            # ENXIO means no reader has the fifo open yet: leave it queued.
            if exc.errno != errno.ENXIO:
                raise exc
        else:
            input_pipes[fd] = fifos.pop(path)
            wds.append(fd)
    return wds, fifos, input_pipes
def select_loop(exit_condition=lambda: True, close_output=lambda x: True,
                outputs=None, inputs=None):
    """
    Run a select loop for a set of input and output pipes

    :param exit_condition: A function to evaluate to determine if the select
        loop should terminate if all pipes are empty.
    :type exit_condition: function
    :param close_output: A function to use to test whether a output
        should be closed when EOF is reached. Certain output pipes such as
        stdout, stderr should not be closed.
    :param outputs: This should be a dictionary mapping pipe descriptors
        to instances of ``StreamPushAdapter`` that should handle the data from
        the stream. The keys of this dictionary are open file descriptors,
        which are integers.
    :type outputs: dict
    :param inputs: This should be a dictionary mapping pipe descriptors
        to instances of ``StreamFetchAdapter`` that should handle sending
        input data in chunks. Keys in this dictionary can be either open file
        descriptors (integers) or a string representing a path to an existing
        fifo on the filesystem. This second case supports the use of named
        pipes, since they must be opened for reading before they can be opened
        for writing
    :type inputs: dict
    """
    BUF_LEN = 65536

    inputs = inputs or {}
    outputs = outputs or {}

    # Readable descriptors come from the output map; named-pipe inputs are
    # held in ``fifos`` until a reader appears (see _open_ipipes).
    rds = [fd for fd in outputs.keys() if isinstance(fd, int)]
    wds, fifos = _setup_input_pipes(inputs)

    try:
        while True:
            # We evaluate this first so that we get one last iteration of
            # of the loop before breaking out of the loop.
            exit = exit_condition()

            # get ready pipes, timeout of 100 ms
            readable, writable, _ = select.select(rds, wds, (), 0.1)

            for ready_fd in readable:
                # An empty read signals EOF on this descriptor.
                buf = os.read(ready_fd, BUF_LEN)

                if buf:
                    outputs[ready_fd].write(buf)
                else:
                    outputs[ready_fd].close()
                    # Should we close this pipe? In the case of stdout or
                    # stderr, bad things happen if parent closes
                    if close_output(ready_fd):
                        os.close(ready_fd)
                    rds.remove(ready_fd)
            for ready_fd in writable:
                # TODO for now it's OK for the input reads to block since
                # input generally happens first, but we should consider how to
                # support non-blocking stream inputs in the future.
                buf = inputs[ready_fd].read(BUF_LEN)
                if buf:
                    os.write(ready_fd, buf)
                else:  # end of stream
                    wds.remove(ready_fd)
                    os.close(ready_fd)

            # Promote any named pipes whose reader has now appeared.
            wds, fifos, inputs = _open_ipipes(wds, fifos, inputs)
            # all pipes empty?
            empty = (not rds or not readable) and (not wds or not writable)

            if (empty and exit):
                break
    finally:
        # Always release whatever descriptors/adapters are still open.
        _close_pipes(rds, wds, inputs, outputs, close_output)
def run_process(command, output_pipes=None, input_pipes=None):
    """
    Run a subprocess, and listen for its outputs on various pipes.

    :param command: The command to run.
    :type command: list of str
    :param output_pipes: This should be a dictionary mapping pipe descriptors
        to instances of ``StreamPushAdapter`` that should handle the data from
        the stream. Normally, keys of this dictionary are open file descriptors,
        which are integers. There are two special cases where they are not,
        which are the keys ``'_stdout'`` and ``'_stderr'``. These special keys
        correspond to the stdout and stderr pipes that will be created for the
        subprocess. If these are not set in the ``output_pipes`` map, the
        default behavior is to direct them to the stdout and stderr of the
        current process.
    :type output_pipes: dict
    :param input_pipes: This should be a dictionary mapping pipe descriptors
        to instances of ``StreamFetchAdapter`` that should handle sending
        input data in chunks. Keys in this dictionary can be either open file
        descriptors (integers), the special value ``'_stdin'`` for standard
        input, or a string representing a path to an existing fifo on the
        filesystem. This third case supports the use of named pipes, since they
        must be opened for reading before they can be opened for writing
    :type input_pipes: dict
    :returns: The finished ``subprocess.Popen`` object.
    """
    p = subprocess.Popen(args=command, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE, stdin=subprocess.PIPE)

    input_pipes = input_pipes or {}
    output_pipes = output_pipes or {}

    # we now know subprocess stdout and stderr filenos, so bind the adapters
    stdout = p.stdout.fileno()
    stderr = p.stderr.fileno()
    stdin = p.stdin.fileno()

    # Fall back to mirroring the child's stdout/stderr onto our own.
    output_pipes[stdout] = output_pipes.get(
        '_stdout', WritePipeAdapter({}, sys.stdout))
    output_pipes[stderr] = output_pipes.get(
        '_stderr', WritePipeAdapter({}, sys.stderr))

    # Special case for _stdin
    if '_stdin' in input_pipes:
        input_pipes[stdin] = input_pipes['_stdin']

    def exit_condition():
        # The loop may exit once the child process has terminated.
        status = p.poll()
        return status is not None

    def close_output_pipe(pipe):
        # Never close our own stdout/stderr descriptors.
        return pipe not in (stdout, stderr)

    try:
        select_loop(exit_condition=exit_condition,
                    close_output=close_output_pipe,
                    outputs=output_pipes, inputs=input_pipes)
    except Exception:
        p.kill()  # kill child process if something went wrong on our end
        raise

    return p
class StreamFetchAdapter(object):
    """
    This represents the interface that must be implemented by fetch adapters
    for IO modes that want to implement streaming input.
    """
    def __init__(self, input_spec):
        #: The input specification this adapter serves.
        self.input_spec = input_spec

    def read(self, buf_len):
        """
        Fetch adapters must implement this method, which is responsible for
        reading up to ``buf_len`` bytes from the stream. For now, this is
        expected to be a blocking read, and should return an empty string to
        indicate the end of the stream.
        """
        # Bug fix: the original ``raise NotImplemented`` is invalid --
        # NotImplemented is not an exception, so raising it produces a
        # TypeError on Python 3 instead of the intended abstract-method error.
        raise NotImplementedError
class MemoryFetchAdapter(StreamFetchAdapter):
    """
    Serves a fixed in-memory byte string as a stream. This can be used to map
    traditional (non-streaming) inputs to pipes when using ``run_process``.
    This is roughly identical behavior to BytesIO.
    """
    def __init__(self, input_spec, data):
        """
        :param data: The bytes to serve to readers.
        """
        super(MemoryFetchAdapter, self).__init__(input_spec)
        self._stream = six.BytesIO(data)

    def read(self, buf_len):
        # Delegate to the underlying buffer; returns b'' at end of stream.
        return self._stream.read(buf_len)
class StreamPushAdapter(object):
    """
    This represents the interface that must be implemented by push adapters for
    IO modes that want to implement streaming output.
    """
    def __init__(self, output_spec):
        """
        Initialize the adapter based on the output spec.

        :param output_spec: The output specification for this stream.
        """
        self.output_spec = output_spec

    def write(self, buf):
        """
        Write a chunk of data to the output stream.
        """
        # Bug fix: the original ``raise NotImplemented`` is invalid --
        # NotImplemented is not an exception, so raising it produces a
        # TypeError on Python 3 instead of the intended abstract-method error.
        raise NotImplementedError

    def close(self):
        """
        Close the output stream. Called after the last data is sent.
        """
        pass
class WritePipeAdapter(StreamPushAdapter):
    """
    Wraps any object exposing a ``write`` method, e.g. ``sys.stdout`` or
    ``sys.stderr``, so it can be used as a push adapter. ``close`` is
    deliberately inherited as a no-op, because callers must not close the
    wrapped stream.
    """
    def __init__(self, output_spec, pipe):
        """
        :param pipe: An object containing a ``write`` method, e.g. sys.stdout.
        """
        super(WritePipeAdapter, self).__init__(output_spec)
        self.pipe = pipe

    def write(self, buf):
        # Delegate straight to the wrapped object.
        self.pipe.write(buf)
class AccumulateDictAdapter(StreamPushAdapter):
    """
    Accumulates all data from a stream under a single key of a dict. Can be
    used to bind traditional (non-streaming) outputs to pipes when using
    ``run_process``.
    """
    def __init__(self, output_spec, key, dictionary=None):
        """
        :param output_spec: The output specification.
        :type output_spec: dict
        :param key: The key to accumulate the data under.
        :type key: hashable
        :param dictionary: Dictionary to write into. If not specified, uses the
            output_spec.
        :type dictionary: dict
        """
        super(AccumulateDictAdapter, self).__init__(output_spec)

        target = output_spec if dictionary is None else dictionary
        # Start from an empty accumulator unless the key already exists.
        target.setdefault(key, '')

        self.dictionary = target
        self.key = key

    def write(self, buf):
        self.dictionary[self.key] = self.dictionary[self.key] + buf
class JobProgressAdapter(StreamPushAdapter):
    def __init__(self, job_manager):
        """
        This reads structured JSON documents one line at a time and sends
        them as progress events via the JobManager.

        :param job_manager: The job manager to use to send the progress events.
        :type job_manager: girder_worker.utils.JobManager
        """
        super(JobProgressAdapter, self).__init__(None)

        self.job_manager = job_manager
        self._buf = b''  # partial (newline-less) tail carried between writes

    def write(self, buf):
        # Split on newlines; the final element is an incomplete line (possibly
        # empty) that is buffered until the next write() call completes it.
        lines = buf.split(b'\n')
        if self._buf:
            lines[0] = self._buf + lines[0]
        self._buf = lines[-1]

        for line in lines[:-1]:
            self._parse(line)

    def _parse(self, line):
        # Silently ignore lines that are not valid JSON objects.
        try:
            doc = json.loads(line.decode('utf8'))
        except ValueError:
            return  # TODO log?
        if not isinstance(doc, dict):
            return  # TODO log?
        self.job_manager.updateProgress(
            total=doc.get('total'), current=doc.get('current'), message=doc.get('message'))
| import contextlib
import errno
import functools
import imp
import json
import os
import girder_worker
import girder_worker.plugins
import select
import shutil
import six
import subprocess
import stat
import sys
import tempfile
import traceback
class TerminalColor(object):
"""
Provides a set of values that can be used to color text in the terminal.
"""
ERROR = '\033[1;91m'
SUCCESS = '\033[32m'
WARNING = '\033[1;33m'
INFO = '\033[35m'
ENDC = '\033[0m'
@staticmethod
def _color(tag, text):
return ''.join((tag, text, TerminalColor.ENDC))
@staticmethod
def error(text):
return TerminalColor._color(TerminalColor.ERROR, text)
@staticmethod
def success(text):
return TerminalColor._color(TerminalColor.SUCCESS, text)
@staticmethod
def warning(text):
return TerminalColor._color(TerminalColor.WARNING, text)
@staticmethod
def info(text):
return TerminalColor._color(TerminalColor.INFO, text)
def toposort(data):
"""
General-purpose topological sort function. Dependencies are expressed as a
dictionary whose keys are items and whose values are a set of dependent
items. Output is a list of sets in topological order. This is a generator
function that returns a sequence of sets in topological order.
:param data: The dependency information.
:type data: dict
:returns: Yields a list of sorted sets representing the sorted order.
"""
if not data:
return
# Ignore self dependencies.
for k, v in data.items():
v.discard(k)
# Find all items that don't depend on anything.
extra = functools.reduce(
set.union, data.itervalues()) - set(data.iterkeys())
# Add empty dependences where needed
data.update({item: set() for item in extra})
# Perform the toposort.
while True:
ordered = set(item for item, dep in data.iteritems() if not dep)
if not ordered:
break
yield ordered
data = {item: (dep - ordered)
for item, dep in data.iteritems() if item not in ordered}
# Detect any cycles in the dependency graph.
if data:
raise Exception('Cyclic dependencies detected:\n%s' % '\n'.join(
repr(x) for x in data.iteritems()))
@contextlib.contextmanager
def tmpdir(cleanup=True):
# Make the temp dir underneath tmp_root config setting
root = os.path.abspath(girder_worker.config.get(
'girder_worker', 'tmp_root'))
try:
os.makedirs(root)
except OSError:
if not os.path.isdir(root):
raise
path = tempfile.mkdtemp(dir=root)
try:
yield path
finally:
# Cleanup the temp dir
if cleanup and os.path.isdir(path):
shutil.rmtree(path)
def with_tmpdir(fn):
"""
This function is provided as a convenience to allow use as a decorator of
a function rather than using "with tmpdir()" around the whole function
body. It passes the generated temp dir path into the function as the
special kwarg "_tempdir".
"""
@functools.wraps(fn)
def wrapped(*args, **kwargs):
if '_tempdir' in kwargs:
return fn(*args, **kwargs)
cleanup = kwargs.get('cleanup', True)
with tmpdir(cleanup=cleanup) as tempdir:
kwargs['_tempdir'] = tempdir
return fn(*args, **kwargs)
return wrapped
class PluginNotFoundException(Exception):
pass
def load_plugins(plugins, paths, ignore_errors=False, quiet=False):
"""
Enable a list of plugins.
:param plugins: The plugins to enable.
:type plugins: list or tuple of str
:param paths: Plugin search paths.
:type paths: list or tuple of str
:param ignore_errors: If a plugin fails to load, this determines whether to
raise the exception or simply print an error and keep going.
:type ignore_errors: bool
:param quiet: Optionally suppress printing status messages.
:type quiet: bool
:return: Set of plugins that were loaded successfully.
:rtype: set
"""
loaded = set()
for plugin in plugins:
try:
load_plugin(plugin, paths)
loaded.add(plugin)
if not quiet:
print(TerminalColor.success('Loaded plugin "%s"' % plugin))
except Exception:
print(TerminalColor.error(
'ERROR: Failed to load plugin "%s":' % plugin))
if ignore_errors:
traceback.print_exc()
else:
raise
return loaded
def load_plugin(name, paths):
"""
Enable a plugin for the worker runtime.
:param name: The name of the plugin to load, which is also the name of its
containing directory.
:type name: str
:param paths: Plugin search paths.
:type paths: list or tuple of str
"""
for path in paths:
plugin_dir = os.path.join(path, name)
if os.path.isdir(plugin_dir):
module_name = 'girder_worker.plugins.' + name
if module_name not in sys.modules:
fp, pathname, description = imp.find_module(name, [path])
module = imp.load_module(module_name, fp, pathname, description)
setattr(girder_worker.plugins, name, module)
else:
module = sys.modules[module_name]
if hasattr(module, 'load'):
module.load({
'plugin_dir': plugin_dir,
'name': name
})
break
else:
raise PluginNotFoundException(
'Plugin "%s" not found. Looked in: \n %s\n' % (
name, '\n '.join(paths)))
def _close_pipes(rds, wds, input_pipes, output_pipes, close_output_pipe):
"""
Helper to close remaining input and output adapters after the subprocess
completes.
"""
# close any remaining output adapters
for fd in rds:
if fd in output_pipes:
output_pipes[fd].close()
if close_output_pipe(fd):
os.close(fd)
# close any remaining input adapters
for fd in wds:
if fd in input_pipes:
os.close(fd)
def _setup_input_pipes(input_pipes):
"""
Given a mapping of input pipes, return a tuple with 2 elements. The first is
a list of file descriptors to pass to ``select`` as writeable descriptors.
The second is a dictionary mapping paths to existing named pipes to their
adapters.
"""
wds = []
fifos = {}
for pipe, adapter in six.viewitems(input_pipes):
if isinstance(pipe, int):
# This is assumed to be an open system-level file descriptor
wds.append(pipe)
else:
if not os.path.exists(pipe):
raise Exception('Input pipe does not exist: %s' % pipe)
if not stat.S_ISFIFO(os.stat(pipe).st_mode):
raise Exception('Input pipe must be a fifo object: %s' % pipe)
fifos[pipe] = adapter
return wds, fifos
def _open_ipipes(wds, fifos, input_pipes):
"""
This will attempt to open the named pipes in the set of ``fifos`` for
writing, which will only succeed if the subprocess has opened them for
reading already. This modifies and returns the list of write descriptors,
the list of waiting fifo names, and the mapping back to input adapters.
"""
for fifo in fifos.copy():
try:
fd = os.open(fifo, os.O_WRONLY | os.O_NONBLOCK)
input_pipes[fd] = fifos.pop(fifo)
wds.append(fd)
except OSError as e:
if e.errno != errno.ENXIO:
raise e
return wds, fifos, input_pipes
def select_loop(exit_condition=lambda: True, close_output=lambda x: True,
outputs=None, inputs=None):
"""
Run a select loop for a set of input and output pipes
:param exit_condition: A function to evaluate to determine if the select
loop should terminate if all pipes are empty.
:type exit_condition: function
:param close_output: A function to use to test whether a output
should be closed when EOF is reached. Certain output pipes such as
stdout, stderr should not be closed.
:param outputs: This should be a dictionary mapping pipe descriptors
to instances of ``StreamPushAdapter`` that should handle the data from
the stream. The keys of this dictionary are open file descriptors,
which are integers.
:type outputs: dict
:param inputs: This should be a dictionary mapping pipe descriptors
to instances of ``StreamFetchAdapter`` that should handle sending
input data in chunks. Keys in this dictionary can be either open file
descriptors (integers) or a string representing a path to an existing
fifo on the filesystem. This second case supports the use of named
pipes, since they must be opened for reading before they can be opened
for writing
:type inputs: dict
"""
BUF_LEN = 65536
inputs = inputs or {}
outputs = outputs or {}
rds = [fd for fd in outputs.keys() if isinstance(fd, int)]
wds, fifos = _setup_input_pipes(inputs)
try:
while True:
# We evaluate this first so that we get one last iteration of
# of the loop before breaking out of the loop.
exit = exit_condition()
# get ready pipes, timeout of 100 ms
readable, writable, _ = select.select(rds, wds, (), 0.1)
for ready_fd in readable:
buf = os.read(ready_fd, BUF_LEN)
if buf:
outputs[ready_fd].write(buf)
else:
outputs[ready_fd].close()
# Should we close this pipe? In the case of stdout or stderr
# bad things happen if parent closes
if close_output(ready_fd):
os.close(ready_fd)
rds.remove(ready_fd)
for ready_fd in writable:
# TODO for now it's OK for the input reads to block since
# input generally happens first, but we should consider how to
# support non-blocking stream inputs in the future.
buf = inputs[ready_fd].read(BUF_LEN)
if buf:
os.write(ready_fd, buf)
else: # end of stream
wds.remove(ready_fd)
os.close(ready_fd)
wds, fifos, inputs = _open_ipipes(wds, fifos, inputs)
# all pipes empty?
empty = (not rds or not readable) and (not wds or not writable)
if (empty and exit):
break
finally:
_close_pipes(rds, wds, inputs, outputs, close_output)
def run_process(command, output_pipes=None, input_pipes=None):
"""
Run a subprocess, and listen for its outputs on various pipes.
:param command: The command to run.
:type command: list of str
:param output_pipes: This should be a dictionary mapping pipe descriptors
to instances of ``StreamPushAdapter`` that should handle the data from
the stream. Normally, keys of this dictionary are open file descriptors,
which are integers. There are two special cases where they are not,
which are the keys ``'_stdout'`` and ``'_stderr'``. These special keys
correspond to the stdout and stderr pipes that will be created for the
subprocess. If these are not set in the ``output_pipes`` map, the
default behavior is to direct them to the stdout and stderr of the
current process.
:type output_pipes: dict
:param input_pipes: This should be a dictionary mapping pipe descriptors
to instances of ``StreamFetchAdapter`` that should handle sending
input data in chunks. Keys in this dictionary can be either open file
descriptors (integers), the special value ``'_stdin'`` for standard
input, or a string representing a path to an existing fifo on the
filesystem. This third case supports the use of named pipes, since they
must be opened for reading before they can be opened for writing
:type input_pipes: dict
"""
p = subprocess.Popen(args=command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, stdin=subprocess.PIPE)
input_pipes = input_pipes or {}
output_pipes = output_pipes or {}
# we now know subprocess stdout and stderr filenos, so bind the adapters
stdout = p.stdout.fileno()
stderr = p.stderr.fileno()
stdin = p.stdin.fileno()
output_pipes[stdout] = output_pipes.get(
'_stdout', WritePipeAdapter({}, sys.stdout))
output_pipes[stderr] = output_pipes.get(
'_stderr', WritePipeAdapter({}, sys.stderr))
# Special case for _stdin
if '_stdin' in input_pipes:
input_pipes[stdin] = input_pipes['_stdin']
def exit_condition():
status = p.poll()
return status is not None
def close_output_pipe(pipe):
return pipe not in (stdout, stderr)
try:
select_loop(exit_condition=exit_condition,
close_output=close_output_pipe,
outputs=output_pipes, inputs=input_pipes)
except Exception:
p.kill() # kill child process if something went wrong on our end
raise
return p
class StreamFetchAdapter(object):
"""
This represents the interface that must be implemented by fetch adapters
for IO modes that want to implement streaming input.
"""
def __init__(self, input_spec):
self.input_spec = input_spec
def read(self, buf_len):
"""
Fetch adapters must implement this method, which is responsible for
reading up to ``self.buf_len`` bytes from the stream. For now, this is
expected to be a blocking read, and should return an empty string to
indicate the end of the stream.
"""
raise NotImplemented
class MemoryFetchAdapter(StreamFetchAdapter):
def __init__(self, input_spec, data):
"""
Simply reads data from memory. This can be used to map traditional
(non-streaming) inputs to pipes when using ``run_process``. This is
roughly identical behavior to BytesIO.
"""
super(MemoryFetchAdapter, self).__init__(input_spec)
self._stream = six.BytesIO(data)
def read(self, buf_len):
return self._stream.read(buf_len)
class StreamPushAdapter(object):
"""
This represents the interface that must be implemented by push adapters for
IO modes that want to implement streaming output.
"""
def __init__(self, output_spec):
"""
Initialize the adpater based on the output spec.
"""
self.output_spec = output_spec
def write(self, buf):
"""
Write a chunk of data to the output stream.
"""
raise NotImplemented
def close(self):
"""
Close the output stream. Called after the last data is sent.
"""
pass
class WritePipeAdapter(StreamPushAdapter):
"""
Simply wraps another pipe that contains a ``write`` method. This is useful
for wrapping ``sys.stdout`` and ``sys.stderr``, where we want to call
``write`` but not ``close`` on them.
"""
def __init__(self, output_spec, pipe):
"""
:param pipe: An object containing a ``write`` method, e.g. sys.stdout.
"""
super(WritePipeAdapter, self).__init__(output_spec)
self.pipe = pipe
def write(self, buf):
self.pipe.write(buf)
class AccumulateDictAdapter(StreamPushAdapter):
def __init__(self, output_spec, key, dictionary=None):
"""
Appends all data from a stream under a key inside a dict. Can be used
to bind traditional (non-streaming) outputs to pipes when using
``run_process``.
:param output_spec: The output specification.
:type output_spec: dict
:param key: The key to accumulate the data under.
:type key: hashable
:param dictionary: Dictionary to write into. If not specified, uses the
output_spec.
:type dictionary: dict
"""
super(AccumulateDictAdapter, self).__init__(output_spec)
if dictionary is None:
dictionary = output_spec
if key not in dictionary:
dictionary[key] = ''
self.dictionary = dictionary
self.key = key
def write(self, buf):
self.dictionary[self.key] += buf
class JobProgressAdapter(StreamPushAdapter):
def __init__(self, job_manager):
"""
This reads structured JSON documents one line at a time and sends
them as progress events via the JobManager.
:param job_manager: The job manager to use to send the progress events.
:type job_manager: girder_worker.utils.JobManager
"""
super(JobProgressAdapter, self).__init__(None)
self.job_manager = job_manager
self._buf = b''
def write(self, buf):
lines = buf.split(b'\n')
if self._buf:
lines[0] = self._buf + lines[0]
self._buf = lines[-1]
for line in lines[:-1]:
self._parse(line)
def _parse(self, line):
try:
doc = json.loads(line.decode('utf8'))
except ValueError:
return # TODO log?
if not isinstance(doc, dict):
return # TODO log?
self.job_manager.updateProgress(
total=doc.get('total'), current=doc.get('current'), message=doc.get('message')) | en | 0.840827 | Provides a set of values that can be used to color text in the terminal. General-purpose topological sort function. Dependencies are expressed as a dictionary whose keys are items and whose values are a set of dependent items. Output is a list of sets in topological order. This is a generator function that returns a sequence of sets in topological order. :param data: The dependency information. :type data: dict :returns: Yields a list of sorted sets representing the sorted order. # Ignore self dependencies. # Find all items that don't depend on anything. # Add empty dependences where needed # Perform the toposort. # Detect any cycles in the dependency graph. # Make the temp dir underneath tmp_root config setting # Cleanup the temp dir This function is provided as a convenience to allow use as a decorator of a function rather than using "with tmpdir()" around the whole function body. It passes the generated temp dir path into the function as the special kwarg "_tempdir". Enable a list of plugins. :param plugins: The plugins to enable. :type plugins: list or tuple of str :param paths: Plugin search paths. :type paths: list or tuple of str :param ignore_errors: If a plugin fails to load, this determines whether to raise the exception or simply print an error and keep going. :type ignore_errors: bool :param quiet: Optionally suppress printing status messages. :type quiet: bool :return: Set of plugins that were loaded successfully. :rtype: set Enable a plugin for the worker runtime. :param name: The name of the plugin to load, which is also the name of its containing directory. :type name: str :param paths: Plugin search paths. :type paths: list or tuple of str Helper to close remaining input and output adapters after the subprocess completes. 
# close any remaining output adapters # close any remaining input adapters Given a mapping of input pipes, return a tuple with 2 elements. The first is a list of file descriptors to pass to ``select`` as writeable descriptors. The second is a dictionary mapping paths to existing named pipes to their adapters. # This is assumed to be an open system-level file descriptor This will attempt to open the named pipes in the set of ``fifos`` for writing, which will only succeed if the subprocess has opened them for reading already. This modifies and returns the list of write descriptors, the list of waiting fifo names, and the mapping back to input adapters. Run a select loop for a set of input and output pipes :param exit_condition: A function to evaluate to determine if the select loop should terminate if all pipes are empty. :type exit_condition: function :param close_output: A function to use to test whether a output should be closed when EOF is reached. Certain output pipes such as stdout, stderr should not be closed. :param outputs: This should be a dictionary mapping pipe descriptors to instances of ``StreamPushAdapter`` that should handle the data from the stream. The keys of this dictionary are open file descriptors, which are integers. :type outputs: dict :param inputs: This should be a dictionary mapping pipe descriptors to instances of ``StreamFetchAdapter`` that should handle sending input data in chunks. Keys in this dictionary can be either open file descriptors (integers) or a string representing a path to an existing fifo on the filesystem. This second case supports the use of named pipes, since they must be opened for reading before they can be opened for writing :type inputs: dict # We evaluate this first so that we get one last iteration of # of the loop before breaking out of the loop. # get ready pipes, timeout of 100 ms # Should we close this pipe? 
In the case of stdout or stderr # bad things happen if parent closes # TODO for now it's OK for the input reads to block since # input generally happens first, but we should consider how to # support non-blocking stream inputs in the future. # end of stream # all pipes empty? Run a subprocess, and listen for its outputs on various pipes. :param command: The command to run. :type command: list of str :param output_pipes: This should be a dictionary mapping pipe descriptors to instances of ``StreamPushAdapter`` that should handle the data from the stream. Normally, keys of this dictionary are open file descriptors, which are integers. There are two special cases where they are not, which are the keys ``'_stdout'`` and ``'_stderr'``. These special keys correspond to the stdout and stderr pipes that will be created for the subprocess. If these are not set in the ``output_pipes`` map, the default behavior is to direct them to the stdout and stderr of the current process. :type output_pipes: dict :param input_pipes: This should be a dictionary mapping pipe descriptors to instances of ``StreamFetchAdapter`` that should handle sending input data in chunks. Keys in this dictionary can be either open file descriptors (integers), the special value ``'_stdin'`` for standard input, or a string representing a path to an existing fifo on the filesystem. This third case supports the use of named pipes, since they must be opened for reading before they can be opened for writing :type input_pipes: dict # we now know subprocess stdout and stderr filenos, so bind the adapters # Special case for _stdin # kill child process if something went wrong on our end This represents the interface that must be implemented by fetch adapters for IO modes that want to implement streaming input. Fetch adapters must implement this method, which is responsible for reading up to ``self.buf_len`` bytes from the stream. 
For now, this is expected to be a blocking read, and should return an empty string to indicate the end of the stream. Simply reads data from memory. This can be used to map traditional (non-streaming) inputs to pipes when using ``run_process``. This is roughly identical behavior to BytesIO. This represents the interface that must be implemented by push adapters for IO modes that want to implement streaming output. Initialize the adpater based on the output spec. Write a chunk of data to the output stream. Close the output stream. Called after the last data is sent. Simply wraps another pipe that contains a ``write`` method. This is useful for wrapping ``sys.stdout`` and ``sys.stderr``, where we want to call ``write`` but not ``close`` on them. :param pipe: An object containing a ``write`` method, e.g. sys.stdout. Appends all data from a stream under a key inside a dict. Can be used to bind traditional (non-streaming) outputs to pipes when using ``run_process``. :param output_spec: The output specification. :type output_spec: dict :param key: The key to accumulate the data under. :type key: hashable :param dictionary: Dictionary to write into. If not specified, uses the output_spec. :type dictionary: dict This reads structured JSON documents one line at a time and sends them as progress events via the JobManager. :param job_manager: The job manager to use to send the progress events. :type job_manager: girder_worker.utils.JobManager # TODO log? # TODO log? | 2.466257 | 2 |
appengine_config.py | wangjun/RSSNewsGAE | 0 | 6620067 | #!/usr/bin/env python27
# -*- coding: utf-8 -*-
__author__ = 'liant'
import os
from google.appengine.ext import vendor
# Add any libraries installed in the "lib" folder.
vendor.add('lib')
#
# Enable ctypes on dev appserver so we get proper flask tracebacks.
# From http://jinja.pocoo.org/docs/dev/faq/#my-tracebacks-look-weird-what-s-happening
# and http://stackoverflow.com/questions/3086091/debug-jinja2-in-google-app-engine
# SERVER_SOFTWARE starts with "Development" on the local dev_appserver, so
# PRODUCTION_MODE is True only when running on real App Engine.
PRODUCTION_MODE = not os.environ.get(
    'SERVER_SOFTWARE', 'Development').startswith('Development')
if not PRODUCTION_MODE:
    # Dev-server only: extend the sandbox whitelist so ctypes can load.
    from google.appengine.tools.devappserver2.python import sandbox
    sandbox._WHITE_LIST_C_MODULES += ['_ctypes', 'gestalt']
    import os
    import sys
    # HACK: on Windows, pretend not to be 'nt' so sandboxed libraries take
    # their generic (POSIX-ish) code paths.  NOTE(review): this mutates
    # os.name/sys.platform for the whole process -- confirm it is intended.
    if os.name == 'nt':
        os.name = None
        sys.platform = ''
| #!/usr/bin/env python27
# -*- coding: utf-8 -*-
__author__ = 'liant'
import os
from google.appengine.ext import vendor
# Add any libraries installed in the "lib" folder.
vendor.add('lib')
#
# Enable ctypes on dev appserver so we get proper flask tracebacks.
# From http://jinja.pocoo.org/docs/dev/faq/#my-tracebacks-look-weird-what-s-happening
# and http://stackoverflow.com/questions/3086091/debug-jinja2-in-google-app-engine
PRODUCTION_MODE = not os.environ.get(
'SERVER_SOFTWARE', 'Development').startswith('Development')
if not PRODUCTION_MODE:
from google.appengine.tools.devappserver2.python import sandbox
sandbox._WHITE_LIST_C_MODULES += ['_ctypes', 'gestalt']
import os
import sys
if os.name == 'nt':
os.name = None
sys.platform = ''
| en | 0.702806 | #!/usr/bin/env python27 # -*- coding: utf-8 -*- # Add any libraries installed in the "lib" folder. # # Enable ctypes on dev appserver so we get proper flask tracebacks. # From http://jinja.pocoo.org/docs/dev/faq/#my-tracebacks-look-weird-what-s-happening # and http://stackoverflow.com/questions/3086091/debug-jinja2-in-google-app-engine | 2.075149 | 2 |
__init__.py | mcmont/violet | 0 | 6620068 | <reponame>mcmont/violet<filename>__init__.py<gh_stars>0
# Flatten the package layout: import each analysis function from its
# submodule so callers can write `from violet import murcko`, etc.
from violet.murcko import murcko
from violet.murcko_alpha import murcko_alpha
from violet.reaction import reaction
from violet.rotatable_bonds import rotatable_bonds
from violet.sp3carbon import sp3carbon
from violet.tpsa import tpsa
from violet.regioisomers import regioisomers
# Public API of the package; kept in sync with the imports above.
__all__ = ['murcko', 'murcko_alpha', 'reaction', 'rotatable_bonds', 'sp3carbon', 'tpsa', 'regioisomers']
| from violet.murcko import murcko
from violet.murcko_alpha import murcko_alpha
from violet.reaction import reaction
from violet.rotatable_bonds import rotatable_bonds
from violet.sp3carbon import sp3carbon
from violet.tpsa import tpsa
from violet.regioisomers import regioisomers
__all__ = ['murcko', 'murcko_alpha', 'reaction', 'rotatable_bonds', 'sp3carbon', 'tpsa', 'regioisomers'] | none | 1 | 1.335309 | 1 | |
tests/test_word.py | ftobia/ham | 3 | 6620069 | import pytest
from ham import Word
class TestWord(object):
    """Unit tests for ham.Word: construction, equality, iteration,
    pop() bookkeeping and the vowel/pronunciation helpers."""

    def test_constructor(self):
        # Copy-construction yields an equal but distinct object.
        slap = Word('slap')
        slap2 = Word(slap)
        assert slap == slap2
        assert slap is not slap2

    def test_str(self):
        assert str(Word('foo')) == 'foo'

    def test_repr(self):
        assert repr(Word('foo')) == '<Word "foo">'

    def test_iter(self):
        word = Word('hello')
        i = iter(word)
        assert ''.join(i) == 'hello'
        # An exhausted iterator stays exhausted...
        with pytest.raises(StopIteration):
            next(i)
        # ...but iterating the word itself again yields a fresh iterator.
        assert ''.join(word) == 'hello'

    def test_contains(self):
        # Membership covers single letters and contiguous substrings only.
        foo = Word('foo')
        assert 'f' in foo
        assert 'o' in foo
        assert 'fo' in foo
        assert 'oo' in foo
        assert 'foo' in foo
        assert 'b' not in foo
        assert 'of' not in foo
        assert 'foof' not in foo

    def test_eq(self):
        foo = Word('foo')
        assert foo == foo
        assert foo == Word('foo')
        assert foo == Word(foo)
        assert not (foo == Word('monkey'))

    def test_ne(self):
        # A Word never equals a plain string, even with the same letters.
        assert Word('monkey') != Word('butler')
        assert Word('monkey') != 'monkey'
        assert not (Word('monkey') != Word('monkey'))

    def test_pop(self):
        # Popped letters are replaced by '.' placeholders in str().
        w = Word('foo')
        assert w.pop('f') == 'f'
        assert str(w) == '.oo'
        assert w.pop('o') == 'o'
        assert str(w) == '..o'
        assert w.pop('o') == 'o'
        assert str(w) == '...'

    def test_pop_nonexistent(self):
        w = Word('foo')
        with pytest.raises(ValueError) as excinfo:
            w.pop('a')
        assert str(excinfo.value) == '"a" is not in word'

    def test_pop_single_letter_from_middle_of_word(self):
        w = Word('primary')
        assert w.pop('m') == 'm'
        assert str(w) == 'pri.ary'

    def test_pop_multiple_letters(self):
        # pop() accepts a contiguous substring, not just single letters.
        w = Word('monkey')
        assert w.pop('onk') == 'onk'
        assert str(w) == 'm...ey'

    def test_len(self):
        # len() shrinks by the number of popped letters.
        key = 'key'
        assert len(Word(key)) == len(key)
        monkey = 'monkey'
        assert len(Word(monkey)) == len(monkey)
        w = Word(monkey)
        w.pop(key)
        assert len(w) == len(monkey) - len(key)

    def test_vowel_groups(self):
        # Maximal runs of vowels in order of appearance; note that 'y'
        # counts as a vowel (see 'unceremoniously').
        assert list(Word('hello').vowel_groups()) == ['e', 'o']
        assert list(Word('monkey').vowel_groups()) == ['o', 'ey']
        assert list(Word('toast').vowel_groups()) == ['oa']
        assert list(Word('abomination').vowel_groups()) == \
            ['a', 'o', 'i', 'a', 'io']
        assert list(Word('unceremoniously').vowel_groups()) == \
            ['u', 'e', 'e', 'o', 'iou', 'y']

    @pytest.mark.skipif('sys.version_info >= (3,0)')
    def test_pronunciations(self):
        # Python 2 only; presumably the pronunciation backend is not
        # py3-compatible -- TODO confirm.  Expected values are ARPABET
        # phoneme lists with stress markers.
        from ham import Pronunciation
        assert Word('meow').pronunciations() == \
            [Pronunciation(['M', 'IY0', 'AW1'])]
        assert Word('tomato').pronunciations() == [
            Pronunciation(['T', 'AH0', 'M', 'EY1', 'T', 'OW2']),
            Pronunciation(['T', 'AH0', 'M', 'AA1', 'T', 'OW2'])
        ]
        assert Word('resume').pronunciations() == [
            Pronunciation(['R', 'IH0', 'Z', 'UW1', 'M']),
            Pronunciation(['R', 'IY0', 'Z', 'UW1', 'M']),
            Pronunciation(['R', 'EH1', 'Z', 'AH0', 'M', 'EY2'])
        ]
        assert Word('googus').pronunciations() == []
| import pytest
from ham import Word
class TestWord(object):
def test_constructor(self):
slap = Word('slap')
slap2 = Word(slap)
assert slap == slap2
assert slap is not slap2
def test_str(self):
assert str(Word('foo')) == 'foo'
def test_repr(self):
assert repr(Word('foo')) == '<Word "foo">'
def test_iter(self):
word = Word('hello')
i = iter(word)
assert ''.join(i) == 'hello'
with pytest.raises(StopIteration):
next(i)
assert ''.join(word) == 'hello'
def test_contains(self):
foo = Word('foo')
assert 'f' in foo
assert 'o' in foo
assert 'fo' in foo
assert 'oo' in foo
assert 'foo' in foo
assert 'b' not in foo
assert 'of' not in foo
assert 'foof' not in foo
def test_eq(self):
foo = Word('foo')
assert foo == foo
assert foo == Word('foo')
assert foo == Word(foo)
assert not (foo == Word('monkey'))
def test_ne(self):
assert Word('monkey') != Word('butler')
assert Word('monkey') != 'monkey'
assert not (Word('monkey') != Word('monkey'))
def test_pop(self):
w = Word('foo')
assert w.pop('f') == 'f'
assert str(w) == '.oo'
assert w.pop('o') == 'o'
assert str(w) == '..o'
assert w.pop('o') == 'o'
assert str(w) == '...'
def test_pop_nonexistent(self):
w = Word('foo')
with pytest.raises(ValueError) as excinfo:
w.pop('a')
assert str(excinfo.value) == '"a" is not in word'
def test_pop_single_letter_from_middle_of_word(self):
w = Word('primary')
assert w.pop('m') == 'm'
assert str(w) == 'pri.ary'
def test_pop_multiple_letters(self):
w = Word('monkey')
assert w.pop('onk') == 'onk'
assert str(w) == 'm...ey'
def test_len(self):
key = 'key'
assert len(Word(key)) == len(key)
monkey = 'monkey'
assert len(Word(monkey)) == len(monkey)
w = Word(monkey)
w.pop(key)
assert len(w) == len(monkey) - len(key)
def test_vowel_groups(self):
assert list(Word('hello').vowel_groups()) == ['e', 'o']
assert list(Word('monkey').vowel_groups()) == ['o', 'ey']
assert list(Word('toast').vowel_groups()) == ['oa']
assert list(Word('abomination').vowel_groups()) == \
['a', 'o', 'i', 'a', 'io']
assert list(Word('unceremoniously').vowel_groups()) == \
['u', 'e', 'e', 'o', 'iou', 'y']
@pytest.mark.skipif('sys.version_info >= (3,0)')
def test_pronunciations(self):
from ham import Pronunciation
assert Word('meow').pronunciations() == \
[Pronunciation(['M', 'IY0', 'AW1'])]
assert Word('tomato').pronunciations() == [
Pronunciation(['T', 'AH0', 'M', 'EY1', 'T', 'OW2']),
Pronunciation(['T', 'AH0', 'M', 'AA1', 'T', 'OW2'])
]
assert Word('resume').pronunciations() == [
Pronunciation(['R', 'IH0', 'Z', 'UW1', 'M']),
Pronunciation(['R', 'IY0', 'Z', 'UW1', 'M']),
Pronunciation(['R', 'EH1', 'Z', 'AH0', 'M', 'EY2'])
]
assert Word('googus').pronunciations() == []
| none | 1 | 3.184655 | 3 | |
backend/server/__init__.py | andres-tuells/saturdayai-hand-gesture | 1 | 6620070 | <filename>backend/server/__init__.py
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.options import define, options
from tornado.web import Application
from .views import HelloWorld, WSHandler
define('port', default=8888, help='port to listen on')
def main():
    """Build the tornado application and serve it until the loop stops."""
    routes = [
        ('/', HelloWorld),
        ('/ws', WSHandler),
    ]
    server = HTTPServer(Application(routes))
    server.listen(options.port)
    print('Listening on http://localhost:%i' % options.port)
    IOLoop.current().start()
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.options import define, options
from tornado.web import Application
from .views import HelloWorld, WSHandler
define('port', default=8888, help='port to listen on')
def main():
"""Construct and serve the tornado application."""
app = Application([
('/', HelloWorld),
('/ws', WSHandler),
])
http_server = HTTPServer(app)
http_server.listen(options.port)
print('Listening on http://localhost:%i' % options.port)
IOLoop.current().start() | en | 0.910707 | Construct and serve the tornado application. | 2.800305 | 3 |
setup.py | shoemakerdr/analytic_shrinkage | 0 | 6620071 | """
Create Whl: python setup.py sdist bdist_wheel
Local installation: python -m pip install dist/[name-of-whl]
Push to pip: python -m twine upload dist/*
"""
from pathlib import Path
from setuptools import setup, find_packages
# The PyPI long description comes straight from the repository README.
long_description = Path("README.md").read_text()

setup(
    name='non-linear-shrinkage',
    version='1.0.0',
    description="Non-Linear Shrinkage Estimator from Ledoit and Wolf (2018) ",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/shoemakerdr/analytic_shrinkage",
    # src-layout: packages live under src/, importable without the prefix.
    packages=find_packages(where="src"),
    package_dir={"": "src"},
    python_requires=">=3.6",
    install_requires=["numpy"],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)
| """
Create Whl: python setup.py sdist bdist_wheel
Local installation: python -m pip install dist/[name-of-whl]
Push to pip: python -m twine upload dist/*
"""
from pathlib import Path
from setuptools import setup, find_packages
readme = Path("README.md")
long_description = readme.read_text()
setup(
name='non-linear-shrinkage',
version='1.0.0',
description="Non-Linear Shrinkage Estimator from Ledoit and Wolf (2018) ",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/shoemakerdr/analytic_shrinkage",
packages=find_packages(where="src"),
package_dir={"":"src"},
python_requires=">=3.6",
install_requires=[
"numpy"
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
| en | 0.794793 | Create Whl: python setup.py sdist bdist_wheel Local installation: python -m pip install dist/[name-of-whl] Push to pip: python -m twine upload dist/* | 1.66628 | 2 |
TulipUIHelpers/duplicateProperty.py | renoust/TulipPythonPluginFarm | 0 | 6620072 | from tulip import *
import tulipplugins
class DuplicateProperty(tlp.Algorithm):
    """Tulip algorithm that copies one graph property into another.

    Values of "input property" on the current graph are copied onto
    "output property name" (created if missing, with the same value type)
    on the graph selected by "output graph id" (-1 = current graph),
    restricted to nodes, edges or both.
    """

    def __init__(self, context):
        tlp.Algorithm.__init__(self, context)
        self.addPropertyParameter("input property",
                                  "copy from this property",
                                  "", True, True, False)
        self.addStringParameter("output property name",
                                # FIX: added separator; the two literals used
                                # to concatenate with no space between them.
                                "to this property name; "
                                "the target has to be of same type, or creates it if it does not exist",
                                "", True, True, False)
        self.addStringCollectionParameter("target",
                                          "the target of the property to set<br>"
                                          "it can be <i>nodes</i>, <i>edges</i>, or <i>both</i> (nodes and edges)",
                                          "nodes;edges;both", False, True, False)
        self.addIntegerParameter("output graph id",
                                 # FIX: help-text typo "hte" -> "the".
                                 "the id of the output graph "
                                 "(if not set '-1', the current graph is the output graph)",
                                 "-1", False, True, False)
        self.addStringCollectionParameter("output scope",
                                          "the scope of property to copy (<i>global</i> or <i>local</i>)",
                                          "global;local", False, True, False)

    def _target_graph(self):
        """Resolve the output graph from the "output graph id" parameter.

        Returns None when the id does not match any descendant graph.
        """
        target_id = self.dataSet["output graph id"]
        if target_id == -1:
            target_id = self.graph.getId()
        target_graph = self.graph.getRoot()
        if target_id > 0:
            target_graph = self.graph.getRoot().getDescendantGraph(target_id)
        return target_graph

    def check(self):
        """Validate parameters; returns (ok, error_message)."""
        target_graph = self._target_graph()
        # BUG FIX: getDescendantGraph() returns None for an unknown id; the
        # original code compared against the *string* "None", which can
        # never equal a graph object (or None), so this check never fired.
        if target_graph is None:
            return (False, "Please specify a valid target graph ID (empty means current graph)")
        source_property = self.dataSet["input property"]
        target_property_name = self.dataSet["output property name"]
        if target_graph.existProperty(target_property_name):
            target_property = self.graph.getProperty(target_property_name)
            if source_property.getTypename() != target_property.getTypename():
                return (False, "source and target properties have different types: '" + source_property.getTypename() + "' and '" + target_property.getTypename() + "' \nplease change the output property name")
        return (True, "")

    # simplifying the access to the property interface
    def getProp(self, _graph, _name, _type, _scope):
        """Fetch (creating if needed) property ``_name`` of type ``_type``
        on ``_graph``; ``_scope`` picks the global vs. local accessor.

        Returns None for an unsupported type name (check() guarantees the
        type names seen here come from an existing Tulip property).
        """
        kind = _type.lower()
        is_global = (_scope == "global")
        if kind in ["boolean", "bool"]:
            return _graph.getBooleanProperty(_name) if is_global else _graph.getLocalBooleanProperty(_name)
        elif kind in ["string", "str", "unicode"]:
            return _graph.getStringProperty(_name) if is_global else _graph.getLocalStringProperty(_name)
        elif kind in ["integer", "int", "unsigned int", "long"]:
            return _graph.getIntegerProperty(_name) if is_global else _graph.getLocalIntegerProperty(_name)
        elif kind in ["double", "float"]:
            return _graph.getDoubleProperty(_name) if is_global else _graph.getLocalDoubleProperty(_name)
        elif kind in ["layout", "coord"]:
            return _graph.getLayoutProperty(_name) if is_global else _graph.getLocalLayoutProperty(_name)
        elif kind in ["color"]:
            return _graph.getColorProperty(_name) if is_global else _graph.getLocalColorProperty(_name)
        elif kind in ["size"]:
            return _graph.getSizeProperty(_name) if is_global else _graph.getLocalSizeProperty(_name)
        return None

    def run(self):
        """Copy the values element-by-element onto the target property."""
        target_graph = self._target_graph()
        source_property = self.dataSet["input property"]
        target_property_name = self.dataSet["output property name"]
        target_scope = self.dataSet["output scope"].getCurrentString()
        apply_on = self.dataSet["target"].getCurrentString()
        source_type = source_property.getTypename()
        target_property = self.getProp(target_graph, target_property_name, source_type, target_scope)
        # Only copy values for elements that also belong to the current
        # graph, so running on a sub-graph copies just its own elements.
        if apply_on in ["both", "nodes"]:
            for n in target_graph.getNodes():
                if self.graph.isElement(n):
                    target_property[n] = source_property[n]
        if apply_on in ["both", "edges"]:
            for e in target_graph.getEdges():
                if self.graph.isElement(e):
                    target_property[e] = source_property[e]
        return True
# The line below does the magic to register the plugin to the plugin database
# and updates the GUI to make it accessible through the menus.
# Arguments: class name, displayed name, author, date, info, release, group.
tulipplugins.registerPluginOfGroup("DuplicateProperty", "Copy/duplicate Property", "<NAME>", "05/05/2015", "Duplicate or copy a graph property (also to another graph)", "1.0", "Property Manipulation")
| from tulip import *
import tulipplugins
class DuplicateProperty(tlp.Algorithm):
    """Tulip algorithm that copies one graph property into another.

    Values of "input property" on the current graph are copied onto
    "output property name" (created if missing, with the same value type)
    on the graph selected by "output graph id" (-1 = current graph),
    restricted to nodes, edges or both.
    """

    def __init__(self, context):
        tlp.Algorithm.__init__(self, context)
        self.addPropertyParameter("input property",
                                  "copy from this property",
                                  "", True, True, False)
        self.addStringParameter("output property name",
                                # FIX: added separator; the two literals used
                                # to concatenate with no space between them.
                                "to this property name; "
                                "the target has to be of same type, or creates it if it does not exist",
                                "", True, True, False)
        self.addStringCollectionParameter("target",
                                          "the target of the property to set<br>"
                                          "it can be <i>nodes</i>, <i>edges</i>, or <i>both</i> (nodes and edges)",
                                          "nodes;edges;both", False, True, False)
        self.addIntegerParameter("output graph id",
                                 # FIX: help-text typo "hte" -> "the".
                                 "the id of the output graph "
                                 "(if not set '-1', the current graph is the output graph)",
                                 "-1", False, True, False)
        self.addStringCollectionParameter("output scope",
                                          "the scope of property to copy (<i>global</i> or <i>local</i>)",
                                          "global;local", False, True, False)

    def _target_graph(self):
        """Resolve the output graph from the "output graph id" parameter.

        Returns None when the id does not match any descendant graph.
        """
        target_id = self.dataSet["output graph id"]
        if target_id == -1:
            target_id = self.graph.getId()
        target_graph = self.graph.getRoot()
        if target_id > 0:
            target_graph = self.graph.getRoot().getDescendantGraph(target_id)
        return target_graph

    def check(self):
        """Validate parameters; returns (ok, error_message)."""
        target_graph = self._target_graph()
        # BUG FIX: getDescendantGraph() returns None for an unknown id; the
        # original code compared against the *string* "None", which can
        # never equal a graph object (or None), so this check never fired.
        if target_graph is None:
            return (False, "Please specify a valid target graph ID (empty means current graph)")
        source_property = self.dataSet["input property"]
        target_property_name = self.dataSet["output property name"]
        if target_graph.existProperty(target_property_name):
            target_property = self.graph.getProperty(target_property_name)
            if source_property.getTypename() != target_property.getTypename():
                return (False, "source and target properties have different types: '" + source_property.getTypename() + "' and '" + target_property.getTypename() + "' \nplease change the output property name")
        return (True, "")

    # simplifying the access to the property interface
    def getProp(self, _graph, _name, _type, _scope):
        """Fetch (creating if needed) property ``_name`` of type ``_type``
        on ``_graph``; ``_scope`` picks the global vs. local accessor.

        Returns None for an unsupported type name (check() guarantees the
        type names seen here come from an existing Tulip property).
        """
        kind = _type.lower()
        is_global = (_scope == "global")
        if kind in ["boolean", "bool"]:
            return _graph.getBooleanProperty(_name) if is_global else _graph.getLocalBooleanProperty(_name)
        elif kind in ["string", "str", "unicode"]:
            return _graph.getStringProperty(_name) if is_global else _graph.getLocalStringProperty(_name)
        elif kind in ["integer", "int", "unsigned int", "long"]:
            return _graph.getIntegerProperty(_name) if is_global else _graph.getLocalIntegerProperty(_name)
        elif kind in ["double", "float"]:
            return _graph.getDoubleProperty(_name) if is_global else _graph.getLocalDoubleProperty(_name)
        elif kind in ["layout", "coord"]:
            return _graph.getLayoutProperty(_name) if is_global else _graph.getLocalLayoutProperty(_name)
        elif kind in ["color"]:
            return _graph.getColorProperty(_name) if is_global else _graph.getLocalColorProperty(_name)
        elif kind in ["size"]:
            return _graph.getSizeProperty(_name) if is_global else _graph.getLocalSizeProperty(_name)
        return None

    def run(self):
        """Copy the values element-by-element onto the target property."""
        target_graph = self._target_graph()
        source_property = self.dataSet["input property"]
        target_property_name = self.dataSet["output property name"]
        target_scope = self.dataSet["output scope"].getCurrentString()
        apply_on = self.dataSet["target"].getCurrentString()
        source_type = source_property.getTypename()
        target_property = self.getProp(target_graph, target_property_name, source_type, target_scope)
        # Only copy values for elements that also belong to the current
        # graph, so running on a sub-graph copies just its own elements.
        if apply_on in ["both", "nodes"]:
            for n in target_graph.getNodes():
                if self.graph.isElement(n):
                    target_property[n] = source_property[n]
        if apply_on in ["both", "edges"]:
            for e in target_graph.getEdges():
                if self.graph.isElement(e):
                    target_property[e] = source_property[e]
        return True
# The line below does the magic to register the plugin to the plugin database
# and updates the GUI to make it accessible through the menus.
tulipplugins.registerPluginOfGroup("DuplicateProperty", "Copy/duplicate Property", "<NAME>", "05/05/2015", "Duplicate or copy a graph property (also to another graph)", "1.0", "Property Manipulation")
| en | 0.505826 | #self.addIntegerParameter("graph source id", # "to this property name"\ # "the target has to be of same type, or creates it if it does not exist", # "", True, True, False) #source_id = self.dataSet["graph source id"] #if source_id == "": # source_id = self.graph.getId() #if source_id > 0: # source_graph = self.graph.getRoot().getDescendantGraph(source_id) #if source_graph == "None": # return (False, "Please specify a valid source graph ID (empty means current graph)") #simplyfing the access to the property interface #source_id = self.dataSet["graph source id"] #if source_id == -1: # source_id = self.graph.getId() #source_graph = self.graph.getRoot().getDescendantGraph(source_id) #if source_graph.getId() != self.graph.getId(): # check for the right property in the right graph #print "the target property: " ,target_property # The line below does the magic to register the plugin to the plugin database # and updates the GUI to make it accessible through the menus. | 2.699187 | 3 |
fabfile.py | igorsobreira/eizzek | 1 | 6620073 | import os.path
from fabric.api import *
from eizzek import config
# Deploy target; SSH_HOST comes from the project config.
env.hosts = [config.SSH_HOST] # format: username@host:port
# Remote layout: a virtualenv with the eizzek checkout inside it.
VIRTUALENV_DIR = '/home/igor/eizzek_env'
EIZZEK_DIR = os.path.join(VIRTUALENV_DIR, 'eizzek')
# Interpreter and twistd binaries from the remote virtualenv.
python = os.path.join(VIRTUALENV_DIR, 'bin', 'python')
twistd = os.path.join(VIRTUALENV_DIR, 'bin', 'twistd')
def update_deps(*deps):
    """Pull and reinstall each git-managed dependency inside the virtualenv.

    Each name in *deps is a directory under VIRTUALENV_DIR containing a
    git checkout with a setup.py.
    """
    for dep in deps:
        # FIX: parenthesized print -- identical output on Python 2 for a
        # single argument, and valid syntax on Python 3.
        print(' - Updating %s' % dep)
        with cd(os.path.join(VIRTUALENV_DIR, dep)):
            run('git pull')
            run('%s setup.py install' % python)
def update(all=False):
    """Update the project. Use :all to also update all git dependencies."""
    # NOTE: `all` shadows the builtin, but renaming it would break the
    # fab CLI syntax `fab update:all=1`, so the name is kept.
    if all:
        update_deps('wokkel')
    with cd(EIZZEK_DIR):
        # FIX: parenthesized print (py2-compatible, py3-valid).
        print(' - Updating eizzek')
        run('git pull')
    # put() uses absolute paths, so it does not depend on cd() above.
    send_config()
def start():
    """Start the bot service as a twistd daemon."""
    launch = '%s -y eizzek/twistd.tac' % twistd
    with cd(EIZZEK_DIR):
        run(launch)
def stop(force=False):
    """Stop the bot. Use :force to kill -9; the default is -15 (SIGTERM)."""
    with cd(EIZZEK_DIR):
        # twistd writes its pid file into the working directory; if it is
        # absent the daemon is not running.
        if 'twistd.pid' not in run('ls'):
            # FIX: parenthesized print (py2-compatible, py3-valid).
            print(' - Not running')
            return
        pid = run('cat twistd.pid')
        # FIX: use a fresh local instead of rebinding the `force` parameter
        # to a string of a different meaning.
        signal = '-9' if force else '-15'
        run('kill %s %s' % (signal, pid))
def send_config():
    """Upload the local eizzek/config.py into the deployed checkout."""
    remote_path = os.path.join(EIZZEK_DIR, 'eizzek', 'config.py')
    put('eizzek/config.py', remote_path)
| import os.path
from fabric.api import *
from eizzek import config
env.hosts = [config.SSH_HOST] # format: username@host:port
VIRTUALENV_DIR = '/home/igor/eizzek_env'
EIZZEK_DIR = os.path.join(VIRTUALENV_DIR, 'eizzek')
python = os.path.join(VIRTUALENV_DIR, 'bin', 'python')
twistd = os.path.join(VIRTUALENV_DIR, 'bin', 'twistd')
def update_deps(*deps):
for dep in deps:
print ' - Updating %s' % dep
with cd( os.path.join(VIRTUALENV_DIR, dep) ):
run('git pull')
run('%s setup.py install' % python)
def update(all=False):
''' Update the project. Use :all to update all git depedencies '''
if all:
update_deps('wokkel')
with cd(EIZZEK_DIR):
print ' - Updating eizzek'
run('git pull')
send_config()
def start():
''' Start bot service '''
with cd(EIZZEK_DIR):
run('%s -y eizzek/twistd.tac' % twistd)
def stop(force=False):
''' Stop the bot. Use :force to kill -9. Default is -15 '''
with cd(EIZZEK_DIR):
if 'twistd.pid' not in run('ls'):
print ' - Not running'
return
pid = run('cat twistd.pid')
force = '-9' if force else '-15'
run( 'kill %s %s' % (force, pid) )
def send_config():
''' Send the local config.py to the server '''
put('eizzek/config.py', os.path.join(EIZZEK_DIR, 'eizzek', 'config.py'))
| en | 0.4766 | # format: username@host:port Update the project. Use :all to update all git depedencies Start bot service Stop the bot. Use :force to kill -9. Default is -15 Send the local config.py to the server | 1.905703 | 2 |
src/test/py/bazel/test_wrapper_test.py | orcguru/bazel | 0 | 6620074 | # Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from src.test.py.bazel import test_base
class TestWrapperTest(test_base.TestBase):
    """End-to-end checks that Bazel runs .bat sh_test targets correctly,
    with and without the Windows native test wrapper."""

    def _CreateMockWorkspace(self):
        # Minimal workspace with three .bat sh_tests: pass, fail, print.
        self.ScratchFile('WORKSPACE')
        self.ScratchFile('foo/BUILD', [
            'sh_test(',
            ' name = "passing_test.bat",',
            ' srcs = ["passing.bat"],',
            ')',
            'sh_test(',
            ' name = "failing_test.bat",',
            ' srcs = ["failing.bat"],',
            ')',
            'sh_test(',
            ' name = "printing_test.bat",',
            ' srcs = ["printing.bat"],',
            ')',
        ])
        self.ScratchFile('foo/passing.bat', ['@exit /B 0'], executable=True)
        self.ScratchFile('foo/failing.bat', ['@exit /B 1'], executable=True)
        self.ScratchFile('foo/printing.bat', ['@echo lorem ipsum'], executable=True)

    def _AssertPassingTest(self, flag):
        # '-t-' (= --nocache_test_results) forces the test to actually run.
        exit_code, _, stderr = self.RunBazel([
            'test',
            '//foo:passing_test.bat',
            '-t-',
            flag,
        ])
        self.AssertExitCode(exit_code, 0, stderr)

    def _AssertFailingTest(self, flag):
        # Bazel exit code 3: the build succeeded but a test failed.
        exit_code, _, stderr = self.RunBazel([
            'test',
            '//foo:failing_test.bat',
            '-t-',
            flag,
        ])
        self.AssertExitCode(exit_code, 3, stderr)

    def _AssertPrintingTest(self, flag):
        # --test_output=streamed forwards the test's stdout, so the echoed
        # marker must appear somewhere in Bazel's stdout or stderr.
        exit_code, stdout, stderr = self.RunBazel([
            'test',
            '//foo:printing_test.bat',
            '--test_output=streamed',
            '-t-',
            flag,
        ])
        self.AssertExitCode(exit_code, 0, stderr)
        found = False
        for line in stdout + stderr:
            if 'lorem ipsum' in line:
                found = True
        if not found:
            self.fail('FAIL: output:\n%s\n---' % '\n'.join(stderr + stdout))

    def testTestExecutionWithTestSetupShAndWithTestWrapperExe(self):
        # Run the same three checks under both wrapper implementations.
        self._CreateMockWorkspace()
        flag = '--nowindows_native_test_wrapper'
        self._AssertPassingTest(flag)
        self._AssertFailingTest(flag)
        self._AssertPrintingTest(flag)
        # As of 2018-08-30, the Windows native test runner can run simple tests,
        # though it does not set up the test's environment yet.
        flag = '--windows_native_test_wrapper'
        self._AssertPassingTest(flag)
        self._AssertFailingTest(flag)
        self._AssertPrintingTest(flag)
if __name__ == '__main__':
unittest.main()
| # Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from src.test.py.bazel import test_base
class TestWrapperTest(test_base.TestBase):
def _CreateMockWorkspace(self):
self.ScratchFile('WORKSPACE')
self.ScratchFile('foo/BUILD', [
'sh_test(',
' name = "passing_test.bat",',
' srcs = ["passing.bat"],',
')',
'sh_test(',
' name = "failing_test.bat",',
' srcs = ["failing.bat"],',
')',
'sh_test(',
' name = "printing_test.bat",',
' srcs = ["printing.bat"],',
')',
])
self.ScratchFile('foo/passing.bat', ['@exit /B 0'], executable=True)
self.ScratchFile('foo/failing.bat', ['@exit /B 1'], executable=True)
self.ScratchFile('foo/printing.bat', ['@echo lorem ipsum'], executable=True)
def _AssertPassingTest(self, flag):
exit_code, _, stderr = self.RunBazel([
'test',
'//foo:passing_test.bat',
'-t-',
flag,
])
self.AssertExitCode(exit_code, 0, stderr)
def _AssertFailingTest(self, flag):
exit_code, _, stderr = self.RunBazel([
'test',
'//foo:failing_test.bat',
'-t-',
flag,
])
self.AssertExitCode(exit_code, 3, stderr)
def _AssertPrintingTest(self, flag):
exit_code, stdout, stderr = self.RunBazel([
'test',
'//foo:printing_test.bat',
'--test_output=streamed',
'-t-',
flag,
])
self.AssertExitCode(exit_code, 0, stderr)
found = False
for line in stdout + stderr:
if 'lorem ipsum' in line:
found = True
if not found:
self.fail('FAIL: output:\n%s\n---' % '\n'.join(stderr + stdout))
def testTestExecutionWithTestSetupShAndWithTestWrapperExe(self):
self._CreateMockWorkspace()
flag = '--nowindows_native_test_wrapper'
self._AssertPassingTest(flag)
self._AssertFailingTest(flag)
self._AssertPrintingTest(flag)
# As of 2018-08-30, the Windows native test runner can run simple tests,
# though it does not set up the test's environment yet.
flag = '--windows_native_test_wrapper'
self._AssertPassingTest(flag)
self._AssertFailingTest(flag)
self._AssertPrintingTest(flag)
if __name__ == '__main__':
unittest.main()
| en | 0.871233 | # Copyright 2018 The Bazel Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # As of 2018-08-30, the Windows native test runner can run simple tests, # though it does not set up the test's environment yet. | 1.919359 | 2 |
gaze_birl/complexreward.py | asaran/gaze-LfD | 1 | 6620075 | <filename>gaze_birl/complexreward.py
# -*- coding: utf-8 -*-
import birl
import utils
import numpy as np
import matplotlib.pyplot as plt
#calculate the policy loss between the hypothesis return and the map return
def calculate_policy_loss(config, hyp_params, map_params):
    """Regret of the MAP placement when evaluated under the hypothesis reward.

    Both *hyp_params* and *map_params* are (object_weights, abs_weights)
    pairs for utils.RbfComplexReward.
    """
    hyp_reward_fn = utils.RbfComplexReward(config, hyp_params[0], hyp_params[1])
    map_reward_fn = utils.RbfComplexReward(config, map_params[0], map_params[1])
    # Best achievable return under the hypothesis reward...
    _, hyp_return = hyp_reward_fn.estimate_best_placement()
    # ...versus what the MAP-optimal placement earns under that same reward.
    map_placement, _ = map_reward_fn.estimate_best_placement()
    return hyp_return - hyp_reward_fn.get_reward(map_placement)
def calculate_placement_loss(config, hyp_params, map_params):
#calculate reward for optimal placement under hyp_reward
hyp_obj_weights, hyp_abs_weights = hyp_params
hyp_reward_fn = utils.RbfComplexReward(config, hyp_obj_weights, hyp_abs_weights)
#active_utils.visualize_reward(hyp_reward_fn, "hypothesis reward")
#get optimal placement under the hypothesis reward function and new configuration
hyp_placement, _ = hyp_reward_fn.estimate_best_placement()
#calculate reward for map placement under hyp_reward
map_obj_weights, map_abs_weights = map_params
map_reward_fn = utils.RbfComplexReward(config, map_obj_weights, map_abs_weights)
#active_utils.visualize_reward(map_reward_fn, "map reward")
#get optimal placement under map reward function and new configuration
map_placement, _ = map_reward_fn.estimate_best_placement()
#print "placement loss", np.linalg.norm(hyp_placement - map_placement)
#plt.show()
return np.linalg.norm(hyp_placement - map_placement)
def get_best_placement(config, map_params):
#calculate reward for map placement under hyp_reward
map_obj_weights, map_abs_weights = map_params
map_reward_fn = utils.RbfComplexReward(config, map_obj_weights, map_abs_weights)
#active_utils.visualize_reward(map_reward_fn, "map reward")
#get optimal placement under map reward function and new configuration
map_placement, _ = map_reward_fn.estimate_best_placement()
return map_placement | <filename>gaze_birl/complexreward.py
# -*- coding: utf-8 -*-
import birl
import utils
import numpy as np
import matplotlib.pyplot as plt
#calculate the policy loss between the hypothesis return and the map return
def calculate_policy_loss(config, hyp_params, map_params):
#calculate reward for optimal placement under hyp_reward
hyp_obj_weights, hyp_abs_weights = hyp_params
hyp_reward_fn = utils.RbfComplexReward(config, hyp_obj_weights, hyp_abs_weights)
#get optimal placement under the hypothesis reward function and new configuration
hyp_placement, hyp_return = hyp_reward_fn.estimate_best_placement()
#calculate reward for map placement under hyp_reward
map_obj_weights, map_abs_weights = map_params
map_reward_fn = utils.RbfComplexReward(config, map_obj_weights, map_abs_weights)
#get optimal placement under map reward function and new configuration
map_placement, _ = map_reward_fn.estimate_best_placement()
map_return = hyp_reward_fn.get_reward(map_placement)
return hyp_return - map_return
def calculate_placement_loss(config, hyp_params, map_params):
#calculate reward for optimal placement under hyp_reward
hyp_obj_weights, hyp_abs_weights = hyp_params
hyp_reward_fn = utils.RbfComplexReward(config, hyp_obj_weights, hyp_abs_weights)
#active_utils.visualize_reward(hyp_reward_fn, "hypothesis reward")
#get optimal placement under the hypothesis reward function and new configuration
hyp_placement, _ = hyp_reward_fn.estimate_best_placement()
#calculate reward for map placement under hyp_reward
map_obj_weights, map_abs_weights = map_params
map_reward_fn = utils.RbfComplexReward(config, map_obj_weights, map_abs_weights)
#active_utils.visualize_reward(map_reward_fn, "map reward")
#get optimal placement under map reward function and new configuration
map_placement, _ = map_reward_fn.estimate_best_placement()
#print "placement loss", np.linalg.norm(hyp_placement - map_placement)
#plt.show()
return np.linalg.norm(hyp_placement - map_placement)
def get_best_placement(config, map_params):
#calculate reward for map placement under hyp_reward
map_obj_weights, map_abs_weights = map_params
map_reward_fn = utils.RbfComplexReward(config, map_obj_weights, map_abs_weights)
#active_utils.visualize_reward(map_reward_fn, "map reward")
#get optimal placement under map reward function and new configuration
map_placement, _ = map_reward_fn.estimate_best_placement()
return map_placement | en | 0.589333 | # -*- coding: utf-8 -*- #calculate the policy loss between the hypothesis return and the map return #calculate reward for optimal placement under hyp_reward #get optimal placement under the hypothesis reward function and new configuration #calculate reward for map placement under hyp_reward #get optimal placement under map reward function and new configuration #calculate reward for optimal placement under hyp_reward #active_utils.visualize_reward(hyp_reward_fn, "hypothesis reward") #get optimal placement under the hypothesis reward function and new configuration #calculate reward for map placement under hyp_reward #active_utils.visualize_reward(map_reward_fn, "map reward") #get optimal placement under map reward function and new configuration #print "placement loss", np.linalg.norm(hyp_placement - map_placement) #plt.show() #calculate reward for map placement under hyp_reward #active_utils.visualize_reward(map_reward_fn, "map reward") #get optimal placement under map reward function and new configuration | 2.357607 | 2 |
users/admin.py | r34g4n/ADT_booking | 0 | 6620076 | from django.contrib import admin
from simple_history.admin import SimpleHistoryAdmin
from .models import Gender, Patient, Doctor
# Register your models here.
admin.site.register(Gender, SimpleHistoryAdmin)
admin.site.register(Patient, SimpleHistoryAdmin)
admin.site.register(Doctor, SimpleHistoryAdmin)
| from django.contrib import admin
from simple_history.admin import SimpleHistoryAdmin
from .models import Gender, Patient, Doctor
# Register your models here.
admin.site.register(Gender, SimpleHistoryAdmin)
admin.site.register(Patient, SimpleHistoryAdmin)
admin.site.register(Doctor, SimpleHistoryAdmin)
| en | 0.968259 | # Register your models here. | 1.413712 | 1 |
svelte_frontend/test_frontend/test_integration.py | BurnySc2/tools | 0 | 6620077 | <gh_stars>0
from pathlib import Path
from typing import Set
from playwright.sync_api import BrowserContext, Page
from burny_common.integration_test_helper import (
find_next_free_port,
get_website_address,
kill_processes,
remove_leftover_files,
start_fastapi_dev_server,
start_svelte_dev_server,
)
class TestClass:
FRONTEND_ADDRESS = ''
BACKEND_ADDRESS = ''
# Remember which node processes to close
NEWLY_CREATED_PROCESSES: Set[int] = set()
# And which files to remove
CREATED_FILES: Set[Path] = set()
def setup_method(self, _method=None):
""" setup any state tied to the execution of the given method in a
class. setup_method is invoked for every test method of a class.
See https://docs.pytest.org/en/6.2.x/xunit_setup.html
"""
free_frontend_port = find_next_free_port()
free_backend_port = find_next_free_port(exclude_ports={free_frontend_port})
self.FRONTEND_ADDRESS = get_website_address(free_frontend_port)
self.BACKEND_ADDRESS = f'http://localhost:{free_backend_port}'
start_fastapi_dev_server(free_backend_port, self.NEWLY_CREATED_PROCESSES)
start_svelte_dev_server(
free_frontend_port,
self.NEWLY_CREATED_PROCESSES,
backend_proxy=f'localhost:{free_backend_port}',
)
def teardown_method(self, _method=None):
""" teardown any state that was previously setup with a setup_method
call.
"""
# Stop frontend + backend server
kill_processes(self.NEWLY_CREATED_PROCESSES)
self.NEWLY_CREATED_PROCESSES.clear()
# Remove files created by test
remove_leftover_files(self.CREATED_FILES)
self.CREATED_FILES.clear()
def test_backend_server_available(self, page: Page):
page.goto(self.BACKEND_ADDRESS)
assert '{"Hello":"World"}' in page.content()
def test_frontend_server_available(self, page: Page):
page.goto(self.FRONTEND_ADDRESS)
assert 'Home' in page.content()
assert 'About' in page.content()
assert 'Chat' in page.content()
assert 'Todo' in page.content()
assert 'Slugs' in page.content()
assert 'BrowserStorage' in page.content()
def test_add_todo_submit1(self, page: Page):
""" Add a new to-do entry """
page.goto(self.FRONTEND_ADDRESS)
assert 'Hello world!' in page.content()
page.click('#todo')
page.wait_for_url('/todo')
assert 'Unable to connect to server - running local mode' not in page.content()
test_text = 'my amazing test todo text1'
assert test_text not in page.content()
page.fill('#newTodoInput', test_text)
page.click('#submit1')
page.wait_for_timeout(100)
assert test_text in page.content()
assert 'Unable to connect to server - running local mode' not in page.content()
def test_add_todo_submit2(self, page: Page):
""" Add a new to-do entry """
page.goto(self.FRONTEND_ADDRESS)
assert 'Hello world!' in page.content()
page.click('#todo')
page.wait_for_url('/todo')
assert 'Unable to connect to server - running local mode' not in page.content()
test_text = 'my amazing test todo text1'
assert test_text not in page.content()
page.fill('#newTodoInput', test_text)
page.click('#submit2')
page.wait_for_timeout(100)
assert test_text in page.content()
assert 'Unable to connect to server - running local mode' not in page.content()
def test_add_todo_submit3(self, page: Page):
""" Add a new to-do entry """
page.goto(self.FRONTEND_ADDRESS)
assert 'Hello world!' in page.content()
page.click('#todo')
page.wait_for_url('/todo')
assert 'Unable to connect to server - running local mode' not in page.content()
test_text = 'my amazing test todo text1'
assert test_text not in page.content()
page.fill('#newTodoInput', test_text)
page.click('#submit3')
page.wait_for_timeout(100)
assert test_text in page.content()
assert 'Unable to connect to server - running local mode' not in page.content()
def test_chat_single(self, page: Page):
""" Chat with yourself """
page.goto(self.FRONTEND_ADDRESS)
assert 'Hello world!' in page.content()
page.click('#chat')
page.wait_for_url('/normalchat')
my_username = 'beep_boop'
assert my_username not in page.content()
page.fill('#username', my_username)
page.click('#connect')
# Send a message by pressing send button
some_text = 'bla blubb'
page.fill('#chatinput', some_text)
assert 'You' not in page.content()
page.click('#sendmessage')
assert 'You' in page.content()
assert some_text in page.content()
# Send a message by pressing enter
some_other_text = 'some other text'
page.type('#chatinput', f'{some_other_text}\n')
assert some_other_text in page.content()
def test_chat_two_people(self, context: BrowserContext):
""" Make sure chat between 2 people work """
# Connect with robot1
page1 = context.new_page()
page1.goto(self.FRONTEND_ADDRESS)
page1.click('#chat')
page1.wait_for_url('/normalchat')
my_username1 = 'robot1'
page1.fill('#username', my_username1)
page1.click('#connect')
# Send message from robot1
some_text1 = 'sometext1'
page1.fill('#chatinput', some_text1)
page1.click('#sendmessage')
assert 'You' in page1.content()
assert some_text1 in page1.content()
# Connect with robot2
page2 = context.new_page()
page2.goto(self.FRONTEND_ADDRESS)
page2.click('#chat')
page2.wait_for_url('/normalchat')
my_username2 = 'robot2'
page2.fill('#username', my_username2)
page2.click('#connect')
# Make sure robot1's messages are visible from robot2
assert my_username1 in page2.content()
assert some_text1 in page2.content()
# Send message from robot2
some_text2 = 'sometext2'
page2.fill('#chatinput', some_text2)
page2.click('#sendmessage')
assert 'You' in page2.content()
assert some_text2 in page2.content()
# Make sure robot2's messages are visible from robot1
assert my_username2 in page1.content()
assert some_text2 in page1.content()
| from pathlib import Path
from typing import Set
from playwright.sync_api import BrowserContext, Page
from burny_common.integration_test_helper import (
find_next_free_port,
get_website_address,
kill_processes,
remove_leftover_files,
start_fastapi_dev_server,
start_svelte_dev_server,
)
class TestClass:
FRONTEND_ADDRESS = ''
BACKEND_ADDRESS = ''
# Remember which node processes to close
NEWLY_CREATED_PROCESSES: Set[int] = set()
# And which files to remove
CREATED_FILES: Set[Path] = set()
def setup_method(self, _method=None):
""" setup any state tied to the execution of the given method in a
class. setup_method is invoked for every test method of a class.
See https://docs.pytest.org/en/6.2.x/xunit_setup.html
"""
free_frontend_port = find_next_free_port()
free_backend_port = find_next_free_port(exclude_ports={free_frontend_port})
self.FRONTEND_ADDRESS = get_website_address(free_frontend_port)
self.BACKEND_ADDRESS = f'http://localhost:{free_backend_port}'
start_fastapi_dev_server(free_backend_port, self.NEWLY_CREATED_PROCESSES)
start_svelte_dev_server(
free_frontend_port,
self.NEWLY_CREATED_PROCESSES,
backend_proxy=f'localhost:{free_backend_port}',
)
def teardown_method(self, _method=None):
""" teardown any state that was previously setup with a setup_method
call.
"""
# Stop frontend + backend server
kill_processes(self.NEWLY_CREATED_PROCESSES)
self.NEWLY_CREATED_PROCESSES.clear()
# Remove files created by test
remove_leftover_files(self.CREATED_FILES)
self.CREATED_FILES.clear()
def test_backend_server_available(self, page: Page):
page.goto(self.BACKEND_ADDRESS)
assert '{"Hello":"World"}' in page.content()
def test_frontend_server_available(self, page: Page):
page.goto(self.FRONTEND_ADDRESS)
assert 'Home' in page.content()
assert 'About' in page.content()
assert 'Chat' in page.content()
assert 'Todo' in page.content()
assert 'Slugs' in page.content()
assert 'BrowserStorage' in page.content()
def test_add_todo_submit1(self, page: Page):
""" Add a new to-do entry """
page.goto(self.FRONTEND_ADDRESS)
assert 'Hello world!' in page.content()
page.click('#todo')
page.wait_for_url('/todo')
assert 'Unable to connect to server - running local mode' not in page.content()
test_text = 'my amazing test todo text1'
assert test_text not in page.content()
page.fill('#newTodoInput', test_text)
page.click('#submit1')
page.wait_for_timeout(100)
assert test_text in page.content()
assert 'Unable to connect to server - running local mode' not in page.content()
def test_add_todo_submit2(self, page: Page):
""" Add a new to-do entry """
page.goto(self.FRONTEND_ADDRESS)
assert 'Hello world!' in page.content()
page.click('#todo')
page.wait_for_url('/todo')
assert 'Unable to connect to server - running local mode' not in page.content()
test_text = 'my amazing test todo text1'
assert test_text not in page.content()
page.fill('#newTodoInput', test_text)
page.click('#submit2')
page.wait_for_timeout(100)
assert test_text in page.content()
assert 'Unable to connect to server - running local mode' not in page.content()
def test_add_todo_submit3(self, page: Page):
""" Add a new to-do entry """
page.goto(self.FRONTEND_ADDRESS)
assert 'Hello world!' in page.content()
page.click('#todo')
page.wait_for_url('/todo')
assert 'Unable to connect to server - running local mode' not in page.content()
test_text = 'my amazing test todo text1'
assert test_text not in page.content()
page.fill('#newTodoInput', test_text)
page.click('#submit3')
page.wait_for_timeout(100)
assert test_text in page.content()
assert 'Unable to connect to server - running local mode' not in page.content()
def test_chat_single(self, page: Page):
""" Chat with yourself """
page.goto(self.FRONTEND_ADDRESS)
assert 'Hello world!' in page.content()
page.click('#chat')
page.wait_for_url('/normalchat')
my_username = 'beep_boop'
assert my_username not in page.content()
page.fill('#username', my_username)
page.click('#connect')
# Send a message by pressing send button
some_text = 'bla blubb'
page.fill('#chatinput', some_text)
assert 'You' not in page.content()
page.click('#sendmessage')
assert 'You' in page.content()
assert some_text in page.content()
# Send a message by pressing enter
some_other_text = 'some other text'
page.type('#chatinput', f'{some_other_text}\n')
assert some_other_text in page.content()
def test_chat_two_people(self, context: BrowserContext):
""" Make sure chat between 2 people work """
# Connect with robot1
page1 = context.new_page()
page1.goto(self.FRONTEND_ADDRESS)
page1.click('#chat')
page1.wait_for_url('/normalchat')
my_username1 = 'robot1'
page1.fill('#username', my_username1)
page1.click('#connect')
# Send message from robot1
some_text1 = 'sometext1'
page1.fill('#chatinput', some_text1)
page1.click('#sendmessage')
assert 'You' in page1.content()
assert some_text1 in page1.content()
# Connect with robot2
page2 = context.new_page()
page2.goto(self.FRONTEND_ADDRESS)
page2.click('#chat')
page2.wait_for_url('/normalchat')
my_username2 = 'robot2'
page2.fill('#username', my_username2)
page2.click('#connect')
# Make sure robot1's messages are visible from robot2
assert my_username1 in page2.content()
assert some_text1 in page2.content()
# Send message from robot2
some_text2 = 'sometext2'
page2.fill('#chatinput', some_text2)
page2.click('#sendmessage')
assert 'You' in page2.content()
assert some_text2 in page2.content()
# Make sure robot2's messages are visible from robot1
assert my_username2 in page1.content()
assert some_text2 in page1.content() | en | 0.849221 | # Remember which node processes to close # And which files to remove setup any state tied to the execution of the given method in a class. setup_method is invoked for every test method of a class. See https://docs.pytest.org/en/6.2.x/xunit_setup.html teardown any state that was previously setup with a setup_method call. # Stop frontend + backend server # Remove files created by test Add a new to-do entry Add a new to-do entry Add a new to-do entry Chat with yourself # Send a message by pressing send button # Send a message by pressing enter Make sure chat between 2 people work # Connect with robot1 # Send message from robot1 # Connect with robot2 # Make sure robot1's messages are visible from robot2 # Send message from robot2 # Make sure robot2's messages are visible from robot1 | 2.093174 | 2 |
app/__init__.py | zhiyong-lv/flasky | 0 | 6620078 | <reponame>zhiyong-lv/flasky
from flask import Flask
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from flask_sqlalchemy import SQLAlchemy
from flask_mail import Mail
from flask_migrate import Migrate
from flask_login import LoginManager
from flask_pagedown import PageDown
bootstrap = Bootstrap()
moment = Moment()
db = SQLAlchemy()
mail = Mail()
migrate = Migrate()
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'
pagedown = PageDown()
def create_app(app_config):
app = Flask(__name__)
app_config.init_app(app)
# initial app
bootstrap.init_app(app)
moment.init_app(app)
db.init_app(app)
mail.init_app(app)
migrate.init_app(app)
pagedown.init_app(app)
login_manager.init_app(app)
# Start to import blueprint
from .main import main as main_blueprint
app.register_blueprint(main_blueprint, url_prefix='/')
from .auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint, url_prefix='/auth')
return app
| from flask import Flask
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from flask_sqlalchemy import SQLAlchemy
from flask_mail import Mail
from flask_migrate import Migrate
from flask_login import LoginManager
from flask_pagedown import PageDown
bootstrap = Bootstrap()
moment = Moment()
db = SQLAlchemy()
mail = Mail()
migrate = Migrate()
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'
pagedown = PageDown()
def create_app(app_config):
app = Flask(__name__)
app_config.init_app(app)
# initial app
bootstrap.init_app(app)
moment.init_app(app)
db.init_app(app)
mail.init_app(app)
migrate.init_app(app)
pagedown.init_app(app)
login_manager.init_app(app)
# Start to import blueprint
from .main import main as main_blueprint
app.register_blueprint(main_blueprint, url_prefix='/')
from .auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint, url_prefix='/auth')
return app | en | 0.642896 | # initial app # Start to import blueprint | 2.20782 | 2 |
ALE/utils.py | waggle-sensor/machinelearning | 0 | 6620079 | <gh_stars>0
import gdown
from zipfile import ZipFile
import os
import shutil
def downloadData():
""" Downloads example datasets: MNIST, CIFAR10, and a toy dataset """
url = "https://drive.google.com/uc?export=download&id=1ZaT0nRFVO2kvQT1fbh6b3dqsJAUEINJN"
output_path = "Data/DataSetZip.zip"
gdown.download(url,output_path,quiet=False)
with ZipFile(output_path, 'r') as zipObj:
# Extract all the contents of zip file in current directory
zipObj.extractall("Data")
os.remove(output_path)
os.rename("Data/DataSetsZip","Data/DataSets")
if os.path.isdir("Data/__MACOSX"):
shutil.rmtree("Data/__MACOSX")
| import gdown
from zipfile import ZipFile
import os
import shutil
def downloadData():
""" Downloads example datasets: MNIST, CIFAR10, and a toy dataset """
url = "https://drive.google.com/uc?export=download&id=1ZaT0nRFVO2kvQT1fbh6b3dqsJAUEINJN"
output_path = "Data/DataSetZip.zip"
gdown.download(url,output_path,quiet=False)
with ZipFile(output_path, 'r') as zipObj:
# Extract all the contents of zip file in current directory
zipObj.extractall("Data")
os.remove(output_path)
os.rename("Data/DataSetsZip","Data/DataSets")
if os.path.isdir("Data/__MACOSX"):
shutil.rmtree("Data/__MACOSX") | en | 0.708136 | Downloads example datasets: MNIST, CIFAR10, and a toy dataset # Extract all the contents of zip file in current directory | 3.140275 | 3 |
user/user_schema.py | jesseinit/feather-insure | 0 | 6620080 | <gh_stars>0
from app import ma
from utils.base_schema import BaseSchema
from marshmallow import fields, validate, pre_dump
from user.user_model import User
class RegisterSchema(BaseSchema):
first_name = fields.Str(
required=True,
validate=validate.Length(
min=2, max=50, error="First name should contain 2 to 50 characters"
),
error_messages={"required": "You've not entered your First Name"},
)
last_name = fields.Str(
required=True,
validate=validate.Length(
min=2, max=50, error="Last name should contain 2 to 50 characters"
),
error_messages={"required": "You've not entered your Last Name"},
)
email = fields.Email(
required=True,
error_messages={
"required": "You've not entered your Email Address",
"invalid": "Please enter a valid email address",
},
)
password = fields.Str(
required=True,
validate=validate.Length(
min=6, max=50, error="Password should contain 6 to 50 characters"
),
error_messages={"required": "You've not entered your password"},
)
@pre_dump
def preprocess(self, data, **kwargs):
data["email"] = data["email"].lower()
data["first_name"] = data["first_name"].title()
data["last_name"] = data["last_name"].title()
return data
class LoginSchema(BaseSchema):
email = fields.Email(
required=True,
error_messages={
"required": "You've not entered your Email Address",
"invalid": "Please enter a valid email address",
},
)
password = fields.Str(
required=True,
validate=validate.Length(
min=6, max=50, error="Password should contain 6 to 50 characters"
),
error_messages={"required": "You've not entered your password"},
)
@pre_dump
def preprocess(self, data, **kwargs):
data["email"] = data["email"].lower()
return data
class UserProfileSchema(ma.SQLAlchemyAutoSchema): # type: ignore
class Meta:
model = User
| from app import ma
from utils.base_schema import BaseSchema
from marshmallow import fields, validate, pre_dump
from user.user_model import User
class RegisterSchema(BaseSchema):
first_name = fields.Str(
required=True,
validate=validate.Length(
min=2, max=50, error="First name should contain 2 to 50 characters"
),
error_messages={"required": "You've not entered your First Name"},
)
last_name = fields.Str(
required=True,
validate=validate.Length(
min=2, max=50, error="Last name should contain 2 to 50 characters"
),
error_messages={"required": "You've not entered your Last Name"},
)
email = fields.Email(
required=True,
error_messages={
"required": "You've not entered your Email Address",
"invalid": "Please enter a valid email address",
},
)
password = fields.Str(
required=True,
validate=validate.Length(
min=6, max=50, error="Password should contain 6 to 50 characters"
),
error_messages={"required": "You've not entered your password"},
)
@pre_dump
def preprocess(self, data, **kwargs):
data["email"] = data["email"].lower()
data["first_name"] = data["first_name"].title()
data["last_name"] = data["last_name"].title()
return data
class LoginSchema(BaseSchema):
email = fields.Email(
required=True,
error_messages={
"required": "You've not entered your Email Address",
"invalid": "Please enter a valid email address",
},
)
password = fields.Str(
required=True,
validate=validate.Length(
min=6, max=50, error="Password should contain 6 to 50 characters"
),
error_messages={"required": "You've not entered your password"},
)
@pre_dump
def preprocess(self, data, **kwargs):
data["email"] = data["email"].lower()
return data
class UserProfileSchema(ma.SQLAlchemyAutoSchema): # type: ignore
class Meta:
model = User | it | 0.190853 | # type: ignore | 2.766915 | 3 |
dataBase_upload.py | hperugu/TransG | 0 | 6620081 | <filename>dataBase_upload.py
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 9 17:26:31 2021
@author: <NAME> PhD
"""
from sqlalchemy import create_engine
import pandas as pd
from sqlalchemy.sql import text
import pdb
class dbSetup():
def __init__(self,filename):
self.filename = filename
""" To create a connection engine for different database server"""
def creatEng(self,conType):
self.conType = conType
if conType == 'SQLite':
#engine = create_engine('sqlite:///:memory:', echo=True)
eng = create_engine('sqlite:///C:\\Users\\wb580236\\sqlite3\\Transport.db', echo=True)
elif conType == 'MariaDB':
eng = create_engine("mariadb+pymysql://<user>:<password>@<some_mariadb_host>[:<port>]/<dbname>?charset=utf8mb4", echo=False)
elif conType == 'MySQL':
eng = create_engine("mysql+mysqlconnector://<user>:<password>@<host>[:<port>]/<dbname>")
else:
print ("Connection Type Not Specified. Eg: SQLite, MySQl etc.")
pass
return eng
""" To Check a Table exists"""
def checkTabl(self,conType,table_name):
eng = self.creatEng(conType)
self.table_name = table_name
if eng.has_table(table_name):
chk_status = 'yes'
else :
chk_status = 'no'
return chk_status
""" Read IEA data dump in Xml format"""
def readData(self,conType,data,sqlite_table):
self.conType = conType
eng = self.creatEng(conType)
self.data = data
self.sqlite_table = sqlite_table
sqlite_connection = eng.connect()
if isinstance(data,pd.DataFrame):
# convert read df into a variable
newDf = data
elif isinstance(data, str):
# convert file into a data frame
newDf = self.readCSV(data)
else:
print ("Cannot identify the type of data structure")
pass
try:
newDf.to_sql(sqlite_table, sqlite_connection, if_exists='fail')
except:
print ("The table" + sqlite_table+ " already exists! ")
pass
sqlite_connection.close()
""" Read CSV file """
def readCSV(self,filename):
self.filename = filename
newDf = pd.read_csv(filename,header="infer")
return newDf
""" Preprocess the data"""
def Preprocess(self, conType):
self.conType = conType
eng = self.creatEng(conType)
# Start the session
with eng.begin() as conn:
# Create the PRIMARY KEY for Emission rate Table, if does not exist
conn.execute(text("BEGIN TRANSACTION;"))
conn.execute(text("DROP TABLE IF EXISTS FuelAll_old"))
conn.execute(text("ALTER TABLE FuelAll RENAME TO FuelAll_old;"))
conn.execute(text("CREATE TABLE FuelAll (ix BIGINT NOT NULL ,COUNTRY VARCHAR(50) NOT NULL,\
FLOW VARCHAR(50) NULL, PRODUCT VARCHAR(50) NULL, TIME INT NULL, OBS FLOAT NULL,\
OBS_STATUS VARCHAR(50) NULL, CONSTRAINT country_index \
PRIMARY KEY (ix,COUNTRY, PRODUCT, FLOW));"))
conn.execute(text("INSERT INTO FuelAll(ix, COUNTRY, FLOW,PRODUCT,TIME, OBS, OBS_STATUS) \
SELECT \"index\", COUNTRY, FLOW,PRODUCT,cast(TIME as INTEGER), cast(OBS as FLOAT), \
OBS_STATUS FROM FuelAll_old;"))
conn.execute(text ("COMMIT;"))
| <filename>dataBase_upload.py
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 9 17:26:31 2021
@author: <NAME> PhD
"""
from sqlalchemy import create_engine
import pandas as pd
from sqlalchemy.sql import text
import pdb
class dbSetup():
def __init__(self,filename):
self.filename = filename
""" To create a connection engine for different database server"""
def creatEng(self,conType):
self.conType = conType
if conType == 'SQLite':
#engine = create_engine('sqlite:///:memory:', echo=True)
eng = create_engine('sqlite:///C:\\Users\\wb580236\\sqlite3\\Transport.db', echo=True)
elif conType == 'MariaDB':
eng = create_engine("mariadb+pymysql://<user>:<password>@<some_mariadb_host>[:<port>]/<dbname>?charset=utf8mb4", echo=False)
elif conType == 'MySQL':
eng = create_engine("mysql+mysqlconnector://<user>:<password>@<host>[:<port>]/<dbname>")
else:
print ("Connection Type Not Specified. Eg: SQLite, MySQl etc.")
pass
return eng
""" To Check a Table exists"""
def checkTabl(self,conType,table_name):
eng = self.creatEng(conType)
self.table_name = table_name
if eng.has_table(table_name):
chk_status = 'yes'
else :
chk_status = 'no'
return chk_status
""" Read IEA data dump in Xml format"""
def readData(self,conType,data,sqlite_table):
self.conType = conType
eng = self.creatEng(conType)
self.data = data
self.sqlite_table = sqlite_table
sqlite_connection = eng.connect()
if isinstance(data,pd.DataFrame):
# convert read df into a variable
newDf = data
elif isinstance(data, str):
# convert file into a data frame
newDf = self.readCSV(data)
else:
print ("Cannot identify the type of data structure")
pass
try:
newDf.to_sql(sqlite_table, sqlite_connection, if_exists='fail')
except:
print ("The table" + sqlite_table+ " already exists! ")
pass
sqlite_connection.close()
""" Read CSV file """
def readCSV(self,filename):
self.filename = filename
newDf = pd.read_csv(filename,header="infer")
return newDf
""" Preprocess the data"""
def Preprocess(self, conType):
self.conType = conType
eng = self.creatEng(conType)
# Start the session
with eng.begin() as conn:
# Create the PRIMARY KEY for Emission rate Table, if does not exist
conn.execute(text("BEGIN TRANSACTION;"))
conn.execute(text("DROP TABLE IF EXISTS FuelAll_old"))
conn.execute(text("ALTER TABLE FuelAll RENAME TO FuelAll_old;"))
conn.execute(text("CREATE TABLE FuelAll (ix BIGINT NOT NULL ,COUNTRY VARCHAR(50) NOT NULL,\
FLOW VARCHAR(50) NULL, PRODUCT VARCHAR(50) NULL, TIME INT NULL, OBS FLOAT NULL,\
OBS_STATUS VARCHAR(50) NULL, CONSTRAINT country_index \
PRIMARY KEY (ix,COUNTRY, PRODUCT, FLOW));"))
conn.execute(text("INSERT INTO FuelAll(ix, COUNTRY, FLOW,PRODUCT,TIME, OBS, OBS_STATUS) \
SELECT \"index\", COUNTRY, FLOW,PRODUCT,cast(TIME as INTEGER), cast(OBS as FLOAT), \
OBS_STATUS FROM FuelAll_old;"))
conn.execute(text ("COMMIT;"))
| en | 0.681218 | # -*- coding: utf-8 -*- Created on Wed Jun 9 17:26:31 2021
@author: <NAME> PhD To create a connection engine for different database server #engine = create_engine('sqlite:///:memory:', echo=True) To Check a Table exists Read IEA data dump in Xml format # convert read df into a variable # convert file into a data frame Read CSV file Preprocess the data # Start the session # Create the PRIMARY KEY for Emission rate Table, if does not exist | 3.096601 | 3 |
Rahul.py | Rahul-m0/fossotober | 0 | 6620082 | print("<NAME>")
print("AM.EN.U4CSE19244")
print("S1 CSE")
print("Marvel Rocks")
| print("<NAME>")
print("AM.EN.U4CSE19244")
print("S1 CSE")
print("Marvel Rocks")
| none | 1 | 1.489202 | 1 | |
lib/fitbit/api.py | goztrk/django-htk | 206 | 6620083 | # Python Standard Library Imports
import base64
# Third Party (PyPI) Imports
import requests
import rollbar
# HTK Imports
from htk.lib.fitbit.constants import *
from htk.utils import refresh
from htk.utils import utcnow
class FitbitAPI(object):
    """Thin client wrapper for the Fitbit Web API.
    https://dev.fitbit.com/docs/
    """
    def __init__(self, social_auth_user, client_id, client_secret):
        """Constructor for FitbitAPI
        `social_auth_user` a python-social-auth object
        `client_id` OAuth2 Client Id from Fitbit App settings
        `client_secret` OAuth2 Client Secret from Fitbit App settings
        """
        self.user = social_auth_user.user
        self.social_auth_user = social_auth_user
        self.client_id = client_id
        self.client_secret = client_secret
    def get_resource_url(self, resource_type, resource_args=None):
        """Returns the resource URL for `resource_type`
        `resource_args` optional tuple applied when the configured
        resource path is a callable (date-parameterized endpoints).
        """
        resource_path = FITBIT_API_RESOURCES.get(resource_type)
        if resource_args:
            resource_path = resource_path(*resource_args)
        url = '%s%s' % (
            FITBIT_API_BASE_URL,
            resource_path,
        )
        return url
    def make_headers(self, auth_type, headers=None):
        """Make headers for Fitbit API request
        `auth_type` the string 'basic' or 'bearer'
        https://dev.fitbit.com/docs/basics/#language
        """
        # refreshes token if necessary
        if self.social_auth_user.access_token_expired():
            from social_django.utils import load_strategy
            # get_access_token() refreshes the token as a side effect.
            self.social_auth_user.get_access_token(load_strategy())
            self.social_auth_user = refresh(self.social_auth_user)
        if auth_type == 'bearer':
            auth_header = 'Bearer %s' % self.social_auth_user.extra_data['access_token']
        else:
            # base64.b64encode requires bytes (the old str argument raised
            # TypeError on Python 3); encode first, then decode the result
            # back to str for the header value.
            credentials = '%s:%s' % (self.client_id, self.client_secret,)
            auth_header = 'Basic %s' % base64.b64encode(credentials.encode('utf-8')).decode('ascii')
        _headers = {
            'Authorization' : auth_header,
            'Accept-Locale' : 'en_US',
            'Accept-Language' : 'en_US',
        }
        if headers:
            _headers.update(headers)
        headers = _headers
        return headers
    def get(self, resource_type, resource_args=None, params=None, headers=None, auth_type='bearer', refresh_token=True):
        """Performs a Fitbit API GET request
        `auth_type` the string 'basic' or 'bearer'
        `refresh_token` if True, will refresh the OAuth token when needed
        Returns the `requests.Response` object.
        """
        url = self.get_resource_url(resource_type, resource_args=resource_args)
        if headers is None:
            headers = self.make_headers(auth_type, headers=headers)
        response = requests.get(url, headers=headers, params=params)
        if response.status_code == 401:
            # TODO: deprecate. should proactively refresh
            if refresh_token:
                was_refreshed = self.refresh_oauth2_token()
                if was_refreshed:
                    # if token was successfully refreshed, repeat request once
                    response = self.get(resource_type, resource_args=resource_args, params=params, headers=headers, auth_type=auth_type, refresh_token=False)
                else:
                    pass
            else:
                extra_data = {
                    'user_id' : self.social_auth_user.user.id,
                    'username' : self.social_auth_user.user.username,
                    'response' : response.json(),
                }
                rollbar.report_message('Fitbit OAuth token expired, needs refreshing', extra_data=extra_data)
        elif response.status_code == 200:
            pass
        else:
            extra_data = {
                'response' : response.json(),
            }
            rollbar.report_message('Unexpected response from Fitbit API GET request', extra_data=extra_data)
        return response
    def post(self, resource_type, resource_args=None, params=None, headers=None, auth_type='bearer'):
        """Performs a Fitbit API POST request
        `auth_type` the string 'basic' or 'bearer'
        """
        url = self.get_resource_url(resource_type, resource_args=resource_args)
        headers = self.make_headers(auth_type, headers=headers)
        response = requests.post(url, headers=headers, params=params)
        return response
    ##################################################
    # Permissions API calls
    def refresh_oauth2_token(self):
        """Refresh the OAuth2 access token; returns True on success."""
        # TODO: deprecate
        params = {
            'grant_type' : 'refresh_token',
            'refresh_token' : self.social_auth_user.extra_data['refresh_token'],
        }
        headers = {
            'Content-Type' : 'application/x-www-form-urlencoded',
        }
        # NOTE: params must be passed by keyword -- the second positional
        # parameter of post() is resource_args, not params (old bug).
        response = self.post('refresh', params=params, headers=headers, auth_type='basic')
        if response.status_code == 200:
            response_json = response.json()
            self.social_auth_user.extra_data.update(response_json)
            self.social_auth_user.save()
            was_refreshed = True
        else:
            was_refreshed = False
            extra_data = {
                'user_id' : self.social_auth_user.user.id,
                'username' : self.social_auth_user.user.username,
                'response' : response.json(),
            }
            rollbar.report_message('Unable to refresh Fitbit OAuth2.0 token', extra_data=extra_data)
        return was_refreshed
    def revoke_access(self):
        """Revoke the stored OAuth2 access token; returns True on success."""
        params = {
            'token' : self.social_auth_user.extra_data['access_token'],
        }
        # NOTE: the old call self.post('revoke', params, 'basic') passed
        # params as resource_args and 'basic' as params; use keywords.
        response = self.post('revoke', params=params, auth_type='basic')
        if response.status_code == 200:
            was_revoked = True
        else:
            was_revoked = False
        return was_revoked
    ##################################################
    # Regular API calls
    ##
    # Activity
    # https://dev.fitbit.com/build/reference/web-api/activity/
    def get_activity_steps_past_month(self):
        """Get Steps for past month, most recent day first.
        Requires the 'activity' permission'
        https://dev.fitbit.com/docs/activity/
        Returns a list of daily entries, or None on failure.
        """
        response = self.get('activity-steps-monthly')
        if response.status_code == 200:
            activity = response.json()['activities-steps']
            activity = activity[::-1]
        else:
            activity = None
        return activity
    ##
    # Body & Weight
    # https://dev.fitbit.com/build/reference/web-api/body/
    def get_body_fat_logs_past_day(self):
        """Get Body Fat logs for the past day, most recent first.
        Returns a list of entries, or None on failure.
        """
        resource_args = (
            utcnow().strftime('%Y-%m-%d'),
            '1d',
        )
        response = self.get('fat', resource_args=resource_args)
        if response.status_code == 200:
            fat_logs = response.json()['fat']
            fat_logs = fat_logs[::-1]
        else:
            fat_logs = None
        return fat_logs
    def get_weight_logs_past_day(self):
        """Get Weight logs for the past day, most recent first.
        Returns a list of entries, or None on failure.
        """
        resource_args = (
            utcnow().strftime('%Y-%m-%d'),
            '1d',
        )
        response = self.get('weight', resource_args=resource_args)
        if response.status_code == 200:
            weight_logs = response.json()['weight']
            weight_logs = weight_logs[::-1]
        else:
            weight_logs = None
        return weight_logs
    def get_most_recent_weight(self):
        """Return the most recent weight log entry, or None if there is
        no entry in the past day (or the API call failed)."""
        weight_logs = self.get_weight_logs_past_day()
        if not weight_logs:
            # Old code indexed [0] unconditionally and crashed on an empty
            # or failed result.
            return None
        weight_log = weight_logs[0]
        return weight_log
    ##
    # Devices
    # https://dev.fitbit.com/build/reference/web-api/devices/
    def get_devices(self):
        """Get a list of Devices (empty list on failure).
        Requires the 'settings' permission
        https://dev.fitbit.com/docs/devices/
        """
        response = self.get('devices')
        if response.status_code == 200:
            devices = response.json()
        else:
            devices = []
        return devices
| # Python Standard Library Imports
import base64
# Third Party (PyPI) Imports
import requests
import rollbar
# HTK Imports
from htk.lib.fitbit.constants import *
from htk.utils import refresh
from htk.utils import utcnow
class FitbitAPI(object):
"""
https://dev.fitbit.com/docs/
"""
def __init__(self, social_auth_user, client_id, client_secret):
"""Constructor for FitbitAPI
`social_auth_user` a python-social-auth object
`client_id` OAuth2 Client Id from Fitbit App settings
`client_secret` OAuth2 Client Secret from Fitbit App settings
"""
self.user = social_auth_user.user
self.social_auth_user = social_auth_user
self.client_id = client_id
self.client_secret = client_secret
def get_resource_url(self, resource_type, resource_args=None):
"""Returns the resource URL for `resource_type`
"""
resource_path = FITBIT_API_RESOURCES.get(resource_type)
if resource_args:
resource_path = resource_path(*resource_args)
url = '%s%s' % (
FITBIT_API_BASE_URL,
resource_path,
)
return url
def make_headers(self, auth_type, headers=None):
"""Make headers for Fitbit API request
`auth_type` the string 'basic' or 'bearer'
https://dev.fitbit.com/docs/basics/#language
"""
# refreshes token if necessary
if self.social_auth_user.access_token_expired():
from social_django.utils import load_strategy
access_token = self.social_auth_user.get_access_token(load_strategy())
self.social_auth_user = refresh(self.social_auth_user)
if auth_type == 'bearer':
auth_header = 'Bearer %s' % self.social_auth_user.extra_data['access_token']
else:
auth_header = 'Basic %s' % base64.b64encode('%s:%s' % (self.client_id, self.client_secret,))
_headers = {
'Authorization' : auth_header,
'Accept-Locale' : 'en_US',
'Accept-Language' : 'en_US',
}
if headers:
_headers.update(headers)
headers = _headers
return headers
def get(self, resource_type, resource_args=None, params=None, headers=None, auth_type='bearer', refresh_token=True):
"""Performs a Fitbit API GET request
`auth_type` the string 'basic' or 'bearer'
`refresh_token` if True, will refresh the OAuth token when needed
"""
url = self.get_resource_url(resource_type, resource_args=resource_args)
if headers is None:
headers = self.make_headers(auth_type, headers=headers)
response = requests.get(url, headers=headers, params=params)
if response.status_code == 401:
# TODO: deprecate. should proactively refresh
if refresh_token:
was_refreshed = self.refresh_oauth2_token()
if was_refreshed:
# if token was successfully refreshed, repeat request
response = self.get(resource_type, resource_args=resource_args, params=params, headers=headers, auth_type=auth_type, refresh_token=False)
else:
pass
else:
extra_data = {
'user_id' : self.social_auth_user.user.id,
'username' : self.social_auth_user.user.username,
'response' : response.json(),
}
rollbar.report_message('Fitbit OAuth token expired, needs refreshing', extra_data=extra_data)
elif response.status_code == 200:
pass
else:
extra_data = {
'response' : response.json(),
}
rollbar.report_message('Unexpected response from Fitbit API GET request', extra_data=extra_data)
return response
def post(self, resource_type, resource_args=None, params=None, headers=None, auth_type='bearer'):
"""Performs a Fitbit API POST request
`auth_type` the string 'basic' or 'bearer'
"""
url = self.get_resource_url(resource_type, resource_args=resource_args)
headers = self.make_headers(auth_type, headers=headers)
response = requests.post(url, headers=headers, params=params)
return response
##################################################
# Permissions API calls
def refresh_oauth2_token(self):
# TODO: deprecate
params = {
'grant_type' : 'refresh_token',
'refresh_token' : self.social_auth_user.extra_data['refresh_token'],
}
headers = {
'Content-Type' : 'application/x-www-form-urlencoded',
}
response = self.post('refresh', params, headers=headers, auth_type='basic')
if response.status_code == 200:
response_json = response.json()
self.social_auth_user.extra_data.update(response_json)
self.social_auth_user.save()
was_refreshed = True
else:
was_refreshed = False
extra_data = {
'user_id' : self.social_auth_user.user.id,
'username' : self.social_auth_user.user.username,
'response' : response.json(),
}
rollbar.report_message('Unable to refresh Fitbit OAuth2.0 token', extra_data=extra_data)
return was_refreshed
def revoke_access(self):
params = {
'token' : self.social_auth_user.extra_data['access_token'],
}
response = self.post('revoke', params, 'basic')
if response.status_code == 200:
was_revoked = True
else:
was_revoked = False
return was_revoked
##################################################
# Regular API calls
##
# Activity
# https://dev.fitbit.com/build/reference/web-api/activity/
def get_activity_steps_past_month(self):
"""Get Steps for past month
Requires the 'activity' permission'
https://dev.fitbit.com/docs/activity/
"""
response = self.get('activity-steps-monthly')
if response.status_code == 200:
activity = response.json()['activities-steps']
activity = activity[::-1]
else:
activity = None
return activity
##
# Body & Weight
# https://dev.fitbit.com/build/reference/web-api/body/
def get_body_fat_logs_past_day(self):
"""Get Body Fat logs for the past day
"""
resource_args = (
utcnow().strftime('%Y-%m-%d'),
'1d',
)
response = self.get('fat', resource_args=resource_args)
if response.status_code == 200:
fat_logs = response.json()['fat']
fat_logs = fat_logs[::-1]
else:
fat_logs = None
return fat_logs
def get_weight_logs_past_day(self):
"""Get Weight logs for the past day
"""
resource_args = (
utcnow().strftime('%Y-%m-%d'),
'1d',
)
response = self.get('weight', resource_args=resource_args)
if response.status_code == 200:
weight_logs = response.json()['weight']
weight_logs = weight_logs[::-1]
else:
weight_logs = None
return weight_logs
def get_most_recent_weight(self):
weight_logs = self.get_weight_logs_past_day()
weight_log = weight_logs[0]
return weight_log
##
# Devices
# https://dev.fitbit.com/build/reference/web-api/devices/
def get_devices(self):
"""Get a list of Devices
Requires the 'settings' permission
https://dev.fitbit.com/docs/devices/
"""
response = self.get('devices')
if response.status_code == 200:
devices = response.json()
else:
devices = []
return devices
| en | 0.402447 | # Python Standard Library Imports # Third Party (PyPI) Imports # HTK Imports https://dev.fitbit.com/docs/ Constructor for FitbitAPI `social_auth_user` a python-social-auth object `client_id` OAuth2 Client Id from Fitbit App settings `client_secret` OAuth2 Client Secret from Fitbit App settings Returns the resource URL for `resource_type` Make headers for Fitbit API request `auth_type` the string 'basic' or 'bearer' https://dev.fitbit.com/docs/basics/#language # refreshes token if necessary Performs a Fitbit API GET request `auth_type` the string 'basic' or 'bearer' `refresh_token` if True, will refresh the OAuth token when needed # TODO: deprecate. should proactively refresh # if token was successfully refreshed, repeat request Performs a Fitbit API POST request `auth_type` the string 'basic' or 'bearer' ################################################## # Permissions API calls # TODO: deprecate ################################################## # Regular API calls ## # Activity # https://dev.fitbit.com/build/reference/web-api/activity/ Get Steps for past month Requires the 'activity' permission' https://dev.fitbit.com/docs/activity/ ## # Body & Weight # https://dev.fitbit.com/build/reference/web-api/body/ Get Body Fat logs for the past day Get Weight logs for the past day ## # Devices # https://dev.fitbit.com/build/reference/web-api/devices/ Get a list of Devices Requires the 'settings' permission https://dev.fitbit.com/docs/devices/ | 2.38806 | 2 |
app/users/migrations/0014_alter_organization_metadata.py | thevahidal/hoopoe-core | 5 | 6620084 | <reponame>thevahidal/hoopoe-core<filename>app/users/migrations/0014_alter_organization_metadata.py<gh_stars>1-10
# Generated by Django 4.0 on 2022-02-28 20:53
from django.db import migrations, models
class Migration(migrations.Migration):
    # Builds on the previous users migration (driver type alteration).
    dependencies = [
        ('users', '0013_alter_driver_type'),
    ]
    operations = [
        # Make Organization.metadata optional in forms (blank=True) and
        # default new rows to an empty dict (callable default, so each
        # row gets its own dict instance).
        migrations.AlterField(
            model_name='organization',
            name='metadata',
            field=models.JSONField(blank=True, default=dict),
        ),
    ]
| # Generated by Django 4.0 on 2022-02-28 20:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0013_alter_driver_type'),
]
operations = [
migrations.AlterField(
model_name='organization',
name='metadata',
field=models.JSONField(blank=True, default=dict),
),
] | en | 0.823688 | # Generated by Django 4.0 on 2022-02-28 20:53 | 1.499581 | 1 |
plotting/file_names.py | kienpt/site_discovery_public | 4 | 6620085 | <filename>plotting/file_names.py<gh_stars>1-10
def get_filenames(domain):
    """Return the result/classification CSV paths for one discovery domain.
    `domain` one of 'forum', 'ads' or 'ht' (escort data).
    Returns a 9-tuple (kw_files, bl_files, rl_files, fw_files,
    bandit_files, sf_files, ac_files, bi_files, outfile); each *_files
    entry is a [result_csv, classification_csv] pair and `outfile` is the
    name of the output CSV for plotting.
    Raises ValueError for an unknown domain (the old code fell through
    and crashed with UnboundLocalError at the return statement).
    """
    if domain == 'forum':
        fname = 'result_atf_stacking_search-kw_count-50_1530573706.79.csv' #50k
        kw_files = ['../../data/discovery/forum/keyword/' + fname,
                '../../data/discovery/forum/keyword/' + fname + '.classification']
        fname = 'result_atf_stacking_search-bl_count-50_1530738010.2.csv' #50K
        bl_files = ['../../data/discovery/forum/backlink/' + fname,
                '../../data/discovery/forum/backlink/' + fname + '.classification']
        fname = 'result_atf_stacking_search-rl_count-50_1530732222.28.csv' #50k
        rl_files = ['../../data/discovery/forum/related/' + fname,
                '../../data/discovery/forum/related/' + fname + '.classification']
        fname = 'result_atf_stacking_search-fw_count-50_1531369359.77.csv' #50k
        fw_files = ['../../data/discovery/forum/forward/' + fname,
                '../../data/discovery/forum/forward/' + fname + '.classification']
        fname = 'result_atf_stacking_search-bandit_count-50_1531073652.78.csv' #50k
        bandit_files = ['/home/vgc/kienpham/memex_project/site_discovery/data/discovery/forum/bandit/'+fname,
                '/home/vgc/kienpham/memex_project/site_discovery/data/discovery/forum/bandit/' + fname + '.classification']
        sf_files = ['../../baselines/ache/results/atf_forum_nocv_maxpages5/forum_nocv.csv',
                '../../data/discovery/seedfinder/forum_classification.csv']
        ac_files = ['../../baselines/ache/results/forum_crawl_hard_10/default/data_monitor/crawledpages.csv',
                '../../data/discovery/ache/forum_hard_10_classification.csv'] # 10 hard
        bi_files = ['../../baselines/ache/results/forum_bipartite/default/data_monitor/crawledpages.csv',
                '../../data/discovery/bipartite/forum_classification.csv']
        outfile = 'forum.csv'
    elif domain == 'ads':
        fname = 'result_atf_stacking_search-kw_count-50_1530664888.25.csv' # 50k
        kw_files = ['../../data/discovery/ads/keyword/' + fname,
                '../../data/discovery/ads/keyword/' + fname + '.classification']
        fname = 'result_atf_stacking_search-bl_count-50_1531100629.87.csv' # 50k
        bl_files = ['../../data/discovery/ads/backlink/' + fname,
                '../../data/discovery/ads/backlink/' + fname + '.classification']
        fname = 'result_atf_stacking_search-rl_count-50_1531631713.76.csv' # 50k
        rl_files = ['/home/vgc/kienpham/memex_project/site_discovery//data/discovery/ads/related/' + fname,
                '/home/vgc/kienpham/memex_project/site_discovery//data/discovery/ads/related/' + fname + '.classification']
        fname = 'result_atf_stacking_search-fw_count-50_1531200713.74.csv' # 50k
        fw_files = ['../../data/discovery/ads/forward/' + fname,
                '../../data/discovery/ads/forward/' + fname + '.classification']
        fname = 'result_atf_stacking_search-bandit_count-50_1531115744.04.csv' # 50k
        bandit_files = ['/home/vgc/kienpham/memex_project/site_discovery//data/discovery/ads/bandit/' + fname,
                '/home/vgc/kienpham/memex_project/site_discovery/data/discovery/ads/bandit/' + fname + '.classification']
        sf_files = ['../../baselines/ache/results/atf_ads_nocv_maxpages5/ads_nocv.csv',
                '../../data/discovery/seedfinder/ads_classification.csv']
        ac_files = ['../../baselines/ache/results/ads_crawl_hard_10/default/data_monitor/crawledpages.csv',
                '../../data/discovery/ache/ads_hard_10_classification.csv'] # 10 hard
        bi_files = ['../../baselines/ache/results/ads_bipartite/default/data_monitor/crawledpages.csv',
                '../../data/discovery/bipartite/ads_classification.csv']
        outfile = 'ads.csv'
    elif domain == 'ht':
        fname = 'result_ht_stacking_search-kw_count-50_1530635004.67.csv' # 10 hard
        kw_files = ['../../data/discovery/escort/keyword/' + fname,
                '../../data/discovery/escort/keyword/' + fname + '.classification']
        fname = 'result_ht_stacking_search-bl_count-50_1531156079.24.csv'
        bl_files = ['../../data/discovery/escort/backlink/' + fname,
                '../../data/discovery/escort/backlink/' + fname + '.classification'] # 50k
        fname = 'result_ht_stacking_search-rl_count-50_1531364504.0.csv' #50k
        rl_files = ['/home/vgc/kienpham/memex_project/site_discovery/data/discovery/escort/related/' + fname,
                '/home/vgc/kienpham/memex_project/site_discovery/data/discovery/escort/related/' + fname + '.classification']
        fname = 'result_ht_stacking_search-fw_count-50_1530807787.68.csv' # 50k
        fw_files = ['../../data/discovery/escort/forward/' + fname,
                '../../data/discovery/escort/forward/' + fname + '.classification']
        fname = 'result_ht_stacking_search-bandit_count-50_1531200883.78.csv' # 50k
        bandit_files = ['/home/vgc/kienpham/memex_project/site_discovery/data/discovery/escort/bandit/' + fname,
                '/home/vgc/kienpham/memex_project/site_discovery/data/discovery/escort/bandit/' + fname + '.classification']
        sf_files = ['../../baselines/ache/results/escort_nocv_maxpages5/escort_bing_api_200queries.csv',
                '../../data/discovery/seedfinder/escort_classification.csv']
        ac_files = ['../../baselines/ache/results/escort_crawl_hard_10/default/data_monitor/crawledpages.csv',
                '../../data/discovery/ache/escort_hard_10_classification.csv'] # 10 hard
        bi_files = ['../../baselines/ache/results/escort_bipartite/default/data_monitor/crawledpages.csv',
                '../../data/discovery/bipartite/escort_classification.csv']
        outfile = 'ht.csv'
    else:
        raise ValueError("unknown domain %r; expected 'forum', 'ads' or 'ht'" % (domain,))
    return kw_files, bl_files, rl_files, fw_files, bandit_files, sf_files, ac_files, bi_files, outfile
| <filename>plotting/file_names.py<gh_stars>1-10
def get_filenames(domain):
if domain == 'forum':
fname = 'result_atf_stacking_search-kw_count-50_1530573706.79.csv' #50k
kw_files = ['../../data/discovery/forum/keyword/' + fname,
'../../data/discovery/forum/keyword/' + fname + '.classification']
fname = 'result_atf_stacking_search-bl_count-50_1530738010.2.csv' #50K
bl_files = ['../../data/discovery/forum/backlink/' + fname,
'../../data/discovery/forum/backlink/' + fname + '.classification']
fname = 'result_atf_stacking_search-rl_count-50_1530732222.28.csv' #50k
rl_files = ['../../data/discovery/forum/related/' + fname,
'../../data/discovery/forum/related/' + fname + '.classification']
fname = 'result_atf_stacking_search-fw_count-50_1531369359.77.csv' #50k
fw_files = ['../../data/discovery/forum/forward/' + fname,
'../../data/discovery/forum/forward/' + fname + '.classification']
fname = 'result_atf_stacking_search-bandit_count-50_1531073652.78.csv' #50k
bandit_files = ['/home/vgc/kienpham/memex_project/site_discovery/data/discovery/forum/bandit/'+fname,
'/home/vgc/kienpham/memex_project/site_discovery/data/discovery/forum/bandit/' + fname + '.classification']
sf_files = ['../../baselines/ache/results/atf_forum_nocv_maxpages5/forum_nocv.csv',
'../../data/discovery/seedfinder/forum_classification.csv']
ac_files = ['../../baselines/ache/results/forum_crawl_hard_10/default/data_monitor/crawledpages.csv',
'../../data/discovery/ache/forum_hard_10_classification.csv'] # 10 hard
bi_files = ['../../baselines/ache/results/forum_bipartite/default/data_monitor/crawledpages.csv',
'../../data/discovery/bipartite/forum_classification.csv']
outfile = 'forum.csv'
elif domain == 'ads':
fname = 'result_atf_stacking_search-kw_count-50_1530664888.25.csv' # 50k
kw_files = ['../../data/discovery/ads/keyword/' + fname,
'../../data/discovery/ads/keyword/' + fname + '.classification']
fname = 'result_atf_stacking_search-bl_count-50_1531100629.87.csv' # 50k
bl_files = ['../../data/discovery/ads/backlink/' + fname,
'../../data/discovery/ads/backlink/' + fname + '.classification']
fname = 'result_atf_stacking_search-rl_count-50_1531631713.76.csv' # 50k
rl_files = ['/home/vgc/kienpham/memex_project/site_discovery//data/discovery/ads/related/' + fname,
'/home/vgc/kienpham/memex_project/site_discovery//data/discovery/ads/related/' + fname + '.classification']
fname = 'result_atf_stacking_search-fw_count-50_1531200713.74.csv' # 50k
fw_files = ['../../data/discovery/ads/forward/' + fname,
'../../data/discovery/ads/forward/' + fname + '.classification']
fname = 'result_atf_stacking_search-bandit_count-50_1531115744.04.csv' # 50k
bandit_files = ['/home/vgc/kienpham/memex_project/site_discovery//data/discovery/ads/bandit/' + fname,
'/home/vgc/kienpham/memex_project/site_discovery/data/discovery/ads/bandit/' + fname + '.classification']
sf_files = ['../../baselines/ache/results/atf_ads_nocv_maxpages5/ads_nocv.csv',
'../../data/discovery/seedfinder/ads_classification.csv']
ac_files = ['../../baselines/ache/results/ads_crawl_hard_10/default/data_monitor/crawledpages.csv',
'../../data/discovery/ache/ads_hard_10_classification.csv'] # 10 hard
bi_files = ['../../baselines/ache/results/ads_bipartite/default/data_monitor/crawledpages.csv',
'../../data/discovery/bipartite/ads_classification.csv']
outfile = 'ads.csv'
elif domain == 'ht':
fname = 'result_ht_stacking_search-kw_count-50_1530635004.67.csv' # 10 hard
kw_files = ['../../data/discovery/escort/keyword/' + fname,
'../../data/discovery/escort/keyword/' + fname + '.classification']
fname = 'result_ht_stacking_search-bl_count-50_1531156079.24.csv'
bl_files = ['../../data/discovery/escort/backlink/' + fname,
'../../data/discovery/escort/backlink/' + fname + '.classification'] # 50k
fname = 'result_ht_stacking_search-rl_count-50_1531364504.0.csv' #50k
rl_files = ['/home/vgc/kienpham/memex_project/site_discovery/data/discovery/escort/related/' + fname,
'/home/vgc/kienpham/memex_project/site_discovery/data/discovery/escort/related/' + fname + '.classification']
fname = 'result_ht_stacking_search-fw_count-50_1530807787.68.csv' # 50k
fw_files = ['../../data/discovery/escort/forward/' + fname,
'../../data/discovery/escort/forward/' + fname + '.classification']
fname = 'result_ht_stacking_search-bandit_count-50_1531200883.78.csv' # 50k
bandit_files = ['/home/vgc/kienpham/memex_project/site_discovery/data/discovery/escort/bandit/' + fname,
'/home/vgc/kienpham/memex_project/site_discovery/data/discovery/escort/bandit/' + fname + '.classification']
sf_files = ['../../baselines/ache/results/escort_nocv_maxpages5/escort_bing_api_200queries.csv',
'../../data/discovery/seedfinder/escort_classification.csv']
ac_files = ['../../baselines/ache/results/escort_crawl_hard_10/default/data_monitor/crawledpages.csv',
'../../data/discovery/ache/escort_hard_10_classification.csv'] # 10 hard
bi_files = ['../../baselines/ache/results/escort_bipartite/default/data_monitor/crawledpages.csv',
'../../data/discovery/bipartite/escort_classification.csv']
outfile = 'ht.csv'
return kw_files, bl_files, rl_files, fw_files, bandit_files, sf_files, ac_files, bi_files, outfile
| en | 0.605183 | #50k #50K #50k #50k #50k # 10 hard # 50k # 50k # 50k # 50k # 50k # 10 hard # 10 hard # 50k #50k # 50k # 50k # 10 hard | 2.411731 | 2 |
botclean/optimize.py | Kabix1/HackerRank | 0 | 6620086 | from skopt.space import Real
from skopt.utils import use_named_args
from skopt import gp_minimize
from Strategies import closest_prio4
from test import generate_board, try_strategy
# Search space: the four tunable weights (A-D) of the closest_prio4
# cleaning strategy. Bounds chosen by the author; A/B in [0,1], C/D in
# [0,10] -- presumably reflecting their relative magnitudes (unverified).
space = [
    Real(0, 1, name="A"),
    Real(0, 1, name="B"),
    Real(0, 10, name="C"),
    Real(0, 10, name="D")
]
# Objective: average number of steps the strategy needs over 200 random
# boards (lower is better). use_named_args maps the point in `space`
# onto keyword arguments; the weights are set as module attributes on
# closest_prio4 before each evaluation.
@use_named_args(space)
def objective(**params):
    closest_prio4.A = params["A"]
    closest_prio4.B = params["B"]
    closest_prio4.C = params["C"]
    closest_prio4.D = params["D"]
    num_tries = 200
    steps = 0
    for _ in range(num_tries):
        pos, board = generate_board()
        steps += try_strategy(closest_prio4, pos, board)
    return steps / num_tries
# Bayesian optimization with a Gaussian-process surrogate: 100 objective
# evaluations, fixed seed for reproducibility, 6 parallel jobs.
res_gp = gp_minimize(objective,
                     space,
                     n_calls=100,
                     random_state=0,
                     verbose=True,
                     n_jobs=6,
                     acq_optimizer="lbfgs")
# Best weight vector found, in the order A, B, C, D.
print(res_gp.x)
| from skopt.space import Real
from skopt.utils import use_named_args
from skopt import gp_minimize
from Strategies import closest_prio4
from test import generate_board, try_strategy
space = [
Real(0, 1, name="A"),
Real(0, 1, name="B"),
Real(0, 10, name="C"),
Real(0, 10, name="D")
]
@use_named_args(space)
def objective(**params):
closest_prio4.A = params["A"]
closest_prio4.B = params["B"]
closest_prio4.C = params["C"]
closest_prio4.D = params["D"]
num_tries = 200
steps = 0
for _ in range(num_tries):
pos, board = generate_board()
steps += try_strategy(closest_prio4, pos, board)
return steps / num_tries
res_gp = gp_minimize(objective,
space,
n_calls=100,
random_state=0,
verbose=True,
n_jobs=6,
acq_optimizer="lbfgs")
print(res_gp.x)
| none | 1 | 2.372823 | 2 | |
vantage6/common/__init__.py | IKNL/vantage6-common | 0 | 6620087 | import os
import base64
import click
import appdirs
from colorama import init, Fore, Style
from ._version import version_info, __version__
from vantage6.common.globals import STRING_ENCODING
# init colorstuff
init()
def logger_name(special__name__):
    """Return the final dotted component of *special__name__*, shortened
    to an 11-character prefix plus ``..`` when longer than 14 characters."""
    tail = special__name__.rsplit('.', 1)[-1]
    if len(tail) <= 14:
        return tail
    return tail[:11] + ".."
class Singleton(type):
    """Metaclass that caches one instance per class: every call to the
    class after the first returns the originally constructed object."""
    _instances = {}
    def __call__(cls, *args, **kwargs):
        try:
            return cls._instances[cls]
        except KeyError:
            # First instantiation: construct and memoize.
            obj = super().__call__(*args, **kwargs)
            cls._instances[cls] = obj
            return obj
def bytes_to_base64s(bytes_):
    """Return *bytes_* as a base64-encoded text string (the base64 output
    is decoded to str using the project-wide STRING_ENCODING constant)."""
    return base64.b64encode(bytes_).decode(STRING_ENCODING)
def base64s_to_bytes(bytes_string):
    """Return the raw bytes decoded from the base64 text string
    *bytes_string* (inverse of :func:`bytes_to_base64s`)."""
    return base64.b64decode(bytes_string.encode(STRING_ENCODING))
#
# CLI prints
#
def echo(msg, level="info"):
    """Print *msg* to the terminal with a colorized, left-padded level tag.
    `level` one of "error", "warn", "info", "debug".
    NOTE(review): an unrecognized level makes ``type_`` None, and the
    format spec in the f-string then raises TypeError -- callers are
    expected to go through the four level-specific wrappers.
    """
    type_ = {
        "error": f"[{Fore.RED}error{Style.RESET_ALL}]",
        "warn": f"[{Fore.YELLOW}warn{Style.RESET_ALL}]",
        "info": f"[{Fore.GREEN}info{Style.RESET_ALL}]",
        "debug": f"[{Fore.CYAN}debug{Style.RESET_ALL}]",
    }.get(level)
    click.echo(f"{type_:16} - {msg}")
def info(msg):
    """Echo *msg* at "info" level."""
    echo(msg, "info")
def warning(msg):
    """Echo *msg* at "warn" level."""
    echo(msg, "warn")
def error(msg):
    """Echo *msg* at "error" level."""
    echo(msg, "error")
def debug(msg):
    """Echo *msg* at "debug" level."""
    echo(msg, "debug")
class ClickLogger:
    """Logger-like facade that forwards messages to the click-based CLI
    print helpers (the original docstring had a stray fourth quote).
    The static methods shadow the module-level function names they call,
    but name resolution inside a method happens in the module namespace
    at call time, so each call reaches the module-level helper, not the
    method itself.
    """
    @staticmethod
    def info(msg):
        info(msg)
    @staticmethod
    def warn(msg):
        warning(msg)
    @staticmethod
    def error(msg):
        error(msg)
    @staticmethod
    def debug(msg):
        debug(msg)
def check_config_write_permissions(system_folders=False):
    """Return True when the relevant appdirs config directory is writable.
    `system_folders` check the site-wide config dir instead of the
    per-user one. A warning is printed for every non-writable directory.
    """
    app_dirs = appdirs.AppDirs()
    if system_folders:
        candidates = [app_dirs.site_config_dir]
    else:
        candidates = [app_dirs.user_config_dir]
    writable = True
    for candidate in candidates:
        if os.access(candidate, os.W_OK):
            continue
        warning(f"No write permissions at '{candidate}'")
        writable = False
    return writable
def check_write_permissions(folder):
    """Return True when *folder* is writable; otherwise print a warning
    and return False."""
    if os.access(folder, os.W_OK):
        return True
    warning(f"No write permissions at '{folder}'")
    return False
| import os
import base64
import click
import appdirs
from colorama import init, Fore, Style
from ._version import version_info, __version__
from vantage6.common.globals import STRING_ENCODING
# init colorstuff
init()
def logger_name(special__name__):
log_name = special__name__.split('.')[-1]
if len(log_name) > 14:
log_name = log_name[:11] + ".."
return log_name
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
instance = super(Singleton, cls).__call__(*args, **kwargs)
cls._instances[cls] = instance
return cls._instances[cls]
def bytes_to_base64s(bytes_):
"""Return bytes as base64 encoded string."""
return base64.b64encode(bytes_).decode(STRING_ENCODING)
def base64s_to_bytes(bytes_string):
"""Return base64 encoded string as bytes."""
return base64.b64decode(bytes_string.encode(STRING_ENCODING))
#
# CLI prints
#
def echo(msg, level="info"):
type_ = {
"error": f"[{Fore.RED}error{Style.RESET_ALL}]",
"warn": f"[{Fore.YELLOW}warn{Style.RESET_ALL}]",
"info": f"[{Fore.GREEN}info{Style.RESET_ALL}]",
"debug": f"[{Fore.CYAN}debug{Style.RESET_ALL}]",
}.get(level)
click.echo(f"{type_:16} - {msg}")
def info(msg):
echo(msg, "info")
def warning(msg):
echo(msg, "warn")
def error(msg):
echo(msg, "error")
def debug(msg):
echo(msg, "debug")
class ClickLogger:
""""Logs output to the click interface."""
@staticmethod
def info(msg):
info(msg)
@staticmethod
def warn(msg):
warning(msg)
@staticmethod
def error(msg):
error(msg)
@staticmethod
def debug(msg):
debug(msg)
def check_config_write_permissions(system_folders=False):
dirs = appdirs.AppDirs()
if system_folders:
dirs_to_check = [
dirs.site_config_dir
]
else:
dirs_to_check = [
dirs.user_config_dir
]
w_ok = True
for dir_ in dirs_to_check:
if not os.access(dir_, os.W_OK):
warning(f"No write permissions at '{dir_}'")
w_ok = False
return w_ok
def check_write_permissions(folder):
w_ok = True
if not os.access(folder, os.W_OK):
warning(f"No write permissions at '{folder}'")
w_ok = False
return w_ok
| en | 0.770231 | # init colorstuff Return bytes as base64 encoded string. Return base64 encoded string as bytes. # # CLI prints # "Logs output to the click interface. | 1.957037 | 2 |
gui/launcher/launcher.py | Alestrio/PeopleVoice | 0 | 6620088 | <filename>gui/launcher/launcher.py
#
# Copyright (c) 2020 by <NAME>, <NAME> and <NAME>. All Rights Reserved.
#
import launcherview
import settings as settings
import sys
sys.path.insert(0, "ioactions")
sys.path.insert(0, "gui/admin")
sys.path.insert(0, "gui/configurator")
sys.path.insert(0, "gui/adminlogin")
sys.path.insert(0, "gui/student")
import admin
import adminlogin
import configurator
import student
class Launcher:
    """Application entry-point controller: owns the launcher window and
    dispatches into the admin, configurator and student sub-modes."""
    def __init__(self):
        self.view = launcherview.Launcherview(self)
        self.sett = settings.Settings('settings.yaml') # TODO path in a global var
    def startLauncher(self):
        """Show the launcher window."""
        self.view.createAndShowWindow()
    def startAdminMode(self):
        """Close the launcher and enter admin mode after a successful login."""
        self.view.window.destroy()
        login = adminlogin.Adminlogin(self.sett.getAdminPWHash(), self.sett.getAdminIdentifier())
        if login.isAccessGranted():
            admin.Admin()
    def startFirstRun(self):
        """Run the first-time configurator; on success, require an admin
        login and open admin mode."""
        setup = configurator.Configurator()
        if setup.hasSucceeded():
            login = adminlogin.Adminlogin(self.sett.getAdminPWHash(), self.sett.getAdminIdentifier())
            if login.isAccessGranted():
                admin.Admin()
    def startResultMode(self):
        """Placeholder -- result mode is not implemented yet."""
    def startStudentMode(self):
        """Close the launcher and enter student mode."""
        self.view.window.destroy()
        student.Student()
| <filename>gui/launcher/launcher.py
#
# Copyright (c) 2020 by <NAME>, <NAME> and <NAME>. All Rights Reserved.
#
import launcherview
import settings as settings
import sys
sys.path.insert(0, "ioactions")
sys.path.insert(0, "gui/admin")
sys.path.insert(0, "gui/configurator")
sys.path.insert(0, "gui/adminlogin")
sys.path.insert(0, "gui/student")
import admin
import adminlogin
import configurator
import student
class Launcher:
    """Top-level controller: shows the launcher window and dispatches the
    user into the admin / first-run / result / student flows."""

    def __init__(self):
        # The view keeps a back-reference to this controller for callbacks.
        self.view = launcherview.Launcherview(self)
        self.sett = settings.Settings('settings.yaml')  # TODO path in a global var
        return None

    def startLauncher(self):
        # Open the launcher window (enters the GUI main loop).
        self.view.createAndShowWindow()
        return None

    def startAdminMode(self):
        # Close the launcher, ask for admin credentials, then open the admin GUI.
        self.view.window.destroy()
        adminlog = adminlogin.Adminlogin(self.sett.getAdminPWHash(), self.sett.getAdminIdentifier())
        if adminlog.isAccessGranted():
            adm = admin.Admin()
        return None

    def startFirstRun(self):
        # First-run configuration wizard; on success continue to admin login.
        config = configurator.Configurator()
        if config.hasSucceeded():
            adminlog = adminlogin.Adminlogin(self.sett.getAdminPWHash(), self.sett.getAdminIdentifier())
            if adminlog.isAccessGranted():
                adm = admin.Admin()
        return None

    def startResultMode(self):
        # Not implemented yet.
        return None

    def startStudentMode(self):
        # Close the launcher and open the student GUI.
        self.view.window.destroy()
        stud = student.Student()
        return None
| en | 0.838229 | # # Copyright (c) 2020 by <NAME>, <NAME> and <NAME>. All Rights Reserved. # # TODO path in a global var | 2.326727 | 2 |
tests/r/test_cities.py | hajime9652/observations | 199 | 6620089 | <gh_stars>100-1000
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.cities import cities
def test_cities():
    """Test module cities.py by downloading cities.csv and checking that the
    extracted data has 11 rows and 11 columns.

    Fixes over the original: the scratch directory is always removed (it was
    leaked on success), the exception is re-raised properly (the original
    ended with ``raise()``, which raises an empty tuple and turns any
    assertion failure into a TypeError), and the bare ``except:`` is gone.
    """
    test_path = tempfile.mkdtemp()
    try:
        x_train, metadata = cities(test_path)
        assert x_train.shape == (11, 11)
    finally:
        shutil.rmtree(test_path)
| from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.cities import cities
def test_cities():
"""Test module cities.py by downloading
cities.csv and testing shape of
extracted data has 11 rows and 11 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = cities(test_path)
try:
assert x_train.shape == (11, 11)
except:
shutil.rmtree(test_path)
raise() | en | 0.901499 | Test module cities.py by downloading cities.csv and testing shape of extracted data has 11 rows and 11 columns | 2.456505 | 2 |
legacy/ABC_111/C1.py | mo-mo-666/AtCoder | 0 | 6620090 | #-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: mo-mo-
#
# Created: 29/09/2018
# Copyright: (c) mo-mo- 2018
# Licence: <your licence>
#-------------------------------------------------------------------------------
# Make the sequence alternate between two distinct values (all even-indexed
# elements equal, all odd-indexed elements equal) with the fewest rewrites.
n = int(input())
values = list(map(int, input().split()))

even_counts = {}  # value -> occurrences at even (0-based) positions
odd_counts = {}   # value -> occurrences at odd positions
for index, value in enumerate(values):
    bucket = even_counts if index % 2 == 0 else odd_counts
    bucket[value] = bucket.get(value, 0) + 1

# Most frequent values first; ties keep first-seen order (stable sort).
odd_ranked = sorted(odd_counts.items(), key=lambda item: -item[1])
even_ranked = sorted(even_counts.items(), key=lambda item: -item[1])

if odd_ranked[0][0] != even_ranked[0][0]:
    # Distinct favourites on the two parities: keep both, rewrite the rest.
    answer = n - odd_ranked[0][1] - even_ranked[0][1]
else:
    # Same favourite on both parities: one side must use its runner-up.
    if len(odd_ranked) >= 2 and len(even_ranked) >= 2:
        answer = min(n - odd_ranked[1][1] - even_ranked[0][1],
                     n - odd_ranked[0][1] - even_ranked[1][1])
    elif len(odd_ranked) >= 2:
        answer = min(n - odd_ranked[1][1] - even_ranked[0][1],
                     n - odd_ranked[0][1])
    elif len(even_ranked) >= 2:
        answer = min(n - odd_ranked[0][1] - even_ranked[1][1],
                     n - even_ranked[0][1])
    else:
        # Only one distinct value on each side: rewrite half the elements.
        answer = n // 2

print(answer)
| #-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: mo-mo-
#
# Created: 29/09/2018
# Copyright: (c) mo-mo- 2018
# Licence: <your licence>
#-------------------------------------------------------------------------------
# Make the sequence alternate between two distinct values (all even-indexed
# elements equal, all odd-indexed elements equal) with the fewest rewrites.
n = int(input())
vs = list(map(int, input().split()))
v_odd = {}   # value -> occurrences at odd (0-based) indices
v_even = {}  # value -> occurrences at even indices
for i in range(n):
    if i % 2 == 0:
        v = vs[i]
        if v in v_even:
            v_even[v] += 1
        else:
            v_even[v] = 1
    else:
        v = vs[i]
        if v in v_odd:
            v_odd[v] += 1
        else:
            v_odd[v] = 1
# Rank values by frequency, most common first (stable for ties).
odd_sort = sorted(v_odd.items(), key=lambda x: -x[1])
even_sort = sorted(v_even.items(), key=lambda x: -x[1])
if odd_sort[0][0] == even_sort[0][0]:
    # Same favourite value on both parities: one side must fall back to
    # its runner-up (if it has one).
    if len(odd_sort) >= 2:
        oddnext = odd_sort[1][1]
        if len(even_sort) >= 2:
            evennext = even_sort[1][1]
            ans = min(n-oddnext-even_sort[0][1], n-odd_sort[0][1]-evennext)
        else:
            ans = min(n-oddnext-even_sort[0][1], n-odd_sort[0][1])
    else:
        if len(even_sort) >= 2:
            evennext = even_sort[1][1]
            ans = min(n-odd_sort[0][1]-evennext, n-even_sort[0][1])
        else:
            # Only one distinct value on each side: rewrite half the elements.
            ans = n // 2
else:
    # Distinct favourites: keep both, rewrite everything else.
    ans = n - odd_sort[0][1] - even_sort[0][1]
print(ans)
| en | 0.211859 | #------------------------------------------------------------------------------- # Name: module1 # Purpose: # # Author: mo-mo- # # Created: 29/09/2018 # Copyright: (c) mo-mo- 2018 # Licence: <your licence> #------------------------------------------------------------------------------- | 3.613764 | 4 |
supriya/commands/NodeQueryRequest.py | deeuu/supriya | 0 | 6620091 | import supriya.osc
from supriya.commands.Request import Request
from supriya.enums import RequestId
class NodeQueryRequest(Request):
    """A ``/n_query`` request.

    Asks the server for information about a node::

        >>> import supriya.commands
        >>> supriya.commands.NodeQueryRequest(node_id=1000).to_osc()
        OscMessage('/n_query', 1000)
    """

    request_id = RequestId.NODE_QUERY

    def __init__(self, node_id=None):
        Request.__init__(self)
        self._node_id = node_id

    def to_osc(self, *, with_placeholders=False):
        """Build the OSC message for this request."""
        return supriya.osc.OscMessage(self.request_name, int(self.node_id))

    @property
    def node_id(self):
        return self._node_id

    @property
    def response_patterns(self):
        # The server answers with /n_info for this node, or /fail on error.
        return ["/n_info", self.node_id], ["/fail"]
| import supriya.osc
from supriya.commands.Request import Request
from supriya.enums import RequestId
class NodeQueryRequest(Request):
    """
    A /n_query request.

    ::

        >>> import supriya.commands
        >>> request = supriya.commands.NodeQueryRequest(
        ...     node_id=1000,
        ...     )
        >>> request
        NodeQueryRequest(
            node_id=1000,
            )

    ::

        >>> request.to_osc()
        OscMessage('/n_query', 1000)

    """

    ### CLASS VARIABLES ###

    # Wire identifier for this request type.
    request_id = RequestId.NODE_QUERY

    ### INITIALIZER ###

    def __init__(self, node_id=None):
        Request.__init__(self)
        self._node_id = node_id

    ### PUBLIC METHODS ###

    def to_osc(self, *, with_placeholders=False):
        # `with_placeholders` is accepted for API symmetry with other
        # requests; it is unused here.
        request_id = self.request_name
        node_id = int(self.node_id)
        message = supriya.osc.OscMessage(request_id, node_id)
        return message

    ### PUBLIC PROPERTIES ###

    @property
    def node_id(self):
        return self._node_id

    @property
    def response_patterns(self):
        # The server answers with /n_info for this node, or /fail on error.
        return ["/n_info", self.node_id], ["/fail"]
| en | 0.265898 | A /n_query request. :: >>> import supriya.commands >>> request = supriya.commands.NodeQueryRequest( ... node_id=1000, ... ) >>> request NodeQueryRequest( node_id=1000, ) :: >>> request.to_osc() OscMessage('/n_query', 1000) ### CLASS VARIABLES ### ### INITIALIZER ### ### PUBLIC METHODS ### ### PUBLIC PROPERTIES ### | 2.41222 | 2 |
iclientpy/iclientpy/rest/api/securitymanagement.py | SuperMap/iClientPython | 28 | 6620092 | <filename>iclientpy/iclientpy/rest/api/securitymanagement.py
from typing import List
from ..decorator import post, get, put, delete
from .model import UserEntity, UserInfo, RoleEntity, MethodResult
class SecurityManagement:
    """Declarative REST bindings for iServer user/role security management.

    Method bodies are intentionally empty: the @get/@post/@put/@delete
    decorators generate the HTTP call from the route, the `entityKW`
    request body, and the annotated return type.
    """

    @get('/manager/security/users')
    def get_users(self) -> List[List[str]]:
        # List all users (rows of string attributes).
        pass

    @post('/manager/security/users', entityKW='entity')
    def post_users(self, entity: UserEntity) -> MethodResult:
        # Create a user.
        pass

    @put('/manager/security/users', entityKW='entity')
    def put_users(self, entity: List[str]) -> MethodResult:
        # PUT with a list of user names -- presumably a batch operation on
        # the named users; TODO confirm against the iServer REST docs.
        pass

    @get('/manager/security/users/{username}')
    def get_user(self, username: str) -> UserInfo:
        # Fetch one user's info.
        pass

    @put('/manager/security/users/{username}', entityKW='entity')
    def put_user(self, username: str, entity: UserEntity) -> MethodResult:
        # Update one user.
        pass

    @delete('/manager/security/users/{username}')
    def delete_user(self, username: str) -> MethodResult:
        # Remove one user.
        pass

    @get('/manager/security/roles')
    def get_roles(self) -> List[RoleEntity]:
        # List all roles.
        pass

    @post('/manager/security/roles', entityKW='entity')
    def post_roles(self, entity: RoleEntity) -> MethodResult:
        # Create a role.
        pass

    @put('/manager/security/roles', entityKW='entity')
    def put_roles(self, entity: List[str]) -> MethodResult:
        # PUT with a list of role names -- presumably a batch operation;
        # TODO confirm against the iServer REST docs.
        pass

    @get('/manager/security/roles/{role}')
    def get_role(self, role: str) -> RoleEntity:
        # Fetch one role.
        pass

    @put('/manager/security/roles/{role}', entityKW='entity')
    def put_role(self, role: str, entity: RoleEntity) -> MethodResult:
        # Update one role.
        pass

    @delete('/manager/security/roles/{role}')
    def delete_role(self, role: str) -> MethodResult:
        # Remove one role.
        pass
class PortalSecurityManagement(SecurityManagement):
    """iPortal variant: the user listing lives under /portalusers and
    returns structured UserInfo objects instead of raw string rows."""

    @get('/manager/security/portalusers')
    def get_users(self) -> List[UserInfo]:
        pass
| <filename>iclientpy/iclientpy/rest/api/securitymanagement.py
from typing import List
from ..decorator import post, get, put, delete
from .model import UserEntity, UserInfo, RoleEntity, MethodResult
class SecurityManagement:
    """REST API stubs for iServer user and role management.

    The @get/@post/@put/@delete decorators implement each call from the
    route, the `entityKW` body keyword, and the return annotation, so the
    `pass` bodies are deliberate.
    """

    @get('/manager/security/users')
    def get_users(self) -> List[List[str]]:
        # All users, as rows of string attributes.
        pass

    @post('/manager/security/users', entityKW='entity')
    def post_users(self, entity: UserEntity) -> MethodResult:
        # Create a user.
        pass

    @put('/manager/security/users', entityKW='entity')
    def put_users(self, entity: List[str]) -> MethodResult:
        # Batch operation on the named users -- TODO confirm semantics.
        pass

    @get('/manager/security/users/{username}')
    def get_user(self, username: str) -> UserInfo:
        # One user's info.
        pass

    @put('/manager/security/users/{username}', entityKW='entity')
    def put_user(self, username: str, entity: UserEntity) -> MethodResult:
        # Update one user.
        pass

    @delete('/manager/security/users/{username}')
    def delete_user(self, username: str) -> MethodResult:
        # Remove one user.
        pass

    @get('/manager/security/roles')
    def get_roles(self) -> List[RoleEntity]:
        # All roles.
        pass

    @post('/manager/security/roles', entityKW='entity')
    def post_roles(self, entity: RoleEntity) -> MethodResult:
        # Create a role.
        pass

    @put('/manager/security/roles', entityKW='entity')
    def put_roles(self, entity: List[str]) -> MethodResult:
        # Batch operation on the named roles -- TODO confirm semantics.
        pass

    @get('/manager/security/roles/{role}')
    def get_role(self, role: str) -> RoleEntity:
        # One role.
        pass

    @put('/manager/security/roles/{role}', entityKW='entity')
    def put_role(self, role: str, entity: RoleEntity) -> MethodResult:
        # Update one role.
        pass

    @delete('/manager/security/roles/{role}')
    def delete_role(self, role: str) -> MethodResult:
        # Remove one role.
        pass
class PortalSecurityManagement(SecurityManagement):
    """iPortal variant: user listing is served from /portalusers and yields
    UserInfo objects rather than raw string rows."""

    @get('/manager/security/portalusers')
    def get_users(self) -> List[UserInfo]:
        pass
| none | 1 | 2.402569 | 2 | |
arachni.py | Mrh1l4n9/dorkbott | 1 | 6620093 | <filename>arachni.py
from __future__ import print_function

import errno
import hashlib
import json
import os
import sys
from io import open
from subprocess import call
def run(options, url):
    """Scan *url* with Arachni, convert its binary report to JSON, and
    return a list of vulnerability dicts with the keys
    ``vulnerability``, ``url``, ``parameter``, ``method`` and ``poc``.

    options keys (all optional): "arachni_dir", "report_dir", "checks".
    Exits the process (sys.exit(1)) when the scanner cannot be executed or
    returns a non-zero status.
    """
    dorkbot_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir)

    # Locate the arachni binaries: explicit option, bundled tools dir, else $PATH.
    if "arachni_dir" in options:
        arachni_path = os.path.join(os.path.abspath(options["arachni_dir"]), "bin")
    elif os.path.isdir(os.path.join(dorkbot_dir, "tools", "arachni", "bin")):
        arachni_path = os.path.join(dorkbot_dir, "tools", "arachni", "bin")
    else:
        arachni_path = ""
    arachni_cmd = os.path.join(arachni_path, "arachni")
    arachni_reporter_cmd = os.path.join(arachni_path, "arachni_reporter")

    if "report_dir" in options:
        report_dir = os.path.abspath(options["report_dir"])
    else:
        report_dir = os.path.join(dorkbot_dir, "reports")

    if "checks" in options:
        checks = options["checks"].replace(" ", ",")
    else:
        checks = "active/*,-csrf,-unvalidated_redirect,-source_code_disclosure,-response_splitting,-no_sql_injection_differential"

    # Report files are keyed by the md5 of the full URL (file naming only,
    # not a security use of md5).
    url_base = url.split("?", 1)[0].replace("(", "%28").replace(")", "%29")
    url_hash = hashlib.md5(url.encode("utf-8")).hexdigest()
    report = os.path.join(report_dir, url_hash + ".bin")
    report_stderr = os.path.join(report_dir, url_hash + ".stderr")
    report_json = os.path.join(report_dir, url_hash + ".json")

    # Single-page, time-boxed, throttled scan restricted to the dork URL.
    scan_cmd = [arachni_cmd]
    scan_cmd += ["--report-save-path", report]
    scan_cmd += ["--timeout", "00:10:00"]
    scan_cmd += ["--http-request-concurrency", "1"]
    scan_cmd += ["--http-request-queue-size", "25"]
    scan_cmd += ["--http-response-max-size", "100000"]
    scan_cmd += ["--scope-page-limit", "1"]
    scan_cmd += ["--output-only-positives"]
    scan_cmd += ["--scope-auto-redundant", "2"]
    scan_cmd += ["--scope-include-pattern", url_base]
    scan_cmd += ["--checks", checks]
    scan_cmd += ["--plugin", "autothrottle"]
    scan_cmd += ["--browser-cluster-ignore-images"]
    scan_cmd += [url]

    report_cmd = [arachni_reporter_cmd]
    report_cmd += ["--reporter", "json:outfile=" + report_json]
    report_cmd += [report]

    if os.path.isfile(report) or os.path.isfile(report_stderr):
        print("Skipping (found report file): " + url)
    else:
        print("Scanning: " + url)
        report_stderr_f = open(report_stderr, "a")
        try:
            for cmd, tool in ((scan_cmd, "arachni"), (report_cmd, "arachni_reporter")):
                try:
                    ret = call(cmd, cwd=arachni_path, stderr=report_stderr_f)
                except OSError as e:
                    # BUG FIX: the original tested `"No such file..." in e`,
                    # which raises TypeError on Python 3 (OSError is not
                    # iterable); compare the errno instead.
                    if e.errno == errno.ENOENT:
                        print("Could not execute %s. If not in PATH, then download and unpack as /path/to/dorkbot/tools/arachni/ or set arachni_dir option to correct directory." % tool, file=sys.stderr)
                        report_stderr_f.close()
                        os.remove(report_stderr)
                        sys.exit(1)
                    raise
                if ret != 0:
                    sys.exit(1)
        finally:
            # BUG FIX: the original leaked the handle on the non-ENOENT and
            # non-zero-exit paths.
            if not report_stderr_f.closed:
                report_stderr_f.close()
        # Both tools succeeded: drop the (empty) stderr capture so a later
        # run is not skipped because of it.
        if os.path.isfile(report_stderr):
            os.remove(report_stderr)

    # BUG FIX: a previous failed run can leave no JSON report; the original
    # crashed on open() here.  Treat that as "no findings".
    if not os.path.isfile(report_json):
        return []

    with open(report_json, encoding="utf-8") as data_file:
        contents = data_file.read()
    data = json.loads(contents)

    vulns = []
    for issue in data["issues"]:
        vuln = {}
        vuln["vulnerability"] = issue["check"]["shortname"]
        vuln["url"] = issue["referring_page"]["dom"]["url"]
        vuln["parameter"] = issue["vector"]["affected_input_name"]
        if "method" in issue["vector"]:
            vuln["method"] = issue["vector"]["method"]
        else:
            vuln["method"] = ""
        # Rewrite Arachni's probe payloads into visible alert() PoC URLs.
        shortname = issue["check"]["shortname"]
        page_url = issue["page"]["dom"]["url"]
        if shortname == "xss_script_context":
            vuln["poc"] = page_url.replace("window.top._arachni_js_namespace_taint_tracer.log_execution_flow_sink()", "alert(150)")
        elif shortname == "xss_tag":
            vuln["poc"] = page_url.replace("arachni_xss_in_tag", "autofocus+onfocus=alert(150)+onload=alert(150)+xss")
        elif shortname == "xss_path":
            vuln["poc"] = page_url.replace("%3Cmy_tag", "%3Cimg+src=xyz+onerror=alert(150)%3E%3Cmy_tag")
        elif shortname == "xss":
            vuln["poc"] = page_url.replace("%3Cxss", "%3Cimg+src=xyz+onerror=alert(150)%3E%3Cxss")
        else:
            vuln["poc"] = page_url
        vulns.append(vuln)
    return vulns
| <filename>arachni.py
from __future__ import print_function
import sys
import os
import hashlib
import json
from subprocess import call
from io import open
def run(options, url):
    """Scan *url* with Arachni, convert the binary report to JSON, and
    return a list of vulnerability dicts
    (vulnerability/url/parameter/method/poc).

    options keys (all optional): "arachni_dir", "report_dir", "checks".

    NOTE(review): `"No such file or directory" in e` below raises TypeError
    on Python 3 (an OSError is not iterable) -- `e.errno == errno.ENOENT`
    was probably intended.  The skip path also assumes the .json report
    already exists; confirm before relying on it.
    """
    dorkbot_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir)
    # Locate the arachni binaries: explicit option, bundled tools dir, else $PATH.
    if "arachni_dir" in options:
        arachni_path = os.path.join(os.path.abspath(options["arachni_dir"]), "bin")
    elif os.path.isdir(os.path.join(dorkbot_dir, "tools", "arachni", "bin")):
        arachni_path = os.path.join(dorkbot_dir, "tools", "arachni", "bin")
    else:
        arachni_path = ""
    arachni_cmd = os.path.join(arachni_path, "arachni")
    arachni_reporter_cmd = os.path.join(arachni_path, "arachni_reporter")
    if "report_dir" in options:
        report_dir = os.path.abspath(options["report_dir"])
    else:
        report_dir = os.path.join(dorkbot_dir, "reports")
    if "checks" in options:
        checks = options["checks"].replace(" ", ",")
    else:
        checks = "active/*,-csrf,-unvalidated_redirect,-source_code_disclosure,-response_splitting,-no_sql_injection_differential"
    # Report files are keyed by the md5 of the full URL (file naming only).
    url_base = url.split("?", 1)[0].replace("(", "%28").replace(")", "%29")
    url_hash = hashlib.md5(url.encode("utf-8")).hexdigest()
    report = os.path.join(report_dir, url_hash + ".bin")
    report_stderr = os.path.join(report_dir, url_hash + ".stderr")
    report_json = os.path.join(report_dir, url_hash + ".json")
    # Single-page, time-boxed, throttled scan restricted to the dork URL.
    scan_cmd = [arachni_cmd]
    scan_cmd += ["--report-save-path", report]
    scan_cmd += ["--timeout", "00:10:00"]
    scan_cmd += ["--http-request-concurrency", "1"]
    scan_cmd += ["--http-request-queue-size", "25"]
    scan_cmd += ["--http-response-max-size", "100000"]
    scan_cmd += ["--scope-page-limit", "1"]
    scan_cmd += ["--output-only-positives"]
    scan_cmd += ["--scope-auto-redundant", "2"]
    scan_cmd += ["--scope-include-pattern", url_base]
    scan_cmd += ["--checks", checks]
    scan_cmd += ["--plugin", "autothrottle"]
    scan_cmd += ["--browser-cluster-ignore-images"]
    scan_cmd += [url]
    report_cmd = [arachni_reporter_cmd]
    report_cmd += ["--reporter", "json:outfile="+report_json]
    report_cmd += [report]
    if os.path.isfile(report) or os.path.isfile(report_stderr):
        print("Skipping (found report file): " + url)
    else:
        print("Scanning: " + url)
        report_stderr_f = open(report_stderr, "a")
        try:
            ret = call(scan_cmd, cwd=arachni_path, stderr=report_stderr_f)
            if ret != 0: sys.exit(1)
        except OSError as e:
            # NOTE(review): `in e` raises TypeError on Python 3 -- see docstring.
            if "No such file or directory" in e:
                print("Could not execute arachni. If not in PATH, then download and unpack as /path/to/dorkbot/tools/arachni/ or set arachni_dir option to correct directory.", file=sys.stderr)
                report_stderr_f.close()
                os.remove(report_stderr)
                sys.exit(1)
        try:
            ret = call(report_cmd, cwd=arachni_path, stderr=report_stderr_f)
            if ret != 0: sys.exit(1)
        except OSError as e:
            # NOTE(review): same `in e` issue as above.
            if "No such file or directory" in e:
                print("Could not execute arachni_reporter. If not in PATH, then download and unpack as /path/to/dorkbot/tools/arachni/ or set arachni_dir option to correct directory.", file=sys.stderr)
                report_stderr_f.close()
                os.remove(report_stderr)
                sys.exit(1)
        # Both tools succeeded: drop the stderr capture so a later run is
        # not skipped because of it.
        if os.path.isfile(report_stderr):
            report_stderr_f.close()
            os.remove(report_stderr)
    with open(report_json, encoding="utf-8") as data_file:
        contents = data_file.read()
    data = json.loads(contents)
    vulns = []
    for issue in data["issues"]:
        vuln = {}
        vuln["vulnerability"] = issue["check"]["shortname"]
        vuln["url"] = issue["referring_page"]["dom"]["url"]
        vuln["parameter"] = issue["vector"]["affected_input_name"]
        if "method" in issue["vector"]:
            vuln["method"] = issue["vector"]["method"]
        else:
            vuln["method"] = ""
        # Rewrite Arachni's probe payloads into visible alert() PoC URLs.
        if issue["check"]["shortname"] == "xss_script_context":
            vuln["poc"] = issue["page"]["dom"]["url"].replace("window.top._arachni_js_namespace_taint_tracer.log_execution_flow_sink()", "alert(150)")
        elif issue["check"]["shortname"] == "xss_tag":
            vuln["poc"] = issue["page"]["dom"]["url"].replace("arachni_xss_in_tag", "autofocus+onfocus=alert(150)+onload=alert(150)+xss")
        elif issue["check"]["shortname"] == "xss_path":
            vuln["poc"] = issue["page"]["dom"]["url"].replace("%3Cmy_tag", "%3Cimg+src=xyz+onerror=alert(150)%3E%3Cmy_tag")
        elif issue["check"]["shortname"] == "xss":
            vuln["poc"] = issue["page"]["dom"]["url"].replace("%3Cxss", "%3Cimg+src=xyz+onerror=alert(150)%3E%3Cxss")
        else:
            vuln["poc"] = issue["page"]["dom"]["url"]
        vulns.append(vuln)
    return vulns
| none | 1 | 2.348263 | 2 | |
ooobuild/lo/embed/entry_init_modes.py | Amourspirit/ooo_uno_tmpl | 0 | 6620094 | <reponame>Amourspirit/ooo_uno_tmpl<gh_stars>0
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Const Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.embed
class EntryInitModes(object):
    """
    Const Class

    This constant set contains possible modes to initialize object persistence.

    See Also:
        `API EntryInitModes <https://api.libreoffice.org/docs/idl/ref/namespacecom_1_1sun_1_1star_1_1embed_1_1EntryInitModes.html>`_
    """
    # UNO type metadata used by the ooobuild machinery.
    __ooo_ns__: str = 'com.sun.star.embed'
    __ooo_full_ns__: str = 'com.sun.star.embed.EntryInitModes'
    __ooo_type_name__: str = 'const'

    DEFAULT_INIT: int = 0
    """
    In case object persistence is created based on existing entry, the object should be initialized from this entry.
    
    Otherwise the object should be initialized as a new one.
    """
    TRUNCATE_INIT: int = 1
    """
    The object should be initialized as a new empty one.
    """
    NO_INIT: int = 2
    """
    The object should be initialized as a new one only in case it still was not initialized.
    
    If the object initialized already do not reinitialize it.
    """
    MEDIA_DESCRIPTOR_INIT: int = 3
    """
    The object should be initialized using additional arguments from provided com.sun.star.document.MediaDescriptor.
    """
    URL_LINK_INIT: int = 4
    """
    The object should be initialized as a link using URL provided in additional arguments.
    """

__all__ = ['EntryInitModes']
| # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Const Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.embed
class EntryInitModes(object):
    """
    Const Class

    This constant set contains possible modes to initialize object persistence.

    See Also:
        `API EntryInitModes <https://api.libreoffice.org/docs/idl/ref/namespacecom_1_1sun_1_1star_1_1embed_1_1EntryInitModes.html>`_
    """
    # UNO type metadata used by the ooobuild machinery.
    __ooo_ns__: str = 'com.sun.star.embed'
    __ooo_full_ns__: str = 'com.sun.star.embed.EntryInitModes'
    __ooo_type_name__: str = 'const'

    DEFAULT_INIT: int = 0
    """
    In case object persistence is created based on existing entry, the object should be initialized from this entry.
    
    Otherwise the object should be initialized as a new one.
    """
    TRUNCATE_INIT: int = 1
    """
    The object should be initialized as a new empty one.
    """
    NO_INIT: int = 2
    """
    The object should be initialized as a new one only in case it still was not initialized.
    
    If the object initialized already do not reinitialize it.
    """
    MEDIA_DESCRIPTOR_INIT: int = 3
    """
    The object should be initialized using additional arguments from provided com.sun.star.document.MediaDescriptor.
    """
    URL_LINK_INIT: int = 4
    """
    The object should be initialized as a link using URL provided in additional arguments.
    """
__all__ = ['EntryInitModes'] | en | 0.828193 | # coding: utf-8 # # Copyright 2022 :Barry-Thomas-Paul: Moss # # Licensed under the Apache License, Version 2.0 (the "License") # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http: // www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Const Class # this is a auto generated file generated by Cheetah # Libre Office Version: 7.3 # Namespace: com.sun.star.embed Const Class This constant set contains possible modes to initialize object persistence. See Also: `API EntryInitModes <https://api.libreoffice.org/docs/idl/ref/namespacecom_1_1sun_1_1star_1_1embed_1_1EntryInitModes.html>`_ In case object persistence is created based on existing entry, the object should be initialized from this entry. Otherwise the object should be initialized as a new one. The object should be initialized as a new empty one. The object should be initialized as a new one only in case it still was not initialized. If the object initialized already do not reinitialize it. The object should be initialized using additional arguments from provided com.sun.star.document.MediaDescriptor. The object should be initialized as a link using URL provided in additional arguments. | 1.777179 | 2 |
# cx_Freeze build script for the NITA NEWS desktop app.
from cx_Freeze import setup, Executable
import sys
import os

# Use the Win32GUI base on Windows so the app runs without a console window;
# everywhere else the default (console) base is used.
base = None
if sys.platform == 'win32':
    base = "Win32GUI"

executables = [
    Executable(
        script="nita_di_news.py",
        shortcutName="nita_di_news",
        shortcutDir="DesktopFolder",
        # BUG FIX: the original passed `base=ba`, where `ba` is only bound on
        # win32 -- a NameError on every other platform.
        base=base,
        copyDependentFiles=True,
        appendScriptToExe=True,
        appendScriptToLibrary=False,
        targetName="nita_di_news.exe",
    )
]

setup(
    name="NITA NEWS",
    options={"build_exe": {"packages": ["os"], "include_files": ["..////Resources//nita_icon.ico"]}},
    version="1.1",
    description="View Latest News currently at NITA website",
    executables=executables,
)
| from cx_Freeze import setup, Executable
import sys
import os

# `base` is assigned but never used; the Executable below reads `ba` instead.
base = None
if sys.platform == 'win32':
    ba = "Win32GUI"
# NOTE(review): `ba` is only bound on win32, so constructing this Executable
# on any other platform raises NameError -- `base=base` was probably intended.
execu = [Executable(script = "nita_di_news.py", shortcutName="nita_di_news",
                    shortcutDir="DesktopFolder", base = ba, copyDependentFiles=True,
                    appendScriptToExe=True,
                    appendScriptToLibrary=False, targetName = "nita_di_news.exe")]

setup(
    name="NITA NEWS",
    options = {"build_exe": {"packages":["os"], "include_files":["..////Resources//nita_icon.ico"]}},
    version = "1.1",
    description = "View Latest News currently at NITA website",
    executables = execu)
| none | 1 | 2.005231 | 2 | |
myenv/Lib/site-packages/emailconfirmation/views.py | thestackcoder/notifao_app | 0 | 6620096 | from django.shortcuts import render_to_response
from django.template import RequestContext
from emailconfirmation.models import EmailConfirmation
def confirm_email(request, confirmation_key):
    """Confirm an e-mail address from the key mailed to the user and render
    the confirmation result page.

    `confirm_email` returns the confirmed EmailAddress, or None when the key
    is unknown/expired; the template handles both cases.  (This span also
    had dataset residue fused into its last line; restored to clean code.)
    """
    confirmation_key = confirmation_key.lower()
    email_address = EmailConfirmation.objects.confirm_email(confirmation_key)
    return render_to_response("emailconfirmation/confirm_email.html", {
        "email_address": email_address,
    }, context_instance=RequestContext(request))
from django.template import RequestContext
from emailconfirmation.models import EmailConfirmation
def confirm_email(request, confirmation_key):
    """Confirm an e-mail address from the mailed key and render the result
    page.  (Dataset residue fused into the last line of this span was
    removed; the code itself is unchanged.)"""
    confirmation_key = confirmation_key.lower()
    email_address = EmailConfirmation.objects.confirm_email(confirmation_key)
    return render_to_response("emailconfirmation/confirm_email.html", {
        "email_address": email_address,
    }, context_instance=RequestContext(request))
# Prompt the user for five names, then echo them back one per line.
# (The first line of this span carried dataset residue, now removed; the
# manual counter loop and index loop are replaced by direct iteration.)
names = [input("Enter your name: ") for _ in range(5)]
for name in names:
    print(name)
| count = 0
nameArray = []
while count < 5:
name = input("Enter your name: ")
nameArray.append(name)
count += 1
for i in range(0,5):
print(nameArray[i])
| none | 1 | 3.860537 | 4 | |
util/Configuration.py | EmeryWan/GradeEntry | 3 | 6620098 | import configparser
import getpass
import os
from util.Log import LoggerSingleton
# Log message templates.
LOG_ERROR_TEMPLATE = "%s --- %s --- ERROR"
LOG_INFO_TEMP = "%s -- %s -- %s -- INFO"
# Usage examples:
# LoggerSingleton.instance().error(LOG_ERROR_TEMPLATE % (self.__class__.__name__, Tool.get_current_fun_name()))
# LoggerSingleton.instance().info(LOG_INFO_TEMP % (self.__class__.__name__, Tool.get_current_fun_name(), ...))

# Configuration directory (<cwd>/config).
CONFIG_DIR_PATH = os.path.join(os.getcwd(), "config")

# VersionLad.py
# Chrome installed system-wide.
CHROME_INSTALL_PATH = "C:\\Program Files (x86)\\Google\\Chrome\\Application"
# Chrome installed per-user.
CHROME_INSTALL_PATH_USER = "C:\\Users\\" + getpass.getuser() + "\\AppData\\Local\\Google\\Chrome\\Application"

# chromedriver artifacts kept under the config directory.
CHROMEDRIVER_VERSION_JSON_NAME = "chromedriver.json"
CHROMEDRIVER_VERSION_JSON_PATH = os.path.join(CONFIG_DIR_PATH, CHROMEDRIVER_VERSION_JSON_NAME)
CHROMEDRIVER_SPIDER_JSON_NAME = "chromedriver_spider.json"
CHROMEDRIVER_SPIDER_JSON_PATH = os.path.join(CONFIG_DIR_PATH, CHROMEDRIVER_SPIDER_JSON_NAME)
CHROMEDRIVER_ZIP_NAME = "chromedriver_win32.zip"
CHROMEDRIVER_ZIP_PATH = os.path.join(CONFIG_DIR_PATH, CHROMEDRIVER_ZIP_NAME)
CHROMEDRIVER_NAME = "chromedriver.exe"
CHROMEDRIVER_PATH = os.path.join(CONFIG_DIR_PATH, CHROMEDRIVER_NAME)

# download
NPM_MIRRORS_URL = "https://npm.taobao.org/mirrors/chromedriver"

# MainView
ECJTU_MAIN_LOGO_PATH = os.path.join(CONFIG_DIR_PATH, "ecjtu_logo.png")
ECJTU_ICON_LOGO_PATH = os.path.join(CONFIG_DIR_PATH, "ecjtu_icon_logo.png")

# HTML hooks on the grade-entry page: id/class names of the table's
# ancestors and the tags/attributes the scraper looks for.
CURRENT_PAGE_LEVEL_SELECT_ID = "stype"
MAIN_TABLE_PARENT_DIV_CLASS = "data-tab"
TAG_TABLE = "table"
TAG_TR = "tr"
TAG_INPUT = "input"
TAG_OPTION = "option"
TAG_SELECT = "select"
ATTRIBUTE_TYPE = "type"
ATTRIBUTE_VALUE = "value"
ATTRIBUTE_TEXT_TYPE = "text"
ATTRIBUTE_CHECKBOX_TYPE = "checkbox"

# BrowserController: ids on the login form.
LOGIN_FORM_ID = "login-action"
USER_INPUT_ID = "inputUser"
PASSWORD_INPUT_ID = "inputPassword"

# Misc flags.
HUNDRED_DOUBLE_INPUT_BOOL = False

# DownLoad
DOWNLOAD_ERROR_SIGN = False
UNZIP_ERROR_SIGN = False
class SettingsInfo:
    """Application settings: class-level defaults, overridden from
    config/settings.ini by read_info() when the file provides non-empty
    values."""

    USER_ID = None
    # The published source carried a redaction artifact (`<PASSWORD>`) here,
    # which is a syntax error; the real value is loaded from settings.ini.
    PASSWORD = None
    WEBSITE = "https://jwxt.ecjtu.edu.cn/"
    EXCEL_FILES_PATH = os.path.join(os.getcwd(), "excel")
    BROWSER_EXE_PATH = None
    # Must not be changed -- kept for backward compatibility after refactoring.
    HOMEPAGE = "https://jwxt.ecjtu.edu.cn/"
    # Must not be changed -- kept for backward compatibility after refactoring.
    DRIVER_PATH = os.path.join(os.getcwd(), "config", "chromedriver.exe")

    def __init__(self):
        # Defensive: fall back to "no config" if the path cannot be built.
        try:
            self.__config_path = os.path.join(os.getcwd(), "config", "settings.ini")
        except BaseException:
            self.__config_path = None
        if self.__config_path is not None:
            self.read_info()

    def read_info(self):
        """Read settings.ini and copy every non-empty value, stripped of
        surrounding whitespace, onto the class attributes.  The password is
        deliberately never logged."""
        ini_config = configparser.ConfigParser()
        ini_config.read(self.__config_path, encoding="utf-8")
        _user_id = ini_config.get("login", "user")
        _password = ini_config.get("login", "password")
        _excel_files_path = ini_config.get("excel", "path")
        _website = ini_config.get("web", "website")
        _browser_exe_path = ini_config.get("browser", "browser_exe_path")
        if _excel_files_path != "" and _excel_files_path is not None:
            SettingsInfo.EXCEL_FILES_PATH = str(_excel_files_path).strip()
            LoggerSingleton.instance().info("SettingsInfo -> EXCEL_FILES_PATH " + str(SettingsInfo.EXCEL_FILES_PATH))
        if _user_id != "" and _user_id is not None:
            SettingsInfo.USER_ID = str(_user_id).strip()
            LoggerSingleton.instance().info("SettingsInfo -> USER_ID " + str(SettingsInfo.USER_ID))
        if _password != "" and _password is not None:
            # BUG FIX: restored from a redaction artifact in the published
            # source; store the password read from the ini file.
            SettingsInfo.PASSWORD = str(_password).strip()
        if _website != "" and _website is not None:
            SettingsInfo.WEBSITE = str(_website).strip()
            LoggerSingleton.instance().info("SettingsInfo -> WEBSITE " + str(SettingsInfo.WEBSITE))
        if _browser_exe_path != "" and _browser_exe_path is not None:
            SettingsInfo.BROWSER_EXE_PATH = str(_browser_exe_path).strip()
            LoggerSingleton.instance().info("SettingsInfo -> BROWSER_EXE_PATH " + str(SettingsInfo.BROWSER_EXE_PATH))
| import configparser
import getpass
import os
from util.Log import LoggerSingleton
# Log message templates
LOG_ERROR_TEMPLATE = "%s --- %s --- ERROR"
LOG_INFO_TEMP = "%s -- %s -- %s -- INFO"
# Usage examples:
# LoggerSingleton.instance().error(LOG_ERROR_TEMPLATE % (self.__class__.__name__, Tool.get_current_fun_name()))
# LoggerSingleton.instance().info(LOG_INFO_TEMP % (self.__class__.__name__, Tool.get_current_fun_name(), ...))
# Configuration file directory
CONFIG_DIR_PATH = os.path.join(os.getcwd(), "config")
# VersionLad.py
# System-wide Chrome installation directory
CHROME_INSTALL_PATH = "C:\\Program Files (x86)\\Google\\Chrome\\Application"
# Per-user Chrome installation directory
CHROME_INSTALL_PATH_USER = "C:\\Users\\" + getpass.getuser() + "\\AppData\\Local\\Google\\Chrome\\Application"
CHROMEDRIVER_VERSION_JSON_NAME = "chromedriver.json"
CHROMEDRIVER_VERSION_JSON_PATH = os.path.join(CONFIG_DIR_PATH, CHROMEDRIVER_VERSION_JSON_NAME)
CHROMEDRIVER_SPIDER_JSON_NAME = "chromedriver_spider.json"
CHROMEDRIVER_SPIDER_JSON_PATH = os.path.join(CONFIG_DIR_PATH, CHROMEDRIVER_SPIDER_JSON_NAME)
CHROMEDRIVER_ZIP_NAME = "chromedriver_win32.zip"
CHROMEDRIVER_ZIP_PATH = os.path.join(CONFIG_DIR_PATH, CHROMEDRIVER_ZIP_NAME)
CHROMEDRIVER_NAME = "chromedriver.exe"
CHROMEDRIVER_PATH = os.path.join(CONFIG_DIR_PATH, CHROMEDRIVER_NAME)
# download
NPM_MIRRORS_URL = "https://npm.taobao.org/mirrors/chromedriver"
# MainView
ECJTU_MAIN_LOGO_PATH = os.path.join(CONFIG_DIR_PATH, "ecjtu_logo.png")
ECJTU_ICON_LOGO_PATH = os.path.join(CONFIG_DIR_PATH, "ecjtu_icon_logo.png")
# Class name of the div wrapping the data-entry table
CURRENT_PAGE_LEVEL_SELECT_ID = "stype"
MAIN_TABLE_PARENT_DIV_CLASS = "data-tab"
TAG_TABLE = "table"
TAG_TR = "tr"
TAG_INPUT = "input"
TAG_OPTION = "option"
TAG_SELECT = "select"
ATTRIBUTE_TYPE = "type"
ATTRIBUTE_VALUE = "value"
ATTRIBUTE_TEXT_TYPE = "text"
ATTRIBUTE_CHECKBOX_TYPE = "checkbox"
# BrowserController
LOGIN_FORM_ID = "login-action"
USER_INPUT_ID = "inputUser"
PASSWORD_INPUT_ID = "inputPassword"
# Misc state flags
HUNDRED_DOUBLE_INPUT_BOOL = False
# DownLoad
DOWNLOAD_ERROR_SIGN = False
UNZIP_ERROR_SIGN = False
class SettingsInfo:
    """Process-wide settings, loaded from config/settings.ini on construction.

    Values are stored on the class itself, so every instance (and any code
    reading the class attributes directly) shares the same configuration.
    """
    USER_ID = None
    # NOTE(review): credential value was scrubbed from this dump (`<PASSWORD>`
    # placeholder) — the real default must come from settings.ini, never be
    # hard-coded here.
    PASSWORD = <PASSWORD>
    WEBSITE = "https://jwxt.ecjtu.edu.cn/"
    EXCEL_FILES_PATH = os.path.join(os.getcwd(), "excel")
    BROWSER_EXE_PATH = None
    # Do not change: kept for backwards compatibility after refactoring.
    HOMEPAGE = "https://jwxt.ecjtu.edu.cn/"
    # Do not change: kept for backwards compatibility after refactoring.
    DRIVER_PATH = os.path.join(os.getcwd(), "config", "chromedriver.exe")
    def __init__(self):
        # Resolve the ini path relative to the current working directory;
        # fall back to None (and skip loading) if construction fails.
        try:
            self.__config_path = os.path.join(os.getcwd(), "config", "settings.ini")
        except BaseException:
            self.__config_path = None
        if self.__config_path is not None:
            self.read_info()
    def read_info(self):
        """Read settings.ini and overwrite class-level settings with any non-empty values."""
        ini_config = configparser.ConfigParser()
        ini_config.read(self.__config_path, encoding="utf-8")
        # NOTE(review): ConfigParser.get raises NoSection/NoOptionError when a
        # key is missing — this assumes every section/option exists in the ini.
        _user_id = ini_config.get("login", "user")
        _password = ini_config.get("login", "password")
        _excel_files_path = ini_config.get("excel", "path")
        _website = ini_config.get("web", "website")
        _browser_exe_path = ini_config.get("browser", "browser_exe_path")
        if _excel_files_path != "" and _excel_files_path is not None:
            SettingsInfo.EXCEL_FILES_PATH = _excel_files_path
            SettingsInfo.EXCEL_FILES_PATH = str(SettingsInfo.EXCEL_FILES_PATH).strip()
            LoggerSingleton.instance().info("SettingsInfo -> EXCEL_FILES_PATH " + str(SettingsInfo.EXCEL_FILES_PATH))
        if _user_id != "" and _user_id is not None:
            SettingsInfo.USER_ID = _user_id
            SettingsInfo.USER_ID = str(SettingsInfo.USER_ID).strip()
            LoggerSingleton.instance().info("SettingsInfo -> USER_ID " + str(SettingsInfo.USER_ID))
        if _password != "" and _password is not None:
            # Deliberately not logged: credential.
            SettingsInfo.PASSWORD = <PASSWORD>
            SettingsInfo.PASSWORD = str(SettingsInfo.PASSWORD).strip()
        if _website != "" and _website is not None:
            SettingsInfo.WEBSITE = _website
            SettingsInfo.WEBSITE = str(SettingsInfo.WEBSITE).strip()
            LoggerSingleton.instance().info("SettingsInfo -> WEBSITE " + str(SettingsInfo.WEBSITE))
        if _browser_exe_path != "" and _browser_exe_path is not None:
            SettingsInfo.BROWSER_EXE_PATH = _browser_exe_path
            SettingsInfo.BROWSER_EXE_PATH = str(SettingsInfo.BROWSER_EXE_PATH).strip()
            LoggerSingleton.instance().info("SettingsInfo -> BROWSER_EXE_PATH " + str(SettingsInfo.BROWSER_EXE_PATH))
| zh | 0.525831 | # log 模板 # LoggerSingleton.instance().error(LOG_ERROR_TEMPLATE % (self.__class__.__name__, Tool.get_current_fun_name())) # LoggerSingleton.instance().info(LOG_INFO_TEMP % (self.__class__.__name__, Tool.get_current_fun_name(), ...)) # 配置文件目录 # VersionLad.py # 系统安装目录 # 用户安装目录 # download # MainView # 录入界面表格上级 div 类名 # BrowserController # 一些标志 # DownLoad # 该条不能更改 因重构保留 # 该条不能更改 因重构保留 | 2.113684 | 2 |
simulate.py | manhdao/boid-MPHYSG001 | 0 | 6620099 | <reponame>manhdao/boid-MPHYSG001
from matplotlib import pyplot as plt
from matplotlib import animation
from boids import Flock
def simulate(animation_params, flock_params, boid_params, action='update_boids'):
    """Animate a boid flock with matplotlib.

    Parameters
    ----------
    animation_params : dict
        Must provide 'axes_min'/'axes_max' (plot limits), 'frames' and
        'interval' (forwarded to FuncAnimation).
    flock_params, boid_params
        Forwarded unchanged to the Flock constructor.
    action : str
        One of 'fly_middle', 'fly_away', 'match_speed'; anything else
        (including the default 'update_boids') runs the full update rule.
    """
    flock = Flock(flock_params, boid_params)
    axes_min, axes_max = animation_params['axes_min'], animation_params['axes_max']
    figure = plt.figure()
    axes = plt.axes(xlim=(axes_min, axes_max), ylim=(axes_min, axes_max))
    scatter = axes.scatter(flock.positions[0], flock.positions[1])
    # Resolve the per-frame step function once instead of re-testing the
    # (constant) action string on every frame; unknown actions fall back to
    # the full update rule, exactly as the original else-branch did.
    steppers = {
        'fly_middle': flock.fly_middle,
        'fly_away': flock.fly_away,
        'match_speed': flock.match_speed,
    }
    step = steppers.get(action, flock.update_boids)
    def animate(frame):
        step()
        scatter.set_offsets(flock.positions.transpose())
    # Keep a reference to the animation object: FuncAnimation is driven by
    # the event loop and is garbage-collected (stopping the animation) if
    # nothing holds on to it.
    anim = animation.FuncAnimation(figure, animate, frames=animation_params['frames'],
                                   interval=animation_params['interval'])
    plt.show()
| from matplotlib import pyplot as plt
from matplotlib import animation
from boids import Flock
def simulate(animation_params, flock_params, boid_params, action='update_boids'):
flock = Flock(flock_params, boid_params)
axes_min, axes_max = animation_params['axes_min'], animation_params['axes_max']
figure = plt.figure()
axes = plt.axes(xlim=(axes_min, axes_max), ylim=(axes_min, axes_max))
scatter = axes.scatter(flock.positions[0], flock.positions[1])
def animate(frame):
if action == 'fly_middle':
flock.fly_middle()
scatter.set_offsets(flock.positions.transpose())
elif action == 'fly_away':
flock.fly_away()
scatter.set_offsets(flock.positions.transpose())
elif action == 'match_speed':
flock.match_speed()
scatter.set_offsets(flock.positions.transpose())
else:
flock.update_boids()
scatter.set_offsets(flock.positions.transpose())
anim = animation.FuncAnimation(figure, animate, frames=animation_params['frames'],
interval=animation_params['interval'])
plt.show() | none | 1 | 2.635853 | 3 | |
Moonrise/DarkForestCreature.py | Malarthi/Salamandbot | 0 | 6620100 | class DarkForestCreature:
baseAttackDelay = 600
attackDelayMulti = 1.0
baseAttackStrength = 60
attackStrengthMulti = 1.0
health = 600
reward = 600
name = 'name'
spawnMesage = ''
    def __init__(self, Delay, DelayMulti, Attack, AttackMulti, health, reward):
        """Initialise a creature with its attack timing, damage, health and reward."""
        self.baseAttackDelay = Delay
        self.attackDelayMulti = DelayMulti
        self.baseAttackStrength = Attack
        self.attackStrengthMulti = AttackMulti
        self.health = health
        self.reward = reward
    # --- Plain accessors/mutators (Java-style; kept for existing callers) ---
    def getBaseAttackDelay(self):
        return self.baseAttackDelay
    def getAttackDelayMulti(self):
        return self.attackDelayMulti
    def getBaseAttackStrength(self):
        return self.baseAttackStrength
    def getAttackStrengthMulti(self):
        return self.attackStrengthMulti
    def getHealth(self):
        return self.health
    def getReward(self):
        return self.reward
    def getName(self):
        return self.name
    def setBaseAttackDelay(self, delay):
        self.baseAttackDelay = delay
    def setAttackDelayMulti(self, multi):
        self.attackDelayMulti = multi
    def setBaseAttackStrength(self,attack):
        self.baseAttackStrength = attack
    def setAttackStrengthMulti(self, multi):
        self.attackStrengthMulti = multi
    def setHealth(self, health):
        self.health = health
    def setReward(self, reward):
        self.reward = reward
def getAttack(self):
retval = self.name + ' attacks the shield for ' + str(int(self.baseAttackStrength * self.attackStrengthMulti)) + '.'
return retval
def getCampfireAttack(self):
retval = 'The shadowy critter takes a single stab at the fire before retreating. It does ' + str(int(self.baseAttackStrength * self.attackStrengthMulti)) + ' damage to the fire.'
return retval
def getSpawnMessage(self):
retval = self.spawnMesage
return retval
| class DarkForestCreature:
baseAttackDelay = 600
attackDelayMulti = 1.0
baseAttackStrength = 60
attackStrengthMulti = 1.0
health = 600
reward = 600
name = 'name'
spawnMesage = ''
def __init__(self, Delay, DelayMulti, Attack, AttackMulti, health, reward):
self.baseAttackDelay = Delay
self.attackDelayMulti = DelayMulti
self.baseAttackStrength = Attack
self.attackStrengthMulti = AttackMulti
self.health = health
self.reward = reward
def getBaseAttackDelay(self):
return self.baseAttackDelay
def getAttackDelayMulti(self):
return self.attackDelayMulti
def getBaseAttackStrength(self):
return self.baseAttackStrength
def getAttackStrengthMulti(self):
return self.attackStrengthMulti
def getHealth(self):
return self.health
def getReward(self):
return self.reward
def getName(self):
return self.name
def setBaseAttackDelay(self, delay):
self.baseAttackDelay = delay
def setAttackDelayMulti(self, multi):
self.attackDelayMulti = multi
def setBaseAttackStrength(self,attack):
self.baseAttackStrength = attack
def setAttackStrengthMulti(self, multi):
self.attackStrengthMulti = multi
def setHealth(self, health):
self.health = health
def setReward(self, reward):
self.reward = reward
def getAttack(self):
retval = self.name + ' attacks the shield for ' + str(int(self.baseAttackStrength * self.attackStrengthMulti)) + '.'
return retval
def getCampfireAttack(self):
retval = 'The shadowy critter takes a single stab at the fire before retreating. It does ' + str(int(self.baseAttackStrength * self.attackStrengthMulti)) + ' damage to the fire.'
return retval
def getSpawnMessage(self):
retval = self.spawnMesage
return retval
| none | 1 | 2.748952 | 3 | |
auth/migrations/0004_alter_user_token.py | Gaming32/and-Beyond-AuthServer | 0 | 6620101 | <gh_stars>0
# Generated by Django 3.2.8 on 2021-10-09 15:48
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: redefine ``user.token`` as a nullable,
    unique, 32-byte binary field defaulting to ``None``."""

    dependencies = [
        ('auth', '0003_alter_user_username'),
    ]
    operations = [
        migrations.AlterField(
            model_name='user',
            name='token',
            field=models.BinaryField(default=None, max_length=32, null=True, unique=True),
        ),
    ]
| # Generated by Django 3.2.8 on 2021-10-09 15:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('auth', '0003_alter_user_username'),
]
operations = [
migrations.AlterField(
model_name='user',
name='token',
field=models.BinaryField(default=None, max_length=32, null=True, unique=True),
),
] | en | 0.91124 | # Generated by Django 3.2.8 on 2021-10-09 15:48 | 1.577215 | 2 |
SmartTypes.py | Fear-MK/MKW-Table-Bot | 0 | 6620102 | import UtilityFunctions
import UserDataProcessing
import LoungeAPIFunctions
from typing import List, Union, Tuple
class SmartLookupTypes:
    """Classify a raw lookup value and resolve it to player/room identity data.

    An instance wraps one piece of user input — friend code (FC), rxx room
    id, Discord id, raw Discord mention, Lounge name, a list/set of FCs, or
    the special ("you", discord_id) tuple from create_you_discord_id — and
    exposes uniform accessors that resolve it through UserDataProcessing's
    caches and (on demand) the Lounge API.
    """
    # Sentinel objects used as type tags; always compared with `is`.
    FC = object()
    FC_LIST = object()
    DISCORD_ID = object()
    SELF_DISCORD_ID = object()
    # Bounds used to sanity-check Discord snowflake ids (2**22 and 2**64 - 1).
    MIN_DISCORD_ID = 4194304
    MAX_DISCORD_ID = 18446744073709551615
    RXX = object()
    LOUNGE_NAME = object()
    RAW_DISCORD_MENTION = object()
    UNKNOWN = object()
    ALL_TYPES = {FC, FC_LIST, SELF_DISCORD_ID, DISCORD_ID, RXX, LOUNGE_NAME, RAW_DISCORD_MENTION, UNKNOWN}
    PLAYER_LOOKUP_TYPES = {FC, FC_LIST, SELF_DISCORD_ID, DISCORD_ID, LOUNGE_NAME, RAW_DISCORD_MENTION}
    ROOM_LOOKUP_TYPES = {RXX} | PLAYER_LOOKUP_TYPES
    def __init__(self, data, allowed_types=None):
        # `original` keeps the untouched input for display; `modified_original`
        # is the normalized form actually used for lookups.
        self.original = data
        self.modified_original = data
        self._original_type = SmartLookupTypes.UNKNOWN
        self._allowed_types = SmartLookupTypes.ALL_TYPES if allowed_types is None else allowed_types
        if isinstance(self.modified_original, str):
            self.modified_original = self.modified_original.strip().lower()
            # First allowed type that matches wins; LOUNGE_NAME is the
            # catch-all for any non-empty string.
            if SmartLookupTypes.FC in self._allowed_types and UtilityFunctions.is_fc(data):
                self._original_type = SmartLookupTypes.FC
            elif SmartLookupTypes.RXX in self._allowed_types and UtilityFunctions.is_rLID(data):
                self._original_type = SmartLookupTypes.RXX
            elif SmartLookupTypes.DISCORD_ID in self._allowed_types and UtilityFunctions.is_int(data) and int(data) >= SmartLookupTypes.MIN_DISCORD_ID and int(data) <= SmartLookupTypes.MAX_DISCORD_ID:
                self._original_type = SmartLookupTypes.DISCORD_ID
            elif SmartLookupTypes.RAW_DISCORD_MENTION in self._allowed_types and UtilityFunctions.is_discord_mention(data):
                self._original_type = SmartLookupTypes.RAW_DISCORD_MENTION
                # Strip mention decoration (<@!...>) down to the bare id.
                self.modified_original = self.modified_original.strip('<>@! ')
            elif SmartLookupTypes.LOUNGE_NAME in self._allowed_types and len(data) > 0:
                self._original_type = SmartLookupTypes.LOUNGE_NAME
        elif isinstance(data, int):
            # Integers normalize to their string form for cache lookups.
            if SmartLookupTypes.DISCORD_ID in self._allowed_types and int(data) >= SmartLookupTypes.MIN_DISCORD_ID and int(data) <= SmartLookupTypes.MAX_DISCORD_ID:
                self._original_type = SmartLookupTypes.DISCORD_ID
                self.modified_original = str(self.modified_original).strip()
            elif SmartLookupTypes.LOUNGE_NAME in self._allowed_types and len(str(data)) > 0:
                self._original_type = SmartLookupTypes.LOUNGE_NAME
                self.modified_original = str(self.modified_original).strip()
        elif isinstance(data, list):
            if SmartLookupTypes.FC_LIST in self._allowed_types and all(isinstance(d, str) for d in data) and all(UtilityFunctions.is_fc(d) for d in data):
                self._original_type = SmartLookupTypes.FC_LIST
        elif isinstance(data, set):
            if SmartLookupTypes.FC_LIST in self._allowed_types and all(isinstance(d, str) for d in data) and all(UtilityFunctions.is_fc(d) for d in data):
                self._original_type = SmartLookupTypes.FC_LIST
                self.modified_original = list(self.modified_original)
        elif isinstance(data, tuple):
            # The ("you", discord_id) marker tuple produced by
            # create_you_discord_id means "the invoking user themselves".
            if SmartLookupTypes.SELF_DISCORD_ID in self._allowed_types:
                if len(data) == 2 and data == create_you_discord_id(data[1]):
                    self.modified_original = data[1]
                    self._original_type = SmartLookupTypes.SELF_DISCORD_ID
    def add_allowed_type(self, type_):
        if type_ not in SmartLookupTypes.ALL_TYPES:
            raise ValueError("Invalid lookup type addition")
        self._allowed_types.add(type_)
    def remove_allowed_type(self, type_):
        if type_ in self._allowed_types:
            self._allowed_types.remove(type_)
    def is_invalid_type(self, type_=None):
        # NOTE(review): despite the name, this returns True when type_ IS in
        # the allowed set — callers may expect the opposite; confirm intent.
        type_ = self._original_type if type_ is None else type_
        return type_ in self._allowed_types
    def get_type(self):
        return self._original_type
    def get_country_flag(self, suppress_exception=False) -> Union[str, None]:
        # NOTE(review): suppress_exception is accepted but not forwarded to
        # get_discord_id, so this can still raise ValueError for
        # non-player lookup types — confirm intended.
        return UserDataProcessing.get_flag(self.get_discord_id())
    def get_discord_id(self, suppress_exception=False) -> Union[int, None]:
        """Resolve this lookup to a discord id, or None if unknown.

        Raises ValueError for non-player types unless suppress_exception.
        """
        if self._original_type not in SmartLookupTypes.PLAYER_LOOKUP_TYPES:
            if suppress_exception:
                return None
            raise ValueError("Cannot get discord id for unsupported type")
        discord_id = None
        if self._original_type is SmartLookupTypes.FC:
            discord_id = UserDataProcessing.get_discord_id_from_fc(self.modified_original)
        elif self._original_type is SmartLookupTypes.FC_LIST:
            # First FC that resolves wins.
            for fc in self.modified_original:
                discord_id = UserDataProcessing.get_discord_id_from_fc(fc)
                if discord_id is not None and discord_id != '':
                    break
        elif self._original_type is SmartLookupTypes.DISCORD_ID or self._original_type is SmartLookupTypes.RAW_DISCORD_MENTION or self._original_type is SmartLookupTypes.SELF_DISCORD_ID:
            discord_id = self.modified_original
        elif self._original_type is SmartLookupTypes.LOUNGE_NAME:
            discord_id = UserDataProcessing.get_DiscordID_By_LoungeName(self.modified_original)
        return None if discord_id == '' else discord_id
    def get_lounge_name(self, suppress_exception=False) -> Union[str, None]:
        """Resolve this lookup to a Lounge name, or None if unknown.

        Raises ValueError for non-player types unless suppress_exception.
        """
        if self._original_type not in SmartLookupTypes.PLAYER_LOOKUP_TYPES:
            if suppress_exception:
                return None
            raise ValueError("Cannot get lounge name for unsupported type")
        lounge_name = None
        if self._original_type is SmartLookupTypes.FC:
            lounge_name = UserDataProcessing.lounge_get(self.modified_original)
        elif self._original_type is SmartLookupTypes.FC_LIST:
            for fc in self.modified_original:
                lounge_name = UserDataProcessing.lounge_get(fc)
                if lounge_name is not None and lounge_name != '':
                    break
        elif self._original_type is SmartLookupTypes.DISCORD_ID or self._original_type is SmartLookupTypes.RAW_DISCORD_MENTION or self._original_type is SmartLookupTypes.SELF_DISCORD_ID:
            lounge_name = UserDataProcessing.get_lounge(self.modified_original)
        elif self._original_type is SmartLookupTypes.LOUNGE_NAME:
            # Round-trip through the discord id when possible so the
            # canonical (current) Lounge name is returned.
            discord_id = self.get_discord_id()
            if discord_id is None:
                lounge_name = self.modified_original
            else:
                lounge_name = UserDataProcessing.get_lounge(discord_id)
        return None if lounge_name == '' else lounge_name
    def get_fcs(self, suppress_exception=False) -> Union[List[str], None]:
        """Resolve this lookup to a list of FCs, or None if none are known.

        Raises ValueError for non-player types unless suppress_exception.
        """
        if self._original_type not in SmartLookupTypes.PLAYER_LOOKUP_TYPES:
            if suppress_exception:
                return None
            raise ValueError("Cannot get fcs for unsupported type")
        fcs = []
        if self._original_type is SmartLookupTypes.FC:
            fcs = [self.modified_original]
        elif self._original_type is SmartLookupTypes.FC_LIST:
            fcs = self.modified_original
        elif self._original_type is SmartLookupTypes.DISCORD_ID or self._original_type is SmartLookupTypes.RAW_DISCORD_MENTION or self._original_type is SmartLookupTypes.SELF_DISCORD_ID:
            fcs = UserDataProcessing.get_all_fcs(self.modified_original)
        elif self._original_type is SmartLookupTypes.LOUNGE_NAME:
            fcs = UserDataProcessing.getFCsByLoungeName(self.modified_original)
        return None if (fcs is None or len(fcs) == 0) else fcs
    async def lounge_api_update(self, suppress_exception=False):
        """Refresh the local user-data caches from the Lounge API for this lookup.

        Raises ValueError for non-player types unless suppress_exception.
        """
        if self._original_type not in SmartLookupTypes.PLAYER_LOOKUP_TYPES:
            if suppress_exception:
                return None
            raise ValueError("Cannot hit Lounge API for unsupported type")
        if self._original_type is SmartLookupTypes.FC:
            UserDataProcessing.smartUpdate(* await LoungeAPIFunctions.getByFCs([self.modified_original]))
        elif self._original_type is SmartLookupTypes.FC_LIST:
            UserDataProcessing.smartUpdate(* await LoungeAPIFunctions.getByFCs(self.modified_original))
        elif self._original_type is SmartLookupTypes.DISCORD_ID or self._original_type is SmartLookupTypes.RAW_DISCORD_MENTION or self._original_type is SmartLookupTypes.SELF_DISCORD_ID:
            UserDataProcessing.smartUpdate(* await LoungeAPIFunctions.getByDiscordIDs([self.modified_original]))
        elif self._original_type is SmartLookupTypes.LOUNGE_NAME:
            UserDataProcessing.smartUpdate(* await LoungeAPIFunctions.getByLoungeNames([self.modified_original]))
        return True
    # --- Simple type predicates ---
    def is_rxx(self):
        return self._original_type is SmartLookupTypes.RXX
    def is_fc(self):
        return self._original_type is SmartLookupTypes.FC
    def is_lounge_name(self):
        return self._original_type is SmartLookupTypes.LOUNGE_NAME
    def is_fc_list(self):
        return self._original_type is SmartLookupTypes.FC_LIST
    def is_discord_id(self):
        return self._original_type is SmartLookupTypes.DISCORD_ID
    def is_self_discord_id(self):
        return self._original_type is SmartLookupTypes.SELF_DISCORD_ID
    def is_discord_mention(self):
        return self._original_type is SmartLookupTypes.RAW_DISCORD_MENTION
    def is_unknown(self):
        return self._original_type is SmartLookupTypes.UNKNOWN
    def get_smart_print(self) -> Tuple[str, str]:
        '''Based on the type, returns a 2-tuple of strings that most informational messages can use
        The first index in the tuple is a descriptive of the SmartLookupType type along with the actual modified type
        The second index is the correct grammatical pronoun of the type (eg they, you, it)
        '''
        if self.get_type() is SmartLookupTypes.FC:
            return f"the FC {self.modified_original}", "they"
        if self.get_type() is SmartLookupTypes.FC_LIST:
            return f"the FCs {self.modified_original}", "they"
        if self.get_type() is SmartLookupTypes.DISCORD_ID:
            return f"the discord ID {self.modified_original}", "they"
        if self.get_type() is SmartLookupTypes.SELF_DISCORD_ID:
            return f"you", "you"
        if self.get_type() is SmartLookupTypes.RXX:
            return f"the rxx {self.modified_original}", "it"
        if self.get_type() is SmartLookupTypes.LOUNGE_NAME:
            return f"{self.original}", "they"
        if self.get_type() is SmartLookupTypes.RAW_DISCORD_MENTION:
            return f"the discord ID {self.modified_original}", "they"
        return f"{self.modified_original}", "it"
    def get_clean_smart_print(self, message):
        '''Based on the type, returns a 2-tuple of strings that most informational messages can use
        The first index in the tuple is a descriptive of the SmartLookupType type along with the actual modified type.
        If the given type was a discord mention, the display name of that member will be returned if it can be found, otherwise the discord ID of the mention will be used
        The second index is the correct grammatical pronoun of the type (eg they, you, it)
        '''
        descriptive, pronoun = self.get_smart_print()
        if self.get_type() is self.RAW_DISCORD_MENTION:
            # Prefer the mentioned member's display name when the message
            # object carries it; fall back to the bare id otherwise.
            for mention in message.mentions:
                if str(mention.id) == self.modified_original:
                    descriptive = str(mention.name)
                    break
        return UtilityFunctions.clean_for_output(descriptive), pronoun
def to_be_conjugation(pronoun: str):
    """Return the present-tense form of "to be" that agrees with *pronoun*.

    Matching is case-insensitive; any word not in the table (he, she, it,
    a player name, ...) conjugates as "is".
    """
    # One lowercase key per pronoun: the original table duplicated every
    # entry in two capitalisations and then fell back to .lower() anyway,
    # so the capitalised keys were redundant.
    conjugations = {"i": "am",
                    "you": "are",
                    "we": "are",
                    "y'all": "are",
                    "you all": "are",
                    "they": "are"}
    return conjugations.get(pronoun.lower(), "is")
def possessive(name: str):
    """Return the English possessive form of *name*.

    Personal pronouns map to their possessive determiners — matched
    case-sensitively first (so "I" -> "My" but "i" -> "my"), then
    case-insensitively. Anything else gets "'s", or just an apostrophe
    when it already ends in "s".
    """
    special = {"i": "my",
               "I": "My",
               "you": "your",
               "You": "Your",
               "we": "our",
               "We": "Our",
               "Y'all": "Y'all's",
               "y'all": "y'all's",
               "you all": "you all's",
               "You all": "You all's",
               "They": "Their",
               "they": "their"}
    form = special.get(name)
    if form is None:
        form = special.get(name.lower())
    if form is not None:
        return form
    suffix = "'" if name.lower().endswith('s') else "'s"
    return name + suffix
def capitalize(name: str):
    """Upper-case only the first character of *name*.

    Unlike str.capitalize, the rest of the string is left untouched;
    an empty string comes back unchanged.
    """
    return name[:1].upper() + name[1:]
def create_you_discord_id(discord_id):
return ("you", str(discord_id)) | import UtilityFunctions
import UserDataProcessing
import LoungeAPIFunctions
from typing import List, Union, Tuple
class SmartLookupTypes:
FC = object()
FC_LIST = object()
DISCORD_ID = object()
SELF_DISCORD_ID = object()
MIN_DISCORD_ID = 4194304
MAX_DISCORD_ID = 18446744073709551615
RXX = object()
LOUNGE_NAME = object()
RAW_DISCORD_MENTION = object()
UNKNOWN = object()
ALL_TYPES = {FC, FC_LIST, SELF_DISCORD_ID, DISCORD_ID, RXX, LOUNGE_NAME, RAW_DISCORD_MENTION, UNKNOWN}
PLAYER_LOOKUP_TYPES = {FC, FC_LIST, SELF_DISCORD_ID, DISCORD_ID, LOUNGE_NAME, RAW_DISCORD_MENTION}
ROOM_LOOKUP_TYPES = {RXX} | PLAYER_LOOKUP_TYPES
def __init__(self, data, allowed_types=None):
self.original = data
self.modified_original = data
self._original_type = SmartLookupTypes.UNKNOWN
self._allowed_types = SmartLookupTypes.ALL_TYPES if allowed_types is None else allowed_types
if isinstance(self.modified_original, str):
self.modified_original = self.modified_original.strip().lower()
if SmartLookupTypes.FC in self._allowed_types and UtilityFunctions.is_fc(data):
self._original_type = SmartLookupTypes.FC
elif SmartLookupTypes.RXX in self._allowed_types and UtilityFunctions.is_rLID(data):
self._original_type = SmartLookupTypes.RXX
elif SmartLookupTypes.DISCORD_ID in self._allowed_types and UtilityFunctions.is_int(data) and int(data) >= SmartLookupTypes.MIN_DISCORD_ID and int(data) <= SmartLookupTypes.MAX_DISCORD_ID:
self._original_type = SmartLookupTypes.DISCORD_ID
elif SmartLookupTypes.RAW_DISCORD_MENTION in self._allowed_types and UtilityFunctions.is_discord_mention(data):
self._original_type = SmartLookupTypes.RAW_DISCORD_MENTION
self.modified_original = self.modified_original.strip('<>@! ')
elif SmartLookupTypes.LOUNGE_NAME in self._allowed_types and len(data) > 0:
self._original_type = SmartLookupTypes.LOUNGE_NAME
elif isinstance(data, int):
if SmartLookupTypes.DISCORD_ID in self._allowed_types and int(data) >= SmartLookupTypes.MIN_DISCORD_ID and int(data) <= SmartLookupTypes.MAX_DISCORD_ID:
self._original_type = SmartLookupTypes.DISCORD_ID
self.modified_original = str(self.modified_original).strip()
elif SmartLookupTypes.LOUNGE_NAME in self._allowed_types and len(str(data)) > 0:
self._original_type = SmartLookupTypes.LOUNGE_NAME
self.modified_original = str(self.modified_original).strip()
elif isinstance(data, list):
if SmartLookupTypes.FC_LIST in self._allowed_types and all(isinstance(d, str) for d in data) and all(UtilityFunctions.is_fc(d) for d in data):
self._original_type = SmartLookupTypes.FC_LIST
elif isinstance(data, set):
if SmartLookupTypes.FC_LIST in self._allowed_types and all(isinstance(d, str) for d in data) and all(UtilityFunctions.is_fc(d) for d in data):
self._original_type = SmartLookupTypes.FC_LIST
self.modified_original = list(self.modified_original)
elif isinstance(data, tuple):
if SmartLookupTypes.SELF_DISCORD_ID in self._allowed_types:
if len(data) == 2 and data == create_you_discord_id(data[1]):
self.modified_original = data[1]
self._original_type = SmartLookupTypes.SELF_DISCORD_ID
def add_allowed_type(self, type_):
if type_ not in SmartLookupTypes.ALL_TYPES:
raise ValueError("Invalid lookup type addition")
self._allowed_types.add(type_)
def remove_allowed_type(self, type_):
if type_ in self._allowed_types:
self._allowed_types.remove(type_)
def is_invalid_type(self, type_=None):
type_ = self._original_type if type_ is None else type_
return type_ in self._allowed_types
def get_type(self):
return self._original_type
def get_country_flag(self, suppress_exception=False) -> Union[str, None]:
return UserDataProcessing.get_flag(self.get_discord_id())
def get_discord_id(self, suppress_exception=False) -> Union[int, None]:
if self._original_type not in SmartLookupTypes.PLAYER_LOOKUP_TYPES:
if suppress_exception:
return None
raise ValueError("Cannot get discord id for unsupported type")
discord_id = None
if self._original_type is SmartLookupTypes.FC:
discord_id = UserDataProcessing.get_discord_id_from_fc(self.modified_original)
elif self._original_type is SmartLookupTypes.FC_LIST:
for fc in self.modified_original:
discord_id = UserDataProcessing.get_discord_id_from_fc(fc)
if discord_id is not None and discord_id != '':
break
elif self._original_type is SmartLookupTypes.DISCORD_ID or self._original_type is SmartLookupTypes.RAW_DISCORD_MENTION or self._original_type is SmartLookupTypes.SELF_DISCORD_ID:
discord_id = self.modified_original
elif self._original_type is SmartLookupTypes.LOUNGE_NAME:
discord_id = UserDataProcessing.get_DiscordID_By_LoungeName(self.modified_original)
return None if discord_id == '' else discord_id
def get_lounge_name(self, suppress_exception=False) -> Union[str, None]:
if self._original_type not in SmartLookupTypes.PLAYER_LOOKUP_TYPES:
if suppress_exception:
return None
raise ValueError("Cannot get lounge name for unsupported type")
lounge_name = None
if self._original_type is SmartLookupTypes.FC:
lounge_name = UserDataProcessing.lounge_get(self.modified_original)
elif self._original_type is SmartLookupTypes.FC_LIST:
for fc in self.modified_original:
lounge_name = UserDataProcessing.lounge_get(fc)
if lounge_name is not None and lounge_name != '':
break
elif self._original_type is SmartLookupTypes.DISCORD_ID or self._original_type is SmartLookupTypes.RAW_DISCORD_MENTION or self._original_type is SmartLookupTypes.SELF_DISCORD_ID:
lounge_name = UserDataProcessing.get_lounge(self.modified_original)
elif self._original_type is SmartLookupTypes.LOUNGE_NAME:
discord_id = self.get_discord_id()
if discord_id is None:
lounge_name = self.modified_original
else:
lounge_name = UserDataProcessing.get_lounge(discord_id)
return None if lounge_name == '' else lounge_name
def get_fcs(self, suppress_exception=False) -> Union[List[str], None]:
if self._original_type not in SmartLookupTypes.PLAYER_LOOKUP_TYPES:
if suppress_exception:
return None
raise ValueError("Cannot get fcs for unsupported type")
fcs = []
if self._original_type is SmartLookupTypes.FC:
fcs = [self.modified_original]
elif self._original_type is SmartLookupTypes.FC_LIST:
fcs = self.modified_original
elif self._original_type is SmartLookupTypes.DISCORD_ID or self._original_type is SmartLookupTypes.RAW_DISCORD_MENTION or self._original_type is SmartLookupTypes.SELF_DISCORD_ID:
fcs = UserDataProcessing.get_all_fcs(self.modified_original)
elif self._original_type is SmartLookupTypes.LOUNGE_NAME:
fcs = UserDataProcessing.getFCsByLoungeName(self.modified_original)
return None if (fcs is None or len(fcs) == 0) else fcs
async def lounge_api_update(self, suppress_exception=False):
if self._original_type not in SmartLookupTypes.PLAYER_LOOKUP_TYPES:
if suppress_exception:
return None
raise ValueError("Cannot hit Lounge API for unsupported type")
if self._original_type is SmartLookupTypes.FC:
UserDataProcessing.smartUpdate(* await LoungeAPIFunctions.getByFCs([self.modified_original]))
elif self._original_type is SmartLookupTypes.FC_LIST:
UserDataProcessing.smartUpdate(* await LoungeAPIFunctions.getByFCs(self.modified_original))
elif self._original_type is SmartLookupTypes.DISCORD_ID or self._original_type is SmartLookupTypes.RAW_DISCORD_MENTION or self._original_type is SmartLookupTypes.SELF_DISCORD_ID:
UserDataProcessing.smartUpdate(* await LoungeAPIFunctions.getByDiscordIDs([self.modified_original]))
elif self._original_type is SmartLookupTypes.LOUNGE_NAME:
UserDataProcessing.smartUpdate(* await LoungeAPIFunctions.getByLoungeNames([self.modified_original]))
return True
def is_rxx(self):
return self._original_type is SmartLookupTypes.RXX
def is_fc(self):
return self._original_type is SmartLookupTypes.FC
def is_lounge_name(self):
return self._original_type is SmartLookupTypes.LOUNGE_NAME
def is_fc_list(self):
return self._original_type is SmartLookupTypes.FC_LIST
def is_discord_id(self):
return self._original_type is SmartLookupTypes.DISCORD_ID
def is_self_discord_id(self):
return self._original_type is SmartLookupTypes.SELF_DISCORD_ID
def is_discord_mention(self):
return self._original_type is SmartLookupTypes.RAW_DISCORD_MENTION
def is_unknown(self):
return self._original_type is SmartLookupTypes.UNKNOWN
def get_smart_print(self) -> Tuple[str, str]:
'''Based on the type, returns a 2-tuple of strings that most informational messages can use
The first index in the tuple is a descriptive of the SmartLookupType type along with the actual modified type
The second index is the correct grammatical pronoun of the type (eg they, you, it)
'''
if self.get_type() is SmartLookupTypes.FC:
return f"the FC {self.modified_original}", "they"
if self.get_type() is SmartLookupTypes.FC_LIST:
return f"the FCs {self.modified_original}", "they"
if self.get_type() is SmartLookupTypes.DISCORD_ID:
return f"the discord ID {self.modified_original}", "they"
if self.get_type() is SmartLookupTypes.SELF_DISCORD_ID:
return f"you", "you"
if self.get_type() is SmartLookupTypes.RXX:
return f"the rxx {self.modified_original}", "it"
if self.get_type() is SmartLookupTypes.LOUNGE_NAME:
return f"{self.original}", "they"
if self.get_type() is SmartLookupTypes.RAW_DISCORD_MENTION:
return f"the discord ID {self.modified_original}", "they"
return f"{self.modified_original}", "it"
def get_clean_smart_print(self, message):
'''Based on the type, returns a 2-tuple of strings that most informational messages can use
The first index in the tuple is a descriptive of the SmartLookupType type along with the actual modified type.
If the given type was a discord mention, the display name of that member will be returned if it can be found, otherwise the discord ID of the mention will be used
The second index is the correct grammatical pronoun of the type (eg they, you, it)
'''
descriptive, pronoun = self.get_smart_print()
if self.get_type() is self.RAW_DISCORD_MENTION:
for mention in message.mentions:
if str(mention.id) == self.modified_original:
descriptive = str(mention.name)
break
return UtilityFunctions.clean_for_output(descriptive), pronoun
def to_be_conjugation(pronoun: str):
conjugations = {"i": "am",
"I": "am",
"you": "are",
"You": "are",
"we": "are",
"We": "are",
"Y'all": "are",
"y'all": "are",
"you all": "are",
"You all": "are",
"They": "are",
"they": "are"}
if pronoun in conjugations:
return conjugations[pronoun]
if pronoun.lower() in conjugations:
return conjugations[pronoun.lower()]
return "is"
def possessive(name: str) -> str:
    """Return the possessive form of *name*.

    Known pronouns map to their possessive determiner, preserving the
    caller's capitalization when an exact-cased entry exists ("I" -> "My",
    "i" -> "my"). Any other name gets an apostrophe suffix: a bare "'" when
    the name already ends in s, otherwise "'s".
    """
    forms = {
        "i": "my", "I": "My",
        "you": "your", "You": "Your",
        "we": "our", "We": "Our",
        "Y'all": "Y'all's", "y'all": "y'all's",
        "you all": "you all's", "You all": "You all's",
        "They": "Their", "they": "their",
    }
    # Exact-cased lookup first (values differ by case), then a lowercased
    # fallback for unusual capitalizations like "THEY".
    for key in (name, name.lower()):
        if key in forms:
            return forms[key]
    suffix = "'" if name.lower().endswith('s') else "'s"
    return name + suffix
def capitalize(name: str) -> str:
    """Upper-case only the first character of *name*, leaving the rest as-is.

    Unlike str.capitalize(), the remaining characters keep their original
    case ("aBC" -> "ABC"). An empty string is returned unchanged.
    """
    return name if not name else name[0].upper() + name[1:]
def create_you_discord_id(discord_id):
    """Pair the literal pronoun "you" with *discord_id* rendered as a string."""
    id_text = str(discord_id)
    return ("you", id_text)
sefara/commands/__init__.py | timodonnell/pathase | 0 | 6620103 | from . import check, dump, env, select
__all__ = ["check", "dump", "env", "select"] | from . import check, dump, env, select
__all__ = ["check", "dump", "env", "select"] | none | 1 | 1.1436 | 1 | |
src/skdh/utility/_extensions/__init__.py | PfizerRD/scikit-digital-health | 1 | 6620104 | from .moving_moments import moving_mean, moving_sd, moving_skewness, moving_kurtosis
from .moving_median import moving_median
__all__ = [
"moving_mean",
"moving_sd",
"moving_skewness",
"moving_kurtosis",
"moving_median",
]
| from .moving_moments import moving_mean, moving_sd, moving_skewness, moving_kurtosis
from .moving_median import moving_median
__all__ = [
"moving_mean",
"moving_sd",
"moving_skewness",
"moving_kurtosis",
"moving_median",
]
| none | 1 | 1.351769 | 1 | |
engine/tvm/ssd_mxnet/compile_ssd.py | mengyaliu/DLRU | 2 | 6620105 | <reponame>mengyaliu/DLRU<gh_stars>1-10
# -*- coding: utf-8 -*-
"""Compile a pretrained GluonCV SSD detector with TVM Relay and export it.

Usage: compile_ssd.py <device> <output-dir>
  argv[1]: 'cpu' selects the 'llvm' target; any other value selects 'cuda'
  argv[2]: directory the compiled artifacts are written into
"""
import tvm
import sys, os
from tvm.relay.testing.config import ctx_list
from tvm import relay
from gluoncv import model_zoo, data, utils
from tvm.contrib import util
# Pretrained SSD variants this script knows how to compile.
supported_model = [
    'ssd_512_resnet50_v1_voc',
    'ssd_512_resnet50_v1_coco',
    'ssd_512_resnet101_v2_voc',
    'ssd_512_mobilenet1.0_voc',
    'ssd_512_mobilenet1.0_coco',
    # BUG FIX: a missing comma here previously concatenated the next two
    # string literals into one invalid model name
    # ('ssd_300_vgg16_atrous_vocssd_512_vgg16_atrous_coco').
    'ssd_300_vgg16_atrous_voc',
    'ssd_512_vgg16_atrous_coco',
]
model_name = supported_model[0]
dshape = (1, 3, 512, 512)  # NCHW input shape expected by the 512x512 SSD models
if sys.argv[1] == 'cpu':
    target = 'llvm'
else:
    target = 'cuda'
# download model
block = model_zoo.get_model(model_name, pretrained=True)
# function of compiling model
def build(target):
    """Lower the Gluon model to Relay and build it for *target*.

    Returns the (graph_json, compiled_lib, params) triple from relay.build.
    """
    mod, params = relay.frontend.from_mxnet(block, {"data": dshape})
    with relay.build_config(opt_level=3):
        graph, lib, params = relay.build(mod, target, params=params)
    return graph, lib, params
# compile model and save them to files
graph, lib, params = build(target)
tmp_dir = sys.argv[2]
if not os.path.exists(tmp_dir):
    os.makedirs(tmp_dir)
# NOTE: tmp_dir is concatenated directly (no os.path.join), so callers must
# pass it with a trailing path separator.
path_lib = tmp_dir + "deploy_lib.tar"
path_graph = tmp_dir + "deploy_graph.json"
path_params = tmp_dir + "deploy_param.params"
lib.export_library(path_lib)
with open(path_graph, "w") as fo:
    fo.write(graph)
with open(path_params, "wb") as fo:
    fo.write(relay.save_param_dict(params))
| # -*- coding: utf-8 -*-
"""Compile a pretrained GluonCV SSD detector with TVM Relay and export it.

Usage: compile_ssd.py <device> <output-dir>
  argv[1]: 'cpu' selects the 'llvm' target; any other value selects 'cuda'
  argv[2]: directory the compiled artifacts are written into
"""
import tvm
import sys, os
from tvm.relay.testing.config import ctx_list
from tvm import relay
from gluoncv import model_zoo, data, utils
from tvm.contrib import util
# Pretrained SSD variants this script knows how to compile.
supported_model = [
    'ssd_512_resnet50_v1_voc',
    'ssd_512_resnet50_v1_coco',
    'ssd_512_resnet101_v2_voc',
    'ssd_512_mobilenet1.0_voc',
    'ssd_512_mobilenet1.0_coco',
    # BUG FIX: a missing comma here previously concatenated the next two
    # string literals into one invalid model name
    # ('ssd_300_vgg16_atrous_vocssd_512_vgg16_atrous_coco').
    'ssd_300_vgg16_atrous_voc',
    'ssd_512_vgg16_atrous_coco',
]
model_name = supported_model[0]
dshape = (1, 3, 512, 512)  # NCHW input shape expected by the 512x512 SSD models
if sys.argv[1] == 'cpu':
    target = 'llvm'
else:
    target = 'cuda'
# download model
block = model_zoo.get_model(model_name, pretrained=True)
# function of compiling model
def build(target):
    """Lower the Gluon model to Relay and build it for *target*.

    Returns the (graph_json, compiled_lib, params) triple from relay.build.
    """
    mod, params = relay.frontend.from_mxnet(block, {"data": dshape})
    with relay.build_config(opt_level=3):
        graph, lib, params = relay.build(mod, target, params=params)
    return graph, lib, params
# compile model and save them to files
graph, lib, params = build(target)
tmp_dir = sys.argv[2]
if not os.path.exists(tmp_dir):
    os.makedirs(tmp_dir)
# NOTE: tmp_dir is concatenated directly (no os.path.join), so callers must
# pass it with a trailing path separator.
path_lib = tmp_dir + "deploy_lib.tar"
path_graph = tmp_dir + "deploy_graph.json"
path_params = tmp_dir + "deploy_param.params"
lib.export_library(path_lib)
with open(path_graph, "w") as fo:
    fo.write(graph)
with open(path_params, "wb") as fo:
    fo.write(relay.save_param_dict(params))
alice_blue_api/websocket.py | DibyaranjanSathua/stocklabs | 1 | 6620106 | """
File: websocket.py
Author: <NAME>
Created on: 20/06/21, 7:36 pm
https://websocket-client.readthedocs.io/en/latest/app.html#websocket._app.WebSocketApp.__init__
"""
from typing import Optional
import json
import threading
import time
import websocket
from alice_blue_api.api import AliceBlueApi
from alice_blue_api.websocket_streams import MarketData, CompactMarketData
from alice_blue_api.option_chain import OptionChain
class AliceBlueWebSocket:
    """ Web socket connection to get live feed market data """

    # URL template; the placeholder is filled with the API handler's access token.
    WS_ENDPOINT: str = 'wss://ant.aliceblueonline.com/hydrasocket/v2/websocket' \
                       '?access_token={access_token}'

    def __init__(self):
        # Underlying websocket app; created lazily in connect().
        self._websocket: Optional[websocket.WebSocketApp] = None
        # Connection state; toggled by the on_open/on_close callbacks.
        self._connected = False
        # Thread running the websocket loop when start(thread=True) is used.
        self._websocket_thread = None
        # Shared API handler that supplies the access token for the URL.
        self._alice_blue_api_handler: AliceBlueApi = AliceBlueApi.get_handler()

    def connect(self):
        """ Connect to web socket """
        url = self.WS_ENDPOINT.format(access_token=self._alice_blue_api_handler.access_token)
        self._websocket = websocket.WebSocketApp(
            url=url,
            on_open=self.on_open,
            on_close=self.on_close,
            on_message=self.on_message
        )

    def _run_forever(self):
        """ Run the websocket forever, retrying after errors """
        while True:
            try:
                self._websocket.run_forever()
            except Exception as err:
                print(f"Exception in websocket, {err}")
                # Brief pause before the loop retries run_forever().
                time.sleep(1)

    def start(self, thread=True):
        """ Start websocket. If thread is True, it will run in a different thread """
        self.connect()
        if thread:
            print(f"Starting websocket connection in a thread")
            # Daemon thread: the process may exit without joining it.
            self._websocket_thread = threading.Thread(target=self._run_forever)
            self._websocket_thread.daemon = True
            self._websocket_thread.start()
        else:
            self._run_forever()

    def on_message(self, ws, message):
        """ on message callback: parse the tick and refresh the option chain """
        print("Receive message. Update option chain")
        market_data = MarketData.create(message)
        option_chain = OptionChain.get_instance()
        option_chain.update(market_data)

    def on_open(self, ws):
        """ on open callback """
        print("Connection open")
        self._connected = True

    def on_close(self, ws):
        """ Connection closed """
        print("Connection closed")
        self._connected = False

    def send(self, data, opcode=websocket.ABNF.OPCODE_TEXT):
        """ Send JSON-encoded data to the web socket api; silently dropped when not connected """
        data = json.dumps(data)
        if self._connected:
            self._websocket.send(data=data, opcode=opcode)

    def _send_heartbeat(self):
        """ Send a heartbeat every 5 seconds to keep the web socket connection alive """
        data = {"a": "h", "v": [], "m": ""}
        while True:
            time.sleep(5)
            self.send(data, opcode=websocket.ABNF.OPCODE_PING)

    def send_heartbeat(self):
        """ Wrapper to run _send_heartbeat in a daemon thread """
        thread = threading.Thread(target=self._send_heartbeat)
        thread.daemon = True
        thread.start()

    def wait_until_connection_open(self):
        """ Busy-wait (10 ms polls) till the web socket connection is open """
        while not self._connected:
            time.sleep(0.01)

    @property
    def connected(self) -> bool:
        # True between the on_open and on_close callbacks.
        return self._connected
| """
File: websocket.py
Author: <NAME>
Created on: 20/06/21, 7:36 pm
https://websocket-client.readthedocs.io/en/latest/app.html#websocket._app.WebSocketApp.__init__
"""
from typing import Optional
import json
import threading
import time
import websocket
from alice_blue_api.api import AliceBlueApi
from alice_blue_api.websocket_streams import MarketData, CompactMarketData
from alice_blue_api.option_chain import OptionChain
class AliceBlueWebSocket:
    """ Web socket connection to get live feed market data """

    # URL template; the placeholder is filled with the API handler's access token.
    WS_ENDPOINT: str = 'wss://ant.aliceblueonline.com/hydrasocket/v2/websocket' \
                       '?access_token={access_token}'

    def __init__(self):
        # Underlying websocket app; created lazily in connect().
        self._websocket: Optional[websocket.WebSocketApp] = None
        # Connection state; toggled by the on_open/on_close callbacks.
        self._connected = False
        # Thread running the websocket loop when start(thread=True) is used.
        self._websocket_thread = None
        # Shared API handler that supplies the access token for the URL.
        self._alice_blue_api_handler: AliceBlueApi = AliceBlueApi.get_handler()

    def connect(self):
        """ Connect to web socket """
        url = self.WS_ENDPOINT.format(access_token=self._alice_blue_api_handler.access_token)
        self._websocket = websocket.WebSocketApp(
            url=url,
            on_open=self.on_open,
            on_close=self.on_close,
            on_message=self.on_message
        )

    def _run_forever(self):
        """ Run the websocket forever, retrying after errors """
        while True:
            try:
                self._websocket.run_forever()
            except Exception as err:
                print(f"Exception in websocket, {err}")
                # Brief pause before the loop retries run_forever().
                time.sleep(1)

    def start(self, thread=True):
        """ Start websocket. If thread is True, it will run in a different thread """
        self.connect()
        if thread:
            print(f"Starting websocket connection in a thread")
            # Daemon thread: the process may exit without joining it.
            self._websocket_thread = threading.Thread(target=self._run_forever)
            self._websocket_thread.daemon = True
            self._websocket_thread.start()
        else:
            self._run_forever()

    def on_message(self, ws, message):
        """ on message callback: parse the tick and refresh the option chain """
        print("Receive message. Update option chain")
        market_data = MarketData.create(message)
        option_chain = OptionChain.get_instance()
        option_chain.update(market_data)

    def on_open(self, ws):
        """ on open callback """
        print("Connection open")
        self._connected = True

    def on_close(self, ws):
        """ Connection closed """
        print("Connection closed")
        self._connected = False

    def send(self, data, opcode=websocket.ABNF.OPCODE_TEXT):
        """ Send JSON-encoded data to the web socket api; silently dropped when not connected """
        data = json.dumps(data)
        if self._connected:
            self._websocket.send(data=data, opcode=opcode)

    def _send_heartbeat(self):
        """ Send a heartbeat every 5 seconds to keep the web socket connection alive """
        data = {"a": "h", "v": [], "m": ""}
        while True:
            time.sleep(5)
            self.send(data, opcode=websocket.ABNF.OPCODE_PING)

    def send_heartbeat(self):
        """ Wrapper to run _send_heartbeat in a daemon thread """
        thread = threading.Thread(target=self._send_heartbeat)
        thread.daemon = True
        thread.start()

    def wait_until_connection_open(self):
        """ Busy-wait (10 ms polls) till the web socket connection is open """
        while not self._connected:
            time.sleep(0.01)

    @property
    def connected(self) -> bool:
        # True between the on_open and on_close callbacks.
        return self._connected
| en | 0.65366 | File: websocket.py Author: <NAME> Created on: 20/06/21, 7:36 pm https://websocket-client.readthedocs.io/en/latest/app.html#websocket._app.WebSocketApp.__init__ Web socket connection to get live feed market data Connect to web socket Run the websocket forever Start websocket. If thread is True, it will run in a different thread on message callback on open callback Connection closed Send data to web socket api Send heartbeat in every 10 sec to keep the web socket connection alive Wrapper to run send_heartbeat in thread Wait till web socket connection is open | 2.610019 | 3 |
softbankRobotics/naoqi-tablet-simulator/examples/test.py | Cmathou/S8-Simulated-Pepper-Project | 0 | 6620107 | <gh_stars>0
#!/usr/bin/env python
# based on http://doc.aldebaran.com/2-5/naoqi/core/altabletservice-api.html#ALTabletService::onTouchDown__qi::Signal:float.float:
# NOTE(review): Python 2 source (print statements, `except Exception, e`
# syntax) — it will not run under Python 3.
import qi
import argparse
import sys
import time
def main(session):
    # Demo of ALTabletService's onTouchDown signal together with the
    # showImage()/hideImage() methods.
    try:
        tabletService = session.service("ALTabletService")
        signalID = 0
        # test of onTouchDown signal from the tablet
        # AND of showImage() and hideImage() methods
        # depending on which part of the screen is touched,
        # display different images during 3s then hide them
        def callback(x, y):
            # (x, y) are the touch coordinates; the tablet is 1280 px wide.
            print "signal onTouchDown(" + str(x) + ", " + str(y) + ") received"
            xMax = 1280
            if (x < xMax/2):
                # left half of the screen
                tabletService.showImage("image_left.png")
            else:
                # right half of the screen
                tabletService.showImage("image_right.png")
            time.sleep(3)
            tabletService.hideImage()
        signalID = tabletService.onTouchDown.connect(callback)
        print "connected signal onTouchDown (" + str(signalID) + ")"
        # let it run for 30s
        time.sleep(30)
        tabletService.hideImage()
        tabletService.onTouchDown.disconnect(signalID)
    except Exception, e:
        print "Error: ", e
if __name__ == "__main__":
    # Standard NAOqi boilerplate: parse --ip/--port and open a qi session.
    parser = argparse.ArgumentParser()
    parser.add_argument("--ip", type=str, default="127.0.0.1",
                        help="Robot IP address. On robot or Local Naoqi: use '127.0.0.1'.")
    parser.add_argument("--port", type=int, default=9559,
                        help="Naoqi port number")
    args = parser.parse_args()
    session = qi.Session()
    try:
        session.connect("tcp://" + args.ip + ":" + str(args.port))
    except RuntimeError:
        print ("Can't connect to Naoqi at ip \"" + args.ip + "\" on port " + str(args.port) +".\n"
               "Please check your script arguments. Run with -h option for help.")
        sys.exit(1)
main(session) | #!/usr/bin/env python
# based on http://doc.aldebaran.com/2-5/naoqi/core/altabletservice-api.html#ALTabletService::onTouchDown__qi::Signal:float.float:
# NOTE(review): Python 2 source (print statements, `except Exception, e`
# syntax) — it will not run under Python 3.
import qi
import argparse
import sys
import time
def main(session):
    # Demo of ALTabletService's onTouchDown signal together with the
    # showImage()/hideImage() methods.
    try:
        tabletService = session.service("ALTabletService")
        signalID = 0
        # test of onTouchDown signal from the tablet
        # AND of showImage() and hideImage() methods
        # depending on which part of the screen is touched,
        # display different images during 3s then hide them
        def callback(x, y):
            # (x, y) are the touch coordinates; the tablet is 1280 px wide.
            print "signal onTouchDown(" + str(x) + ", " + str(y) + ") received"
            xMax = 1280
            if (x < xMax/2):
                # left half of the screen
                tabletService.showImage("image_left.png")
            else:
                # right half of the screen
                tabletService.showImage("image_right.png")
            time.sleep(3)
            tabletService.hideImage()
        signalID = tabletService.onTouchDown.connect(callback)
        print "connected signal onTouchDown (" + str(signalID) + ")"
        # let it run for 30s
        time.sleep(30)
        tabletService.hideImage()
        tabletService.onTouchDown.disconnect(signalID)
    except Exception, e:
        print "Error: ", e
if __name__ == "__main__":
    # Standard NAOqi boilerplate: parse --ip/--port and open a qi session.
    parser = argparse.ArgumentParser()
    parser.add_argument("--ip", type=str, default="127.0.0.1",
                        help="Robot IP address. On robot or Local Naoqi: use '127.0.0.1'.")
    parser.add_argument("--port", type=int, default=9559,
                        help="Naoqi port number")
    args = parser.parse_args()
    session = qi.Session()
    try:
        session.connect("tcp://" + args.ip + ":" + str(args.port))
    except RuntimeError:
        print ("Can't connect to Naoqi at ip \"" + args.ip + "\" on port " + str(args.port) +".\n"
               "Please check your script arguments. Run with -h option for help.")
        sys.exit(1)
    main(session)
indra/lib/python/indra/util/llsubprocess.py | humbletim/archived-casviewer | 0 | 6620108 | """\
@file llsubprocess.py
@author Phoenix
@date 2008-01-18
@brief The simplest possible wrapper for a common sub-process paradigm.
$LicenseInfo:firstyear=2007&license=mit$
Copyright (c) 2007-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
$/LicenseInfo$
"""
import os
import popen2
import time
import select
class Timeout(RuntimeError):
    """Raised when a subprocess exceeds its allotted run time."""
def run(command, args=None, data=None, timeout=None):
    """\
    @brief Run command with arguments
    This is it. This is the function I want to run all the time when doing
    subprocces, but end up copying the code everywhere. none of the
    standard commands are secure and provide a way to specify input, get
    all the output, and get the result.
    @param command A string specifying a process to launch.
    @param args Arguments to be passed to command. Must be list, tuple or None.
    @param data input to feed to the command.
    @param timeout Maximum number of many seconds to run.
    @raise Timeout if the process runs longer than timeout seconds.
    @return Returns (result, stdout, stderr) from process.
    """
    # NOTE(review): Python 2 code — popen2 was removed in Python 3; a port
    # would use subprocess.Popen with communicate(timeout=...).
    cmd = [command]
    if args:
        cmd.extend([str(arg) for arg in args])
    #print "cmd: ","' '".join(cmd)
    # Popen3 with capturestderr=True exposes tochild/fromchild/childerr pipes.
    child = popen2.Popen3(cmd, True)
    #print child.pid
    out = []
    err = []
    result = -1
    time_left = timeout
    # Only poll the child's stdin for writability while we still have data
    # to feed it; emptied and closed once all input is written.
    tochild = [child.tochild.fileno()]
    while True:
        time_start = time.time()
        #print "time:",time_left
        # select() doubles as the watchdog: time_left shrinks each iteration.
        p_in, p_out, p_err = select.select(
            [child.fromchild.fileno(), child.childerr.fileno()],
            tochild,
            [],
            time_left)
        if p_in:
            # Drain up to 32 KiB from stdout and stderr per wakeup.
            new_line = os.read(child.fromchild.fileno(), 32 * 1024)
            if new_line:
                #print "line:",new_line
                out.append(new_line)
            new_line = os.read(child.childerr.fileno(), 32 * 1024)
            if new_line:
                #print "error:", new_line
                err.append(new_line)
        if p_out:
            if data:
                #print "p_out"
                # NOTE(review): 'bytes' shadows the builtin of the same name.
                bytes = os.write(child.tochild.fileno(), data)
                data = data[bytes:]
                if len(data) == 0:
                    # All input written: close stdin so the child sees EOF.
                    data = None
                    tochild = []
                    child.tochild.close()
        result = child.poll()
        if result != -1:
            # At this point, the child process has exited and result
            # is the return value from the process. Between the time
            # we called select() and poll() the process may have
            # exited so read all the data left on the child process
            # stdout and stderr.
            last = child.fromchild.read()
            if last:
                out.append(last)
            last = child.childerr.read()
            if last:
                err.append(last)
            child.tochild.close()
            child.fromchild.close()
            child.childerr.close()
            break
        if time_left is not None:
            time_left -= (time.time() - time_start)
            if time_left < 0:
                # NOTE(review): raising here leaves the child running and its
                # pipes open — the caller gets no cleanup; confirm intent.
                raise Timeout
    #print "result:",result
    out = ''.join(out)
    #print "stdout:", out
    err = ''.join(err)
    #print "stderr:", err
    return result, out, err
| """\
@file llsubprocess.py
@author Phoenix
@date 2008-01-18
@brief The simplest possible wrapper for a common sub-process paradigm.
$LicenseInfo:firstyear=2007&license=mit$
Copyright (c) 2007-2009, Linden Research, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
$/LicenseInfo$
"""
import os
import popen2
import time
import select
class Timeout(RuntimeError):
    """Raised when a subprocess exceeds its allotted run time."""
def run(command, args=None, data=None, timeout=None):
    """\
    @brief Run command with arguments
    This is it. This is the function I want to run all the time when doing
    subprocces, but end up copying the code everywhere. none of the
    standard commands are secure and provide a way to specify input, get
    all the output, and get the result.
    @param command A string specifying a process to launch.
    @param args Arguments to be passed to command. Must be list, tuple or None.
    @param data input to feed to the command.
    @param timeout Maximum number of many seconds to run.
    @raise Timeout if the process runs longer than timeout seconds.
    @return Returns (result, stdout, stderr) from process.
    """
    # NOTE(review): Python 2 code — popen2 was removed in Python 3; a port
    # would use subprocess.Popen with communicate(timeout=...).
    cmd = [command]
    if args:
        cmd.extend([str(arg) for arg in args])
    #print "cmd: ","' '".join(cmd)
    # Popen3 with capturestderr=True exposes tochild/fromchild/childerr pipes.
    child = popen2.Popen3(cmd, True)
    #print child.pid
    out = []
    err = []
    result = -1
    time_left = timeout
    # Only poll the child's stdin for writability while we still have data
    # to feed it; emptied and closed once all input is written.
    tochild = [child.tochild.fileno()]
    while True:
        time_start = time.time()
        #print "time:",time_left
        # select() doubles as the watchdog: time_left shrinks each iteration.
        p_in, p_out, p_err = select.select(
            [child.fromchild.fileno(), child.childerr.fileno()],
            tochild,
            [],
            time_left)
        if p_in:
            # Drain up to 32 KiB from stdout and stderr per wakeup.
            new_line = os.read(child.fromchild.fileno(), 32 * 1024)
            if new_line:
                #print "line:",new_line
                out.append(new_line)
            new_line = os.read(child.childerr.fileno(), 32 * 1024)
            if new_line:
                #print "error:", new_line
                err.append(new_line)
        if p_out:
            if data:
                #print "p_out"
                # NOTE(review): 'bytes' shadows the builtin of the same name.
                bytes = os.write(child.tochild.fileno(), data)
                data = data[bytes:]
                if len(data) == 0:
                    # All input written: close stdin so the child sees EOF.
                    data = None
                    tochild = []
                    child.tochild.close()
        result = child.poll()
        if result != -1:
            # At this point, the child process has exited and result
            # is the return value from the process. Between the time
            # we called select() and poll() the process may have
            # exited so read all the data left on the child process
            # stdout and stderr.
            last = child.fromchild.read()
            if last:
                out.append(last)
            last = child.childerr.read()
            if last:
                err.append(last)
            child.tochild.close()
            child.fromchild.close()
            child.childerr.close()
            break
        if time_left is not None:
            time_left -= (time.time() - time_start)
            if time_left < 0:
                # NOTE(review): raising here leaves the child running and its
                # pipes open — the caller gets no cleanup; confirm intent.
                raise Timeout
    #print "result:",result
    out = ''.join(out)
    #print "stdout:", out
    err = ''.join(err)
    #print "stderr:", err
    return result, out, err
| en | 0.741886 | \ @file llsubprocess.py @author Phoenix @date 2008-01-18 @brief The simplest possible wrapper for a common sub-process paradigm. $LicenseInfo:firstyear=2007&license=mit$ Copyright (c) 2007-2009, Linden Research, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. $/LicenseInfo$ \ @brief Run command with arguments This is it. This is the function I want to run all the time when doing subprocces, but end up copying the code everywhere. none of the standard commands are secure and provide a way to specify input, get all the output, and get the result. @param command A string specifying a process to launch. @param args Arguments to be passed to command. Must be list, tuple or None. @param data input to feed to the command. @param timeout Maximum number of many seconds to run. @return Returns (result, stdout, stderr) from process. 
#print "cmd: ","' '".join(cmd) #print child.pid #print "time:",time_left #print "line:",new_line #print "error:", new_line #print "p_out" # At this point, the child process has exited and result # is the return value from the process. Between the time # we called select() and poll() the process may have # exited so read all the data left on the child process # stdout and stderr. #print "result:",result #print "stdout:", out #print "stderr:", err | 2.457791 | 2 |
demos/HFL/communicator/com_utils.py | monadyn/fedlearn-algo | 86 | 6620109 | <filename>demos/HFL/communicator/com_utils.py
# Copyright 2021 Fedlearn authors.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os,sys
from typing import Any, Optional, Union, Dict, List
from abc import abstractmethod,ABC
root_path = os.getcwd()
sys.path.append(root_path)
sys.path.append(os.path.join(root_path,'demos/HFL'))
from demos.HFL.common.hfl_message import HFL_MSG
from demos.HFL.common.msg_handler import (Msg_Handler, Raw_Msg_Observer)
from demos.HFL.communicator.base_communicator import BaseCommunicator
from core.entity.common.machineinfo import MachineInfo
import queue
import threading
lock = threading.Lock()
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(module) s - %(funcName) s - %(lineno) d - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
class AttributeDict(dict):
    """dict whose keys are additionally reachable as attributes.

    ``d.key`` reads ``d["key"]`` and ``d.key = v`` writes ``d["key"] = v``.
    Missing keys raise KeyError (not AttributeError), matching plain dict
    item access.
    """
    __slots__ = ()

    def __getattr__(self, name):
        # Delegate attribute reads to dict item lookup.
        return dict.__getitem__(self, name)

    def __setattr__(self, name, value):
        # Delegate attribute writes to dict item assignment.
        dict.__setitem__(self, name, value)
class Com_Machine_Info(ABC):
    """Abstract identity of a machine endpoint used by senders/receivers."""
    # NOTE(review): get_id is not decorated with @abstractmethod, so the ABC
    # machinery does not force subclasses to implement it — confirm intent.
    def get_id(self) -> str:
        '''return machine identification '''
class HFL_Message_Raw_Converter():
    """
    Convert between HFL_MSG and the low-level, communication-specific
    data/message format.
    The HFL framework allows users to build their own communication methods
    such as grpc, http, socket or MPI. To replace the framework-provided
    communication with a customized one, users implement <raw2HFLMsg> and
    <HFLMsg2raw> to convert between HFL_MSG and the data/message type used
    by their lower-level transport.
    """
    # NOTE(review): methods are marked @abstractmethod but the class does not
    # inherit from abc.ABC, so instantiation is not actually prevented —
    # confirm whether that is intentional.
    @abstractmethod
    def raw2HFLMsg(self,rawMsg:Any)->HFL_MSG:
        '''
        Convert a raw message of the base communication into an HFL message.
        Parameters:
        ----------
        rawMsg: raw message of base communication
        Return:
        ----------
        HFL_MSG: converted HFL message
        '''
        pass
    @abstractmethod
    def HFLMsg2raw(self,msg:HFL_MSG)->Any:
        '''
        Convert an HFL message into a message of the base communication.
        Parameters:
        ----------
        msg: HFL message
        Return:
        ----------
        message of base communication
        '''
        pass
class Message_Receiver(object):
    """Inbound half of the transport.

    A concrete receiver runs in a thread or separate process, accepts raw
    messages from remote machine(s), and forwards each one to the attached
    observer (if any).
    """

    def __init__(self, config, msg_observer: Raw_Msg_Observer = None):
        """
        Parameters:
        ----------
        config: Dict of the receiver's configuration values.
        msg_observer: Observer notified whenever a raw message arrives.
        """
        self.config = config
        self.msg_observer: Raw_Msg_Observer = msg_observer

    def set_msg_observer(self, observer: Raw_Msg_Observer) -> None:
        """Attach *observer* to receive raw messages forwarded by this receiver."""
        self.msg_observer = observer

    def receiving_msg(self, data: Any) -> Any:
        """Forward *data* to the observer and return its response.

        Returns None when no observer is attached.
        """
        if not self.msg_observer:
            return None
        return self.msg_observer.receive_message(data)

    @abstractmethod
    def start(self):
        pass

    @abstractmethod
    def stop(self):
        pass
# Response = Union[str,bytes]
class Message_Sender():
    """Outbound half of the transport: delivers data to one remote machine."""

    def __init__(self, receiver_info: Com_Machine_Info):
        # Identity/address of the remote endpoint this sender targets.
        self.receiver_info = receiver_info

    def get_receiver_info(self) -> Com_Machine_Info:
        """Return the machine info of the configured remote receiver."""
        return self.receiver_info

    @abstractmethod
    def send(self, data: Any) -> Any:
        pass
class GeneralCommunicator(BaseCommunicator):
    """Transport-agnostic communicator.

    Pairs a Message_Sender with a Message_Receiver, converts between raw
    wire messages and HFL_MSG via the given converter, queues inbound
    messages, and dispatches them to registered Msg_Handler instances.
    """
    def __init__(self,
                 sender:Message_Sender,
                 receiver:Message_Receiver,
                 msg_converter:HFL_Message_Raw_Converter,
                 mode ='client'):
        # 'client' handles messages synchronously on the routine thread;
        # 'proxy' spawns one thread per message (see msg_handling_routine).
        self.mode = mode
        self.sender = sender
        self.receiver = receiver
        self.msg_converter = msg_converter
        # Register ourselves as the receiver's observer so raw messages
        # flow into receive_message() below.
        self.receiver.set_msg_observer(self)
        self._msg_handlers = []
        # Inbound HFL_MSG queue, drained by msg_handling_routine().
        self.msg_queue = queue.Queue()
        self.is_running = False

    def receive_message(self, data: Any) -> Any:
        ''' Convert receiver's Raw message into HFL_MSG and put to processing queque '''
        msg:HFL_MSG = self.msg_converter.raw2HFLMsg(data)
        logger.info(f'{type(self).__name__} receiverd msg :Type= {msg.type}')
        # The module-level lock guards the queue alongside the handling routine.
        lock.acquire()
        self.msg_queue.put(msg)
        lock.release()
        # Acknowledge receipt to the sender with a CONTROL_RECEIVE reply.
        resp_msg = HFL_MSG(HFL_MSG.CONTROL_RECEIVE,
                           msg.sender,
                           msg.receiver)
        raw_resp_data = self.msg_converter.HFLMsg2raw(resp_msg)
        logger.info(f'{type(self).__name__} port:{msg.receiver.port} put msg into queque: Type= {msg.type}')
        return raw_resp_data

    #@abstractmethod
    def run(self):
        # # 1. start gprc service that reive msg and stor in quque
        # thread = threading.Thread(target=grpc_server.serve, args=(self.receiver,))
        # thread.start()
        self.start_message_receiving_routine()
        # #2. start main message routine that retreive HFL_MSG and routine to corresponding processing fuction
        self.is_running = True
        self.msg_handling_routine()

    @abstractmethod
    def start_message_receiving_routine(self):
        pass

    def stop(self):
        # Signals msg_handling_routine() to exit its loop.
        self.is_running = False

    def msg_handling_routine(self):
        '''
        Start Message routine, once msg received forward to upstream handler,
        Note that it is critical NOT to start a new thread at client side to avoid "slow CUDA GPU training"
        '''
        # NOTE(review): this loop busy-polls qsize() without sleeping;
        # consider Queue.get(timeout=...) to avoid spinning a CPU core.
        while self.is_running:
            if self.msg_queue.qsize() > 0:
                lock.acquire()
                msg : HFL_MSG = self.msg_queue.get()
                lock.release()
                msg_type = msg.get_type()
                logger.info(f'{type(self).__name__} in Mode:{self.mode} msg routine handling ==========>>: Type = {msg_type}')
                for handler in self._msg_handlers:
                    logger.info(f'{type(self).__name__} : Forward msg to {type(handler)}----->>:type={msg_type}')
                    # Critical: set mode to "client" to avoid starting thread for GPU training, which cause very slow training!
                    if self.mode =='client':
                        handler.handle_message(msg_type, msg)
                    elif self.mode=='proxy':
                        threading.Thread(target=handler.handle_message, args=(msg_type, msg,)).start()
                    else:
                        raise(ValueError(f'Mode {self.mode} is not a valid mode'))
        return

    def add_msg_handler(self, handler:Msg_Handler)->None:
        # Handlers are notified in registration order for every inbound message.
        self._msg_handlers.append(handler)

    def send_message(self, msg:HFL_MSG)->HFL_MSG:
        # Serialize, transmit, and convert the transport's reply back to HFL_MSG.
        res_msg = self.msg_converter.HFLMsg2raw(msg)
        raw_msg:Any = \
            self.sender.send(res_msg)
        return self.msg_converter.raw2HFLMsg(raw_msg)

    def remove_msg_handler(self, handler: Msg_Handler):
        try:
            self._msg_handlers.remove(handler)
        except Exception as e:
            # Removal of an unregistered handler is logged, not raised.
            logger.info(f'{type(self)} error {e}')

    def get_MachineInfo(self)->MachineInfo:
        # NOTE(review): receiver.machine_info is not set by
        # Message_Receiver.__init__ in this file — presumably provided by
        # concrete receiver subclasses; confirm.
        return self.receiver.machine_info
class MachineInfo_Wrapper(Com_Machine_Info):
    """Adapts the core MachineInfo entity to the Com_Machine_Info interface."""

    def __init__(self, ip, port, token='<PASSWORD>'):
        # Wrapped core entity carrying the endpoint's ip/port/token.
        self.core_MachineInfo = MachineInfo(ip, port, token)

    def get_id(self) -> str:
        # "ip:port" identifies a communication endpoint here.
        return f'{self.core_MachineInfo.ip}:{self.core_MachineInfo.port}'

    def __repr__(self) -> str:
        return self.get_id()

    def get_CoreMachineInfo(self):
        """Expose the wrapped core MachineInfo instance."""
        return self.core_MachineInfo
| <filename>demos/HFL/communicator/com_utils.py
# Copyright 2021 Fedlearn authors.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os,sys
from typing import Any, Optional, Union, Dict, List
from abc import abstractmethod,ABC
root_path = os.getcwd()
sys.path.append(root_path)
sys.path.append(os.path.join(root_path,'demos/HFL'))
from demos.HFL.common.hfl_message import HFL_MSG
from demos.HFL.common.msg_handler import (Msg_Handler, Raw_Msg_Observer)
from demos.HFL.communicator.base_communicator import BaseCommunicator
from core.entity.common.machineinfo import MachineInfo
import queue
import threading
lock = threading.Lock()
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(module) s - %(funcName) s - %(lineno) d - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
class AttributeDict(dict):
    """dict subclass whose items are also reachable as attributes.

    ``d.key`` reads ``d['key']`` (missing names raise KeyError, matching
    dict lookup semantics) and ``d.key = v`` stores ``d['key'] = v``.
    """

    __slots__ = ()  # no per-instance __dict__: all state lives in the mapping

    def __getattr__(self, name):
        # Only invoked when normal attribute lookup fails, so real dict
        # methods keep working; missing keys raise KeyError, exactly like
        # aliasing __getattr__ to dict.__getitem__.
        return dict.__getitem__(self, name)

    def __setattr__(self, name, value):
        dict.__setitem__(self, name, value)
class Com_Machine_Info(ABC):
    """Abstract identity of a communication endpoint.

    Subclasses must implement :meth:`get_id`.  The original declared the
    method's intent only in a docstring; without ``@abstractmethod`` the
    ABC was instantiable and ``get_id`` silently returned ``None``.
    """

    @abstractmethod
    def get_id(self) -> str:
        """Return a unique machine identification string."""
class HFL_Message_Raw_Converter():
    """Two-way translator between HFL_MSG and a transport's native payload.

    The HFL framework lets users plug in their own transports (grpc, http,
    raw sockets, MPI, ...).  A custom transport supplies one of these
    converters so the framework keeps speaking HFL_MSG while the wire
    carries whatever the transport understands: implement ``raw2HFLMsg``
    and ``HFLMsg2raw`` for your payload type.
    """

    @abstractmethod
    def raw2HFLMsg(self, rawMsg: Any) -> HFL_MSG:
        """Build an HFL_MSG from a transport-level payload.

        Parameters
        ----------
        rawMsg:
            Payload as delivered by the underlying transport.

        Returns
        -------
        HFL_MSG
            The equivalent framework-level message.
        """

    @abstractmethod
    def HFLMsg2raw(self, msg: HFL_MSG) -> Any:
        """Build a transport-level payload from an HFL_MSG.

        Parameters
        ----------
        msg:
            Framework-level message to serialise.

        Returns
        -------
        Any
            Payload understood by the underlying transport.
        """
class Message_Receiver(object):
    """Transport-side listener that forwards incoming raw messages.

    A receiver is expected to run as a dispatcher (its own thread or
    process), accept raw payloads from remote machines and hand each one
    to the registered observer, relaying whatever the observer answers.
    """

    def __init__(self,
                 config,
                 msg_observer:Raw_Msg_Observer=None):
        # config: transport-specific settings (Dict of str/float/int values).
        # msg_observer: notified for every raw message that arrives.
        self.config = config
        self.msg_observer: Raw_Msg_Observer = msg_observer

    def set_msg_observer(self,
                         observer:Raw_Msg_Observer,
                         )->None:
        """Register the observer that raw messages are forwarded to."""
        self.msg_observer = observer

    def receiving_msg(self, data:Any)->Any:
        """Forward one raw payload to the observer (if any) and return its reply."""
        observer = self.msg_observer
        if not observer:
            return None
        return observer.receive_message(data)

    @abstractmethod
    def start(self):
        """Begin listening for remote messages."""

    @abstractmethod
    def stop(self):
        """Stop listening."""
# Response = Union[str,bytes]
class Message_Sender():
    """Pushes raw payloads to one remote machine via :meth:`send`."""

    def __init__(self,
                 receiver_info:Com_Machine_Info):
        # Identity of the remote endpoint every payload is delivered to.
        self.receiver_info = receiver_info

    def get_receiver_info(self)->Com_Machine_Info:
        """Return the identity of the remote endpoint."""
        return self.receiver_info

    @abstractmethod
    def send(self, data:Any)->Any:
        """Deliver ``data`` to the remote machine and return its raw reply."""
class GeneralCommunicator(BaseCommunicator):
    """Transport-agnostic communicator.

    Pairs a Message_Sender/Message_Receiver with an HFL_Message_Raw_Converter:
    incoming raw payloads are converted to HFL_MSG and queued, then drained by
    ``msg_handling_routine`` which dispatches to registered handlers; outgoing
    HFL_MSGs are serialised and pushed through the sender.
    """

    def __init__(self,
                 sender:Message_Sender,
                 receiver:Message_Receiver,
                 msg_converter:HFL_Message_Raw_Converter,
                 mode ='client'):
        # mode: 'client' handles each message inline (avoids spawning threads,
        # which slows GPU training); 'proxy' handles each message on its own
        # thread.  Any other value raises inside msg_handling_routine.
        self.mode = mode
        self.sender = sender
        self.receiver = receiver
        self.msg_converter = msg_converter
        # Register ourselves so the receiver forwards every raw message to
        # receive_message() below.
        self.receiver.set_msg_observer(self)
        self._msg_handlers = []
        self.msg_queue = queue.Queue()
        self.is_running = False

    def receive_message(self, data: Any) -> Any:
        '''Convert the receiver's raw message into an HFL_MSG, enqueue it for
        processing, and immediately return a CONTROL_RECEIVE acknowledgement
        (in raw form) to the remote side.'''
        msg:HFL_MSG = self.msg_converter.raw2HFLMsg(data)
        logger.info(f'{type(self).__name__} receiverd msg :Type= {msg.type}')
        # NOTE(review): queue.Queue is already thread-safe; the module-level
        # lock around put/get looks redundant — kept as-is.
        lock.acquire()
        self.msg_queue.put(msg)
        lock.release()
        resp_msg = HFL_MSG(HFL_MSG.CONTROL_RECEIVE,
                           msg.sender,
                           msg.receiver)
        raw_resp_data = self.msg_converter.HFLMsg2raw(resp_msg)
        logger.info(f'{type(self).__name__} port:{msg.receiver.port} put msg into queque: Type= {msg.type}')
        return raw_resp_data

    #@abstractmethod
    def run(self):
        """Start the receiving backend, then block in the handling loop."""
        # # 1. start gprc service that reive msg and stor in quque
        # thread = threading.Thread(target=grpc_server.serve, args=(self.receiver,))
        # thread.start()
        self.start_message_receiving_routine()
        # #2. start main message routine that retreive HFL_MSG and routine to corresponding processing fuction
        self.is_running = True
        self.msg_handling_routine()

    @abstractmethod
    def start_message_receiving_routine(self):
        # Transport subclasses start their listener (e.g. a grpc server) here.
        pass

    def stop(self):
        # Flag checked by msg_handling_routine; the loop exits on its next
        # iteration.
        self.is_running = False

    def msg_handling_routine(self):
        '''
        Start Message routine, once msg received forward to upstream handler,
        Note that it is critical NOT to start a new thread at client side to avoid "slow CUDA GPU training"
        '''
        # NOTE(review): this loop busy-waits (no sleep / blocking get), so it
        # spins at full CPU while the queue is empty — confirm intended.
        while self.is_running:
            if self.msg_queue.qsize() > 0:
                lock.acquire()
                msg : HFL_MSG = self.msg_queue.get()
                lock.release()
                msg_type = msg.get_type()
                logger.info(f'{type(self).__name__} in Mode:{self.mode} msg routine handling ==========>>: Type = {msg_type}')
                for handler in self._msg_handlers:
                    logger.info(f'{type(self).__name__} : Forward msg to {type(handler)}----->>:type={msg_type}')
                    # Critical: set mode to "client" to avoid starting thread for GPU training, which cause very slow training!
                    if self.mode =='client':
                        handler.handle_message(msg_type, msg)
                    elif self.mode=='proxy':
                        threading.Thread(target=handler.handle_message, args=(msg_type, msg,)).start()
                    else:
                        raise(ValueError(f'Mode {self.mode} is not a valid mode'))
        return

    def add_msg_handler(self, handler:Msg_Handler)->None:
        # Handlers are notified in registration order for every message.
        self._msg_handlers.append(handler)

    def send_message(self, msg:HFL_MSG)->HFL_MSG:
        """Serialise ``msg``, send it through the sender, and decode the reply."""
        res_msg = self.msg_converter.HFLMsg2raw(msg)
        raw_msg:Any = \
            self.sender.send(res_msg)
        return self.msg_converter.raw2HFLMsg(raw_msg)

    def remove_msg_handler(self, handler: Msg_Handler):
        # Best-effort removal: unknown handlers are logged, not raised.
        try:
            self._msg_handlers.remove(handler)
        except Exception as e:
            logger.info(f'{type(self)} error {e}')

    def get_MachineInfo(self)->MachineInfo:
        # NOTE(review): Message_Receiver as defined in this module never sets
        # a ``machine_info`` attribute — confirm concrete receivers do.
        return self.receiver.machine_info
class MachineInfo_Wrapper(Com_Machine_Info):
    """Adapts the core ``MachineInfo`` record to the Com_Machine_Info API."""

    def __init__(self, ip, port, token='<PASSWORD>'):
        # Wrap rather than subclass: keeps the core record swappable.
        self.core_MachineInfo = MachineInfo(ip, port, token)

    def get_id(self) -> str:
        """Identify the endpoint as ``"<ip>:<port>"``."""
        info = self.core_MachineInfo
        return f'{info.ip}:{info.port}'

    def __repr__(self) -> str:
        return self.get_id()

    def get_CoreMachineInfo(self):
        """Expose the wrapped core ``MachineInfo`` object."""
        return self.core_MachineInfo
| en | 0.803455 | # Copyright 2021 Fedlearn authors. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. return machine identification Convert between HFL_MSG and low-level communication specific data/message format, The HFL framework allow users to build their own communication methods such as grpc, http, socket or MPI etc. To replace framework provided communication methods with customized ones, user need to implement <raw2HFLMsg> and <HFLMsg2raw> functions to convert between HFL_MSG and their own data/message used in lower-level user build communication methods. Convert raw message of base communication to HFL message Parameters: ---------- rawMsg: raw message of base communication Return: _______ HFL_MSG: converted HFL message Convert HFL message to message of base communication Parameters: ---------- msg: HFL message Return: ---------- message of base communication Message_Receiver are suppose to work as dispatcher, which runs on thread or new process to receive message send from remote machine(s), and then forward to observer if it is given. 
Parameters: ---------- config :Dict[Union[str,str,float,int]], receiver's configuration msg_observer: Observer that gets notified when raw message is received Set observer that will receive raw message forwarded from <Message_Receiver> Parameters: ---------- observer: Observer that gets notified when raw message is received Return: ---------- None Parameters: ---------- data: raw message received Return: ---------- raw message # Response = Union[str,bytes] Message_Sender send message/data to remote machine via <send> func Convert receiver's Raw message into HFL_MSG and put to processing queque #@abstractmethod # # 1. start gprc service that reive msg and stor in quque # thread = threading.Thread(target=grpc_server.serve, args=(self.receiver,)) # thread.start() # #2. start main message routine that retreive HFL_MSG and routine to corresponding processing fuction Start Message routine, once msg received forward to upstream handler, Note that it is critical NOT to start a new thread at client side to avoid "slow CUDA GPU training" # Critical: set mode to "client" to avoid starting thread for GPU training, which cause very slow training! | 1.961338 | 2 |
src/db_models/models.py | libercapital/dados_publicos_cnpj_receita_federal | 7 | 6620110 | <gh_stars>1-10
from sqlalchemy import Column
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.types import String, Float, Boolean, Date
from src import settings
from src.db_models.config_models import DBModelConfig
Base = declarative_base()
class CompanyRoot(Base, DBModelConfig):
    """Company at CNPJ-root level: one row per legal entity ("empresas" file)."""

    __tablename__ = settings.DB_MODEL_COMPANY_ROOT  # empresas
    cnpj_root = Column('cnpj_root', String, primary_key=True, index=True)
    name = Column('name', String)
    legal_nature_code = Column('legal_nature_code', String)
    liable_qualification_code = Column('liable_qualification_code', String)
    social_capital = Column('social_capital', Float)
    size_code = Column('size_code', String)
    efr = Column('efr', String)
    # Number of columns above that come straight from the raw source file
    # (presumably consumed by the parser to split raw vs derived — confirm).
    N_RAW_COLUMNS = 7
    # RAW COLUMNS FOR PARSER ENDS HERE
    # NEW COLUMNS: human-readable descriptions resolved from the *_code fields.
    legal_nature_desc = Column('legal_nature_desc', String)
    liable_qualification_desc = Column('liable_qualification_desc', String)
    size_desc = Column('size_desc', String)
class Company(Base, DBModelConfig):
    """Establishment-level record: one row per full CNPJ (HQ or branch)."""

    __tablename__ = settings.DB_MODEL_COMPANY  # empresas
    cnpj_root = Column('cnpj_root', String, index=True)
    cnpj_branch = Column('cnpj_branch', String)
    cnpj_digit = Column('cnpj_digit', String)
    headquarters = Column('headquarters', Boolean)
    trade_name = Column('trade_name', String)
    situation = Column('situation_code', String)
    situation_date = Column('situation_date', Date)
    situation_reason = Column('situation_reason_code', String)
    city_outer_name = Column('city_outer_name', String)
    country_outer_name = Column('country_outer_name', String)
    foundation_date = Column('foundation_date', Date)
    cnae_main = Column('cnae_main', String)
    cnae_sec = Column('cnae_sec', String)
    # contacts
    address_type = Column('address_type', String)
    address = Column('address', String)
    address_number = Column('address_number', String)
    address_complement = Column('address_complement', String)
    address_neighborhood = Column('address_neighborhood', String)
    zip_code = Column('address_zip_code', String)
    uf = Column('address_fu', String)
    city_code = Column('address_city_code', String)
    tel1_dd = Column('tel1_dd', String)
    tel1 = Column('tel1', String)
    tel2_dd = Column('tel2_dd', String)
    tel2 = Column('tel2', String)
    fax_dd = Column('fax_dd', String)
    fax = Column('fax', String)
    email = Column('email', String)
    special_situation = Column('special_situation', String)
    special_situation_date = Column('special_situation_date', Date)
    # Count of columns sourced directly from the raw file (see marker below).
    N_RAW_COLUMNS = 30
    # RAW COLUMNS FOR PARSER ENDS HERE
    # NEW COLUMNS: derived fields added during loading.
    # cnpj presumably concatenates root+branch+digit — confirm in the loader.
    cnpj = Column('cnpj', String, primary_key=True, index=True)
    situation_desc = Column('situation_desc', String)
    situation_reason_desc = Column('situation_reason_desc', String)
    city = Column('address_city_name', String)
class Partners(Base, DBModelConfig):
    """Partner/shareholder records, keyed by (cnpj_root, partner_doc)."""

    __tablename__ = settings.DB_MODEL_PARTNERS  # empresas
    cnpj_root = Column('cnpj_root', String, primary_key=True, index=True)
    type_partner_code = Column('type_partner_code', String)
    name = Column('name', String)
    partner_doc = Column('partner_doc', String, primary_key=True)
    qualification_code = Column('qualification_code', String)
    entry_date = Column('entry_date', Date)
    country = Column('country', String)
    legal_representation_name = Column('legal_representation_name', String)
    legal_representation_doc = Column('legal_representation_doc', String)
    legal_representation_qualification_code = Column('legal_representation_qualification_code', String)
    age_band_code = Column('age_band_code', String)
    # Count of columns sourced directly from the raw file (see marker below).
    N_RAW_COLUMNS = 11
    # RAW COLUMNS FOR PARSER ENDS HERE
    # NEW COLUMNS: human-readable descriptions resolved from the *_code fields.
    type_partner_desc = Column('type_partner_desc', String)
    qualification_desc = Column('qualification_desc', String)
    legal_representation_qualification_desc = Column('legal_representation_qualification_desc', String)
    age_band_desc = Column('age_band_desc', String)
class CompanyRootSimples(Base, DBModelConfig):
    """Simples / MEI option history per CNPJ root (entry and exit dates)."""

    __tablename__ = settings.DB_MODEL_COMPANY_ROOT_SIMPLES
    cnpj_root = Column('cnpj_root', String, primary_key=True, index=True)
    simples_option_code = Column('simples_option_code', String)
    simples_entry_date = Column('simples_entry_date', Date)
    simples_exit_date = Column('simples_exit_date', Date)
    mei_option_code = Column('mei_option_code', String)
    mei_entry_date = Column('mei_entry_date', Date)
    mei_exit_date = Column('mei_exit_date', Date)
    # Count of columns sourced directly from the raw file (see marker below).
    N_RAW_COLUMNS = 7
    # RAW COLUMNS FOR PARSER ENDS HERE
    # NEW COLUMNS: human-readable descriptions resolved from the *_code fields.
    simples_option_desc = Column('simples_option_desc', String)
    mei_option_desc = Column('mei_option_desc', String)
class CompanyTaxRegime(Base, DBModelConfig):
    """Tax regime per CNPJ and reference year, with city/state location."""

    __tablename__ = settings.DB_MODEL_COMPANY_TAX_REGIME
    ref_year = Column('ref_year', String)
    cnpj = Column('cnpj', String, primary_key=True, index=True)
    tax_regime = Column('tax_regime', String)
    city = Column('city_name', String)
    uf = Column('fu', String)
    # Count of columns sourced directly from the raw file (see marker below).
    N_RAW_COLUMNS = 5
    # RAW COLUMNS FOR PARSER ENDS HERE
    # NEW COLUMNS: derived during loading for joins against root-level tables.
    cnpj_root = Column('cnpj_root', String, index=True)
class RefDate(Base, DBModelConfig):
    """Bookkeeping table storing the reference date(s) of the loaded extract."""

    __tablename__ = settings.DB_MODEL_REF_DATE
    ref_date = Column('ref_date', Date, primary_key=True, index=True)
    N_RAW_COLUMNS = 1
# Registry mapping each configured table name to its ORM model class, so
# callers can resolve a model from its settings.DB_MODEL_* identifier.
dict_db_models = {settings.DB_MODEL_COMPANY_ROOT: CompanyRoot,
                  settings.DB_MODEL_COMPANY: Company,
                  settings.DB_MODEL_COMPANY_TAX_REGIME: CompanyTaxRegime,
                  settings.DB_MODEL_PARTNERS: Partners,
                  settings.DB_MODEL_COMPANY_ROOT_SIMPLES: CompanyRootSimples,
                  settings.DB_MODEL_REF_DATE: RefDate,
                  }
| from sqlalchemy import Column
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.types import String, Float, Boolean, Date
from src import settings
from src.db_models.config_models import DBModelConfig
Base = declarative_base()
class CompanyRoot(Base, DBModelConfig):
__tablename__ = settings.DB_MODEL_COMPANY_ROOT # empresas
cnpj_root = Column('cnpj_root', String, primary_key=True, index=True)
name = Column('name', String)
legal_nature_code = Column('legal_nature_code', String)
liable_qualification_code = Column('liable_qualification_code', String)
social_capital = Column('social_capital', Float)
size_code = Column('size_code', String)
efr = Column('efr', String)
N_RAW_COLUMNS = 7
# RAW COLUMNS FOR PARSER ENDS HERE
# NEW COLUMNS
legal_nature_desc = Column('legal_nature_desc', String)
liable_qualification_desc = Column('liable_qualification_desc', String)
size_desc = Column('size_desc', String)
class Company(Base, DBModelConfig):
__tablename__ = settings.DB_MODEL_COMPANY # empresas
cnpj_root = Column('cnpj_root', String, index=True)
cnpj_branch = Column('cnpj_branch', String)
cnpj_digit = Column('cnpj_digit', String)
headquarters = Column('headquarters', Boolean)
trade_name = Column('trade_name', String)
situation = Column('situation_code', String)
situation_date = Column('situation_date', Date)
situation_reason = Column('situation_reason_code', String)
city_outer_name = Column('city_outer_name', String)
country_outer_name = Column('country_outer_name', String)
foundation_date = Column('foundation_date', Date)
cnae_main = Column('cnae_main', String)
cnae_sec = Column('cnae_sec', String)
# contacts
address_type = Column('address_type', String)
address = Column('address', String)
address_number = Column('address_number', String)
address_complement = Column('address_complement', String)
address_neighborhood = Column('address_neighborhood', String)
zip_code = Column('address_zip_code', String)
uf = Column('address_fu', String)
city_code = Column('address_city_code', String)
tel1_dd = Column('tel1_dd', String)
tel1 = Column('tel1', String)
tel2_dd = Column('tel2_dd', String)
tel2 = Column('tel2', String)
fax_dd = Column('fax_dd', String)
fax = Column('fax', String)
email = Column('email', String)
special_situation = Column('special_situation', String)
special_situation_date = Column('special_situation_date', Date)
N_RAW_COLUMNS = 30
# RAW COLUMNS FOR PARSER ENDS HERE
# NEW COLUMNS
cnpj = Column('cnpj', String, primary_key=True, index=True)
situation_desc = Column('situation_desc', String)
situation_reason_desc = Column('situation_reason_desc', String)
city = Column('address_city_name', String)
class Partners(Base, DBModelConfig):
__tablename__ = settings.DB_MODEL_PARTNERS # empresas
cnpj_root = Column('cnpj_root', String, primary_key=True, index=True)
type_partner_code = Column('type_partner_code', String)
name = Column('name', String)
partner_doc = Column('partner_doc', String, primary_key=True)
qualification_code = Column('qualification_code', String)
entry_date = Column('entry_date', Date)
country = Column('country', String)
legal_representation_name = Column('legal_representation_name', String)
legal_representation_doc = Column('legal_representation_doc', String)
legal_representation_qualification_code = Column('legal_representation_qualification_code', String)
age_band_code = Column('age_band_code', String)
N_RAW_COLUMNS = 11
# RAW COLUMNS FOR PARSER ENDS HERE
# NEW COLUMNS
type_partner_desc = Column('type_partner_desc', String)
qualification_desc = Column('qualification_desc', String)
legal_representation_qualification_desc = Column('legal_representation_qualification_desc', String)
age_band_desc = Column('age_band_desc', String)
class CompanyRootSimples(Base, DBModelConfig):
__tablename__ = settings.DB_MODEL_COMPANY_ROOT_SIMPLES
cnpj_root = Column('cnpj_root', String, primary_key=True, index=True)
simples_option_code = Column('simples_option_code', String)
simples_entry_date = Column('simples_entry_date', Date)
simples_exit_date = Column('simples_exit_date', Date)
mei_option_code = Column('mei_option_code', String)
mei_entry_date = Column('mei_entry_date', Date)
mei_exit_date = Column('mei_exit_date', Date)
N_RAW_COLUMNS = 7
# RAW COLUMNS FOR PARSER ENDS HERE
# NEW COLUMNS
simples_option_desc = Column('simples_option_desc', String)
mei_option_desc = Column('mei_option_desc', String)
class CompanyTaxRegime(Base, DBModelConfig):
__tablename__ = settings.DB_MODEL_COMPANY_TAX_REGIME
ref_year = Column('ref_year', String)
cnpj = Column('cnpj', String, primary_key=True, index=True)
tax_regime = Column('tax_regime', String)
city = Column('city_name', String)
uf = Column('fu', String)
N_RAW_COLUMNS = 5
# RAW COLUMNS FOR PARSER ENDS HERE
# NEW COLUMNS
cnpj_root = Column('cnpj_root', String, index=True)
class RefDate(Base, DBModelConfig):
__tablename__ = settings.DB_MODEL_REF_DATE
ref_date = Column('ref_date', Date, primary_key=True, index=True)
N_RAW_COLUMNS = 1
dict_db_models = {settings.DB_MODEL_COMPANY_ROOT: CompanyRoot,
settings.DB_MODEL_COMPANY: Company,
settings.DB_MODEL_COMPANY_TAX_REGIME: CompanyTaxRegime,
settings.DB_MODEL_PARTNERS: Partners,
settings.DB_MODEL_COMPANY_ROOT_SIMPLES: CompanyRootSimples,
settings.DB_MODEL_REF_DATE: RefDate,
} | en | 0.414277 | # empresas # RAW COLUMNS FOR PARSER ENDS HERE # NEW COLUMNS # empresas # contacts # RAW COLUMNS FOR PARSER ENDS HERE # NEW COLUMNS # empresas # RAW COLUMNS FOR PARSER ENDS HERE # NEW COLUMNS # RAW COLUMNS FOR PARSER ENDS HERE # NEW COLUMNS # RAW COLUMNS FOR PARSER ENDS HERE # NEW COLUMNS | 2.495576 | 2 |
lib/ext_transform.py | reyuwei/PIFu | 1,359 | 6620111 | <filename>lib/ext_transform.py<gh_stars>1000+
import random
import numpy as np
from skimage.filters import gaussian
import torch
from PIL import Image, ImageFilter
class RandomVerticalFlip(object):
    """Flip a PIL image top-to-bottom with probability 0.5."""

    def __call__(self, img):
        flip = random.random() < 0.5
        return img.transpose(Image.FLIP_TOP_BOTTOM) if flip else img
class DeNormalize(object):
    """Undo per-channel normalisation in place: channel = channel * std + mean."""

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, tensor):
        # Mutates ``tensor`` channel by channel and returns the same object.
        for channel, mu, sigma in zip(tensor, self.mean, self.std):
            channel.mul_(sigma).add_(mu)
        return tensor
class MaskToTensor(object):
    """Convert a segmentation mask (PIL image / array-like) to a LongTensor."""

    def __call__(self, img):
        mask = np.array(img, dtype=np.int32)
        return torch.from_numpy(mask).long()
class FreeScale(object):
    """Resize an image to a fixed (h, w) target, ignoring aspect ratio."""

    def __init__(self, size, interpolation=Image.BILINEAR):
        # Callers pass (h, w) but PIL's resize expects (w, h): reverse it.
        self.size = tuple(reversed(size))
        self.interpolation = interpolation

    def __call__(self, img):
        return img.resize(self.size, self.interpolation)
class FlipChannels(object):
    """Reverse an image's channel order (RGB <-> BGR) and return a PIL image."""

    def __call__(self, img):
        reordered = np.array(img)[:, :, ::-1]
        return Image.fromarray(reordered.astype(np.uint8))
class RandomGaussianBlur(object):
    """Gaussian-blur an image with sigma drawn uniformly from [0.15, 1.30)."""

    def __call__(self, img):
        sigma = 0.15 + random.random() * 1.15
        # NOTE(review): ``multichannel`` is deprecated in newer scikit-image
        # (replaced by ``channel_axis``); kept as-is for compatibility.
        blurred = gaussian(np.array(img), sigma=sigma, multichannel=True)
        blurred *= 255  # rescale back to the 8-bit range before re-wrapping
        return Image.fromarray(blurred.astype(np.uint8))
# Lighting data augmentation take from here - https://github.com/eladhoffer/convNet.pytorch/blob/master/preprocess.py
class Lighting(object):
    """AlexNet-style PCA lighting noise.

    Adds a random linear combination of the RGB principal components
    (``eigvec`` scaled by ``eigval`` and a per-call N(0, alphastd) draw)
    to the whole image.  ``alphastd == 0`` disables the augmentation.
    """

    def __init__(self, alphastd,
                 eigval=(0.2175, 0.0188, 0.0045),
                 eigvec=((-0.5675, 0.7192, 0.4009),
                         (-0.5808, -0.0045, -0.8140),
                         (-0.5836, -0.6948, 0.4203))):
        self.alphastd = alphastd
        self.eigval = torch.Tensor(eigval)
        self.eigvec = torch.Tensor(eigvec)

    def __call__(self, img):
        if self.alphastd == 0:
            return img
        # Per-call random weights for the three principal components.
        alpha = img.new().resize_(3).normal_(0, self.alphastd)
        weights = alpha.view(1, 3).expand(3, 3)
        scales = self.eigval.view(1, 3).expand(3, 3)
        rgb = (self.eigvec.type_as(img).clone() * weights * scales).sum(1).squeeze()
        # Broadcast the per-channel offset over the whole (3, H, W) image.
        return img.add(rgb.view(3, 1, 1).expand_as(img))
| <filename>lib/ext_transform.py<gh_stars>1000+
import random
import numpy as np
from skimage.filters import gaussian
import torch
from PIL import Image, ImageFilter
class RandomVerticalFlip(object):
def __call__(self, img):
if random.random() < 0.5:
return img.transpose(Image.FLIP_TOP_BOTTOM)
return img
class DeNormalize(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, tensor):
for t, m, s in zip(tensor, self.mean, self.std):
t.mul_(s).add_(m)
return tensor
class MaskToTensor(object):
def __call__(self, img):
return torch.from_numpy(np.array(img, dtype=np.int32)).long()
class FreeScale(object):
def __init__(self, size, interpolation=Image.BILINEAR):
self.size = tuple(reversed(size)) # size: (h, w)
self.interpolation = interpolation
def __call__(self, img):
return img.resize(self.size, self.interpolation)
class FlipChannels(object):
def __call__(self, img):
img = np.array(img)[:, :, ::-1]
return Image.fromarray(img.astype(np.uint8))
class RandomGaussianBlur(object):
def __call__(self, img):
sigma = 0.15 + random.random() * 1.15
blurred_img = gaussian(np.array(img), sigma=sigma, multichannel=True)
blurred_img *= 255
return Image.fromarray(blurred_img.astype(np.uint8))
# Lighting data augmentation take from here - https://github.com/eladhoffer/convNet.pytorch/blob/master/preprocess.py
class Lighting(object):
"""Lighting noise(AlexNet - style PCA - based noise)"""
def __init__(self, alphastd,
eigval=(0.2175, 0.0188, 0.0045),
eigvec=((-0.5675, 0.7192, 0.4009),
(-0.5808, -0.0045, -0.8140),
(-0.5836, -0.6948, 0.4203))):
self.alphastd = alphastd
self.eigval = torch.Tensor(eigval)
self.eigvec = torch.Tensor(eigvec)
def __call__(self, img):
if self.alphastd == 0:
return img
alpha = img.new().resize_(3).normal_(0, self.alphastd)
rgb = self.eigvec.type_as(img).clone()\
.mul(alpha.view(1, 3).expand(3, 3))\
.mul(self.eigval.view(1, 3).expand(3, 3))\
.sum(1).squeeze()
return img.add(rgb.view(3, 1, 1).expand_as(img))
| en | 0.699006 | # size: (h, w) # Lighting data augmentation take from here - https://github.com/eladhoffer/convNet.pytorch/blob/master/preprocess.py Lighting noise(AlexNet - style PCA - based noise) | 2.160531 | 2 |
test/mock_os.py | jan-g/psh | 0 | 6620112 | import contextlib
import fcntl
import os
from _pytest.monkeypatch import MonkeyPatch
class Os:
    """In-memory stand-in for the ``os``/``fcntl`` file-descriptor table.

    Tracks fd -> (file, mode) in a plain dict so tests can assert on
    descriptor plumbing (open/close/dup/dup2/F_DUPFD) without touching real
    files.  ``patch()`` temporarily swaps the real functions for the mock's
    bound methods.

    Fix over the original: ``_free()`` and ``fcntl()`` returned ``None`` when
    the table was exhausted, silently corrupting the table (``fds[None]``);
    they now raise OSError like the real OS does (EMFILE).
    """

    STDIN = ("STDIN", "r")
    STDOUT = ("STDOUT", "w")
    STDERR = ("STDERR", "w")

    def __init__(self, fds=None):
        if fds is None:
            # Default table mirrors a fresh process: fds 0/1/2 wired up.
            fds = {0: Os.STDIN, 1: Os.STDOUT, 2: Os.STDERR}
        self.fds = fds

    def open(self, file, mode):
        """Allocate the lowest free fd and record (file, mode) under it."""
        fd = self._free()
        self.fds[fd] = (file, mode)
        return fd

    def close(self, fd):
        """Release ``fd``; OSError for an fd that is not open (EBADF)."""
        try:
            del self.fds[fd]
        except KeyError:
            raise OSError()

    def dup(self, fd):
        """Duplicate ``fd`` onto the lowest free descriptor."""
        try:
            data = self.fds[fd]
            fd2 = self._free()
            self.fds[fd2] = data
            return fd2
        except KeyError:
            raise OSError()

    def dup2(self, fd, fd2):
        """Duplicate ``fd`` onto ``fd2``, silently replacing fd2's entry."""
        try:
            self.fds[fd2] = self.fds[fd]
            return fd2
        except KeyError:
            raise OSError()

    def fcntl(self, fd, cmd, arg):
        """Model F_DUPFD only: duplicate ``fd`` onto the lowest free fd >= arg."""
        assert cmd == fcntl.F_DUPFD
        if fd not in self.fds:
            raise OSError()  # real fcntl fails with EBADF
        for i in range(arg, 1023):
            if i not in self.fds:
                self.fds[i] = self.fds[fd]
                return i
        raise OSError()  # table exhausted (EMFILE); previously returned None

    def _free(self):
        # Lowest unused descriptor, matching the kernel's allocation rule.
        for i in range(1023):
            if i not in self.fds:
                return i
        raise OSError()  # table exhausted; previously fell through to None

    @contextlib.contextmanager
    def patch(self):
        """Temporarily route os.open/close/dup/dup2 and fcntl.fcntl here."""
        with MonkeyPatch().context() as mp:
            mp.setattr(os, "open", self.open)
            mp.setattr(os, "close", self.close)
            mp.setattr(os, "dup", self.dup)
            mp.setattr(os, "dup2", self.dup2)
            mp.setattr(fcntl, "fcntl", self.fcntl)
            yield
def test_os():
    """Smoke-test the mock: patched os.* calls update the fake fd table."""
    mock = Os()
    with mock.patch():
        new_fd = os.open("blah", os.O_RDONLY)
        assert new_fd == 3
        assert mock.fds[new_fd] == ("blah", os.O_RDONLY)
        assert os.dup2(new_fd, 1) == 1
        assert mock.fds[1] == ("blah", os.O_RDONLY)
| import contextlib
import fcntl
import os
from _pytest.monkeypatch import MonkeyPatch
class Os:
STDIN = ("STDIN", "r")
STDOUT = ("STDOUT", "w")
STDERR = ("STDERR", "w")
def __init__(self, fds=None):
if fds is None:
fds = {0: Os.STDIN, 1: Os.STDOUT, 2: Os.STDERR}
self.fds = fds
def open(self, file, mode):
fd = self._free()
self.fds[fd] = (file, mode)
return fd
def close(self, fd):
try:
del self.fds[fd]
except KeyError:
raise OSError()
def dup(self, fd):
try:
data = self.fds[fd]
fd2 = self._free()
self.fds[fd2] = data
return fd2
except KeyError:
raise OSError()
def dup2(self, fd, fd2):
try:
self.fds[fd2] = self.fds[fd]
return fd2
except KeyError:
raise OSError()
def fcntl(self, fd, cmd, arg):
assert cmd == fcntl.F_DUPFD
for i in range(arg, 1023):
if i not in self.fds:
try:
self.fds[i] = self.fds[fd]
return i
except KeyError:
raise OSError()
def _free(self):
for i in range(1023):
if i not in self.fds:
return i
@contextlib.contextmanager
def patch(self):
with MonkeyPatch().context() as mp:
mp.setattr(os, "open", self.open)
mp.setattr(os, "close", self.close)
mp.setattr(os, "dup", self.dup)
mp.setattr(os, "dup2", self.dup2)
mp.setattr(fcntl, "fcntl", self.fcntl)
yield
def test_os():
o = Os()
with o.patch():
assert os.open("blah", os.O_RDONLY) == 3
assert o.fds[3] == ("blah", os.O_RDONLY)
assert os.dup2(3, 1) == 1
assert o.fds[1] == ("blah", os.O_RDONLY)
| none | 1 | 2.282263 | 2 | |
fakeinline/tests/test_admin.py | kezabelle/django-fakeinline | 3 | 6620113 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
try:
from urllib.parse import urlparse
except ImportError: # py2.7 ... just for 1.8 tbh.
from urlparse import urlparse
import pytest
from django.contrib import admin
from django.core.urlresolvers import reverse
from fakeinline.datastructures import FakeInline
from .models import ModelForTesting
class AdminForTesting(admin.ModelAdmin):
    """ModelAdmin whose only job is to exercise FakeInline in the admin views."""
    inlines = [FakeInline]
@pytest.fixture
def django_admin():
    """Register ModelForTesting with the admin for the duration of one test.

    Yields the registered ModelAdmin instance and always unregisters on
    teardown so tests stay isolated.  Uses ``pytest.fixture`` (yield
    fixtures are supported since pytest 3.0); ``pytest.yield_fixture`` is
    deprecated and removed in modern pytest.
    """
    admin.site.register(ModelForTesting, AdminForTesting)
    yield admin.site._registry[ModelForTesting]
    admin.site.unregister(ModelForTesting)
def test_not_there():
    # Sanity check: the model is only registered inside the django_admin fixture.
    assert ModelForTesting not in admin.site._registry
def test_add_GET_ok(django_admin, admin_client):
    """The add form renders despite the fake inline being present."""
    add_url = reverse('admin:tests_modelfortesting_add')
    assert admin_client.get(add_url).status_code == 200
def test_add_POST_ok(django_admin, admin_client):
    """Submitting the add form saves and redirects to the changelist."""
    add_url = reverse('admin:tests_modelfortesting_add')
    changelist_url = reverse('admin:tests_modelfortesting_changelist')
    response = admin_client.post(add_url, data={'hello': 'add'}, follow=True)
    assert response.status_code == 200
    # Compare paths only: Django 1.8 put the full http://host in the
    # redirect chain, 1.9+ does not.
    seen = [(urlparse(target).path, status)
            for target, status in response.redirect_chain]
    assert seen == [(urlparse(changelist_url).path, 302)]
@pytest.mark.django_db
def test_edit_GET_ok(django_admin, admin_client):
    """The change form renders for an existing object."""
    obj = ModelForTesting.objects.create()
    change_url = reverse('admin:tests_modelfortesting_change', args=(obj.pk,))
    assert admin_client.get(change_url).status_code == 200
@pytest.mark.django_db
def test_edit_POST_ok(django_admin, admin_client):
    """Submitting the change form saves and redirects to the changelist."""
    obj = ModelForTesting.objects.create()
    change_url = reverse('admin:tests_modelfortesting_change', args=(obj.pk,))
    changelist_url = reverse('admin:tests_modelfortesting_changelist')
    response = admin_client.post(change_url, data={'hello': 'edit'}, follow=True)
    assert response.status_code == 200
    # Compare paths only: Django 1.8 put the full http://host in the
    # redirect chain, 1.9+ does not.
    seen = [(urlparse(target).path, status)
            for target, status in response.redirect_chain]
    assert seen == [(urlparse(changelist_url).path, 302)]
| # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
try:
from urllib.parse import urlparse
except ImportError: # py2.7 ... just for 1.8 tbh.
from urlparse import urlparse
import pytest
from django.contrib import admin
from django.core.urlresolvers import reverse
from fakeinline.datastructures import FakeInline
from .models import ModelForTesting
class AdminForTesting(admin.ModelAdmin):
inlines = [FakeInline]
@pytest.yield_fixture
def django_admin():
admin.site.register(ModelForTesting, AdminForTesting)
yield admin.site._registry[ModelForTesting]
admin.site.unregister(ModelForTesting)
def test_not_there():
assert ModelForTesting not in admin.site._registry
def test_add_GET_ok(django_admin, admin_client):
url = reverse('admin:tests_modelfortesting_add')
response = admin_client.get(url)
assert response.status_code == 200
def test_add_POST_ok(django_admin, admin_client):
url = reverse('admin:tests_modelfortesting_add')
redirect_to = reverse('admin:tests_modelfortesting_changelist')
response = admin_client.post(url, data={'hello': 'add'}, follow=True)
assert response.status_code == 200
# 1.8 included the http://host so we have to parse it out for compatibility.
# 1.9+ doesn't.
redirects = [(urlparse(url).path, code) for url, code in response.redirect_chain]
assert redirects == [(urlparse(redirect_to).path, 302)]
@pytest.mark.django_db
def test_edit_GET_ok(django_admin, admin_client):
obj = ModelForTesting.objects.create()
url = reverse('admin:tests_modelfortesting_change', args=(obj.pk,))
response = admin_client.get(url)
assert response.status_code == 200
@pytest.mark.django_db
def test_edit_POST_ok(django_admin, admin_client):
obj = ModelForTesting.objects.create()
url = reverse('admin:tests_modelfortesting_change', args=(obj.pk,))
redirect_to = reverse('admin:tests_modelfortesting_changelist')
response = admin_client.post(url, data={'hello':'edit'}, follow=True)
assert response.status_code == 200
# 1.8 included the http://host so we have to parse it out for compatibility.
# 1.9+ doesn't.
redirects = [(urlparse(url).path, code) for url, code in response.redirect_chain]
assert redirects == [(urlparse(redirect_to).path, 302)]
| en | 0.947788 | # -*- coding: utf-8 -*- # py2.7 ... just for 1.8 tbh. # 1.8 included the http://host so we have to parse it out for compatibility. # 1.9+ doesn't. # 1.8 included the http://host so we have to parse it out for compatibility. # 1.9+ doesn't. | 2.296935 | 2 |
resqpy/time_series/_time_series.py | bp/resqpy | 35 | 6620114 | <gh_stars>10-100
"""TimeSeries class handling normal (non-geological) time series."""
import logging
log = logging.getLogger(__name__)
import datetime as dt
import warnings
from ._any_time_series import AnyTimeSeries
from ._time_duration import TimeDuration
class TimeSeries(AnyTimeSeries):
    """Class for RESQML Time Series without year offsets.

    notes:
       use this class for time series on a human timeframe; use the resqpy GeologicTimeSeries class
       instead if the time series is on a geological timeframe
    """

    def __init__(self,
                 parent_model,
                 uuid = None,
                 time_series_root = None,
                 first_timestamp = None,
                 daily = None,
                 monthly = None,
                 quarterly = None,
                 yearly = None,
                 title = None,
                 originator = None,
                 extra_metadata = None):
        """Create a TimeSeries object, either from a time series node in parent model, or from given data.

        arguments:
           parent_model (model.Model): the resqpy model to which the time series will belong
           uuid (uuid.UUID, optional): the uuid of a TimeSeries object to be loaded from xml
           time_series_root (xml node, DEPRECATED): the xml root node; use uuid instead
           first_timestamp (str, optional): the first timestamp (in RESQML format) if not loading from xml;
              this and the remaining arguments are ignored if loading from xml
           daily (non-negative int, optional): the number of one day interval timesteps to start the series
           monthly (non-negative int, optional): the number of 30 day interval timesteps to follow the daily
              timesteps
           quarterly (non-negative int, optional): the number of 90 day interval timesteps to follow the
              monthly timesteps
           yearly (non-negative int, optional): the number of 365 day interval timesteps to follow the
              quarterly timesteps
           title (str, optional): the citation title to use for a new time series;
              ignored if uuid or time_series_root is not None
           originator (str, optional): the name of the person creating the time series, defaults to login id;
              ignored if uuid or time_series_root is not None
           extra_metadata (dict, optional): string key, value pairs to add as extra metadata for the time series;
              ignored if uuid or time_series_root is not None

        returns:
           newly instantiated TimeSeries object

        note:
           a new bespoke time series can be populated by passing the first timestamp here and using the
           add_timestamp() and/or extend_by...() methods

        :meta common:
        """
        self.timeframe = 'human'
        self.timestamps = []  # ordered list of timestamp strings in resqml/iso format
        if first_timestamp is not None:
            self.timestamps.append(first_timestamp)  # todo: check format of first_timestamp
        if daily is not None:
            for _ in range(daily):
                self.extend_by_days(1)
        if monthly is not None:
            for _ in range(monthly):
                self.extend_by_days(30)
        if quarterly is not None:
            for _ in range(quarterly):
                self.extend_by_days(90)  # could use 91
        if yearly is not None:
            for _ in range(yearly):
                self.extend_by_days(365)  # could use 360
        super().__init__(model = parent_model,
                         uuid = uuid,
                         title = title,
                         originator = originator,
                         extra_metadata = extra_metadata,
                         root_node = time_series_root)
        if self.extra_metadata is not None and self.extra_metadata.get('timeframe') == 'geologic':
            raise ValueError('attempt to instantiate a human timeframe time series for a geologic time series')

    def is_equivalent(self, other_ts, tol_seconds = 1):
        """Returns True if this timestep series is essentially identical to the other; otherwise False."""
        super_equivalence = super().is_equivalent(other_ts)
        if super_equivalence is not None:
            return super_equivalence
        tolerance = TimeDuration(seconds = tol_seconds)
        for t_index in range(self.number_of_timestamps()):
            diff = TimeDuration(earlier_timestamp = self.timestamps[t_index],
                                later_timestamp = other_ts.timestamps[t_index])
            if abs(diff.duration) > tolerance.duration:
                return False
        return True

    def index_for_timestamp_not_later_than(self, timestamp):
        """Returns the index of the latest timestamp that is not later than the specified date.

        :meta common:
        """
        index = len(self.timestamps) - 1
        while (index >= 0) and (self.timestamps[index] > timestamp):
            index -= 1
        if index < 0:
            return None
        return index

    def index_for_timestamp_not_earlier_than(self, timestamp):
        """Returns the index of the earliest timestamp that is not earlier than the specified date.

        :meta common:
        """
        index = 0
        while (index < len(self.timestamps)) and (self.timestamps[index] < timestamp):
            index += 1
        if index >= len(self.timestamps):
            return None
        return index

    def index_for_timestamp_closest_to(self, timestamp):
        """Returns the index of the timestamp that is closest to the specified date.

        :meta common:
        """
        if not self.timestamps:
            return None
        before = self.index_for_timestamp_not_later_than(timestamp)
        # note: must test 'is None' rather than truthiness - index 0 is a valid
        # result and previously caused index 0 to be returned without comparing
        # the distance to the following timestamp
        if before is None:
            return 0
        if before == len(self.timestamps) - 1 or self.timestamps[before] == timestamp:
            return before
        after = before + 1
        early_delta = TimeDuration(earlier_timestamp = self.timestamps[before], later_timestamp = timestamp)
        later_delta = TimeDuration(earlier_timestamp = timestamp, later_timestamp = self.timestamps[after])
        return before if early_delta.duration <= later_delta.duration else after

    def duration_between_timestamps(self, earlier_index, later_index):
        """Returns the duration between a pair of timestamps.

        :meta common:
        """
        if earlier_index < 0 or later_index >= len(self.timestamps) or later_index < earlier_index:
            return None
        return TimeDuration(earlier_timestamp = self.timestamps[earlier_index],
                            later_timestamp = self.timestamps[later_index])

    def days_between_timestamps(self, earlier_index, later_index):
        """Returns the number of whole days between a pair of timestamps, as an integer."""
        delta = self.duration_between_timestamps(earlier_index, later_index)
        if delta is None:
            return None
        return delta.duration.days

    def duration_since_start(self, index):
        """Returns the duration between the start of the time series and the indexed timestamp.

        :meta common:
        """
        if index < 0 or index >= len(self.timestamps):
            return None
        return self.duration_between_timestamps(0, index)

    def days_since_start(self, index):
        """Returns the number of days between the start of the time series and the indexed timestamp."""
        return self.duration_since_start(index).duration.days

    def step_duration(self, index):
        """Returns the duration of the time step between the indexed timestamp and preceding one.

        :meta common:
        """
        if index < 1 or index >= len(self.timestamps):
            return None
        return self.duration_between_timestamps(index - 1, index)

    def step_days(self, index):
        """Returns the number of days between the indexed timestamp and preceding one."""
        delta = self.step_duration(index)
        if delta is None:
            return None
        return delta.duration.days

    # NB: Following functions modify the time series, which is dangerous if the series is in use by a model
    # Could check for relationships involving the time series and disallow changes if any found?

    def add_timestamp(self, new_timestamp, allow_insertion = False):
        """Inserts a new timestamp into the time series."""
        # todo: check that new_timestamp is in valid format (iso format + 'Z')
        if allow_insertion:
            # NB: This can insert a timestamp anywhere in the series, will invalidate indices, possibly corrupting model
            index = self.index_for_timestamp_not_later_than(new_timestamp)
            if index is None:
                index = 0
            else:
                index += 1
            self.timestamps.insert(index, new_timestamp)
        else:
            last = self.last_timestamp()
            if last is not None:
                assert new_timestamp > last
            self.timestamps.append(new_timestamp)

    def extend_by_duration(self, duration):
        """Adds a timestamp to the end of the series, at duration beyond the last timestamp."""
        assert (duration.duration.days >= 0)  # duration may not be negative
        assert (len(self.timestamps) > 0)  # there must be something to extend from
        self.timestamps.append(duration.timestamp_after_duration(self.last_timestamp()))

    def extend_by_days(self, days):
        """Adds a timestamp to the end of the series, at a duration of days beyond the last timestamp."""
        duration = TimeDuration(days = days)
        self.extend_by_duration(duration)

    def datetimes(self):
        """Returns the timestamps as a list of python-datetime objects."""
        return [dt.datetime.fromisoformat(t.rstrip('Z')) for t in self.timestamps]

    @property
    def time_series_root(self):
        """DEPRECATED.

        Alias for root
        """
        warnings.warn("Attribute 'time_series_root' is deprecated. Use 'root'", DeprecationWarning)
        return self.root
| """TimeSeries class handling normal (non-geological) time series."""
import logging
log = logging.getLogger(__name__)
import datetime as dt
import warnings
from ._any_time_series import AnyTimeSeries
from ._time_duration import TimeDuration
class TimeSeries(AnyTimeSeries):
"""Class for RESQML Time Series without year offsets.
notes:
use this class for time series on a human timeframe; use the resqpy GeologicTimeSeries class
instead if the time series is on a geological timeframe
"""
def __init__(self,
parent_model,
uuid = None,
time_series_root = None,
first_timestamp = None,
daily = None,
monthly = None,
quarterly = None,
yearly = None,
title = None,
originator = None,
extra_metadata = None):
"""Create a TimeSeries object, either from a time series node in parent model, or from given data.
arguments:
parent_model (model.Model): the resqpy model to which the time series will belong
uuid (uuid.UUID, optional): the uuid of a TimeSeries object to be loaded from xml
time_series_root (xml node, DEPRECATED): the xml root node; use uuid instead
first_time_stamp (str, optional): the first timestamp (in RESQML format) if not loading from xml;
this and the remaining arguments are ignored if loading from xml
daily (non-negative int, optional): the number of one day interval timesteps to start the series
monthly (non-negative int, optional): the number of 30 day interval timesteps to follow the daily
timesteps
quarterly (non-negative int, optional): the number of 90 day interval timesteps to follow the
monthly timesteps
yearly (non-negative int, optional): the number of 365 day interval timesteps to follow the
quarterly timesteps
title (str, optional): the citation title to use for a new time series;
ignored if uuid or time_series_root is not None
originator (str, optional): the name of the person creating the time series, defaults to login id;
ignored if uuid or time_series_root is not None
extra_metadata (dict, optional): string key, value pairs to add as extra metadata for the time series;
ignored if uuid or time_series_root is not None
returns:
newly instantiated TimeSeries object
note:
a new bespoke time series can be populated by passing the first timestamp here and using the
add_timestamp() and/or extend_by...() methods
:meta common:
"""
self.timeframe = 'human'
self.timestamps = [] # ordered list of timestamp strings in resqml/iso format
if first_timestamp is not None:
self.timestamps.append(first_timestamp) # todo: check format of first_timestamp
if daily is not None:
for _ in range(daily):
self.extend_by_days(1)
if monthly is not None:
for _ in range(monthly):
self.extend_by_days(30)
if quarterly is not None:
for _ in range(quarterly):
self.extend_by_days(90) # could use 91
if yearly is not None:
for _ in range(yearly):
self.extend_by_days(365) # could use 360
super().__init__(model = parent_model,
uuid = uuid,
title = title,
originator = originator,
extra_metadata = extra_metadata,
root_node = time_series_root)
if self.extra_metadata is not None and self.extra_metadata.get('timeframe') == 'geologic':
raise ValueError('attempt to instantiate a human timeframe time series for a geologic time series')
def is_equivalent(self, other_ts, tol_seconds = 1):
"""Returns True if the this timestep series is essentially identical to the other; otherwise False."""
super_equivalence = super().is_equivalent(other_ts)
if super_equivalence is not None:
return super_equivalence
tolerance = TimeDuration(seconds = tol_seconds)
for t_index in range(self.number_of_timestamps()):
diff = TimeDuration(earlier_timestamp = self.timestamps[t_index],
later_timestamp = other_ts.timestamps[t_index])
if abs(diff.duration) > tolerance.duration:
return False
return True
def index_for_timestamp_not_later_than(self, timestamp):
"""Returns the index of the latest timestamp that is not later than the specified date.
:meta common:
"""
index = len(self.timestamps) - 1
while (index >= 0) and (self.timestamps[index] > timestamp):
index -= 1
if index < 0:
return None
return index
def index_for_timestamp_not_earlier_than(self, timestamp):
"""Returns the index of the earliest timestamp that is not earlier than the specified date.
:meta common:
"""
index = 0
while (index < len(self.timestamps)) and (self.timestamps[index] < timestamp):
index += 1
if index >= len(self.timestamps):
return None
return index
def index_for_timestamp_closest_to(self, timestamp):
"""Returns the index of the timestamp that is closest to the specified date.
:meta common:
"""
if not self.timestamps:
return None
before = self.index_for_timestamp_not_later_than(timestamp)
if not before:
return 0
if before == len(self.timestamps) - 1 or self.timestamps[before] == timestamp:
return before
after = before + 1
early_delta = TimeDuration(earlier_timestamp = self.timestamps[before], later_timestamp = timestamp)
later_delta = TimeDuration(earlier_timestamp = timestamp, later_timestamp = self.timestamps[after])
return before if early_delta.duration <= later_delta.duration else after
def duration_between_timestamps(self, earlier_index, later_index):
"""Returns the duration between a pair of timestamps.
:meta common:
"""
if earlier_index < 0 or later_index >= len(self.timestamps) or later_index < earlier_index:
return None
return TimeDuration(earlier_timestamp = self.timestamps[earlier_index],
later_timestamp = self.timestamps[later_index])
def days_between_timestamps(self, earlier_index, later_index):
"""Returns the number of whole days between a pair of timestamps, as an integer."""
delta = self.duration_between_timestamps(earlier_index, later_index)
if delta is None:
return None
return delta.duration.days
def duration_since_start(self, index):
"""Returns the duration between the start of the time series and the indexed timestamp.
:meta common:
"""
if index < 0 or index >= len(self.timestamps):
return None
return self.duration_between_timestamps(0, index)
def days_since_start(self, index):
"""Returns the number of days between the start of the time series and the indexed timestamp."""
return self.duration_since_start(index).duration.days
def step_duration(self, index):
"""Returns the duration of the time step between the indexed timestamp and preceding one.
:meta common:
"""
if index < 1 or index >= len(self.timestamps):
return None
return self.duration_between_timestamps(index - 1, index)
def step_days(self, index):
"""Returns the number of days between the indexed timestamp and preceding one."""
delta = self.step_duration(index)
if delta is None:
return None
return delta.duration.days
# NB: Following functions modify the time series, which is dangerous if the series is in use by a model
# Could check for relationships involving the time series and disallow changes if any found?
def add_timestamp(self, new_timestamp, allow_insertion = False):
"""Inserts a new timestamp into the time series."""
# todo: check that new_timestamp is in valid format (iso format + 'Z')
if allow_insertion:
# NB: This can insert a timestamp anywhere in the series, will invalidate indices, possibly corrupting model
index = self.index_for_timestamp_not_later_than(new_timestamp)
if index is None:
index = 0
else:
index += 1
self.timestamps.insert(index, new_timestamp)
else:
last = self.last_timestamp()
if last is not None:
assert (new_timestamp > self.last_timestamp())
self.timestamps.append(new_timestamp)
def extend_by_duration(self, duration):
"""Adds a timestamp to the end of the series, at duration beyond the last timestamp."""
assert (duration.duration.days >= 0) # duration may not be negative
assert (len(self.timestamps) > 0) # there must be something to extend from
self.timestamps.append(duration.timestamp_after_duration(self.last_timestamp()))
def extend_by_days(self, days):
"""Adds a timestamp to the end of the series, at a duration of days beyond the last timestamp."""
duration = TimeDuration(days = days)
self.extend_by_duration(duration)
def datetimes(self):
"""Returns the timestamps as a list of python-datetime objects."""
return [dt.datetime.fromisoformat(t.rstrip('Z')) for t in self.timestamps]
@property
def time_series_root(self):
"""DEPRECATED.
Alias for root
"""
warnings.warn("Attribute 'time_series_root' is deprecated. Use 'root'", DeprecationWarning)
return self.root | en | 0.815364 | TimeSeries class handling normal (non-geological) time series. Class for RESQML Time Series without year offsets. notes: use this class for time series on a human timeframe; use the resqpy GeologicTimeSeries class instead if the time series is on a geological timeframe Create a TimeSeries object, either from a time series node in parent model, or from given data. arguments: parent_model (model.Model): the resqpy model to which the time series will belong uuid (uuid.UUID, optional): the uuid of a TimeSeries object to be loaded from xml time_series_root (xml node, DEPRECATED): the xml root node; use uuid instead first_time_stamp (str, optional): the first timestamp (in RESQML format) if not loading from xml; this and the remaining arguments are ignored if loading from xml daily (non-negative int, optional): the number of one day interval timesteps to start the series monthly (non-negative int, optional): the number of 30 day interval timesteps to follow the daily timesteps quarterly (non-negative int, optional): the number of 90 day interval timesteps to follow the monthly timesteps yearly (non-negative int, optional): the number of 365 day interval timesteps to follow the quarterly timesteps title (str, optional): the citation title to use for a new time series; ignored if uuid or time_series_root is not None originator (str, optional): the name of the person creating the time series, defaults to login id; ignored if uuid or time_series_root is not None extra_metadata (dict, optional): string key, value pairs to add as extra metadata for the time series; ignored if uuid or time_series_root is not None returns: newly instantiated TimeSeries object note: a new bespoke time series can be populated by passing the first timestamp here and using the add_timestamp() and/or extend_by...() methods :meta common: # ordered list of timestamp strings in resqml/iso format # todo: check format of first_timestamp # could use 91 # could use 360 
Returns True if the this timestep series is essentially identical to the other; otherwise False. Returns the index of the latest timestamp that is not later than the specified date. :meta common: Returns the index of the earliest timestamp that is not earlier than the specified date. :meta common: Returns the index of the timestamp that is closest to the specified date. :meta common: Returns the duration between a pair of timestamps. :meta common: Returns the number of whole days between a pair of timestamps, as an integer. Returns the duration between the start of the time series and the indexed timestamp. :meta common: Returns the number of days between the start of the time series and the indexed timestamp. Returns the duration of the time step between the indexed timestamp and preceding one. :meta common: Returns the number of days between the indexed timestamp and preceding one. # NB: Following functions modify the time series, which is dangerous if the series is in use by a model # Could check for relationships involving the time series and disallow changes if any found? Inserts a new timestamp into the time series. # todo: check that new_timestamp is in valid format (iso format + 'Z') # NB: This can insert a timestamp anywhere in the series, will invalidate indices, possibly corrupting model Adds a timestamp to the end of the series, at duration beyond the last timestamp. # duration may not be negative # there must be something to extend from Adds a timestamp to the end of the series, at a duration of days beyond the last timestamp. Returns the timestamps as a list of python-datetime objects. DEPRECATED. Alias for root | 2.949789 | 3 |
SDD/utils/GreedyRepulsion.py | thomascong121/SocialDistance | 2 | 6620115 | import numpy as np
import mxnet as mx
from mxnet import gluon
from gluoncv import utils
from mxnet import nd
from gluoncv.utils import bbox_iou
class RepulsionLoss(gluon.Block):
    """Repulsion penalty for crowded object detection.

    Appears to implement the RepGT / RepBox terms of the repulsion loss
    (proposals are pushed away from non-target ground truths and from each
    other) - TODO confirm against the "Repulsion Loss" paper.
    """

    def __init__(self, iou_thresh = 0.5, sigma = 0.5, epo = 0.1, **kwargs):
        # iou_thresh: proposals whose best IoU with any ground truth is >= this
        #   value are treated as positive and contribute to the penalty
        # sigma: switch point of the piecewise Smooth_Ln penalty
        # epo: epsilon added to the RepBox denominator to avoid division by zero
        super(RepulsionLoss, self).__init__(**kwargs)
        self.iou_thresh = iou_thresh
        self.sigma = sigma
        self.epo = epo

    def Smooth_Ln(self, x, sigma):
        """Return the summed smooth-ln penalty of the overlap values in x.

        Values <= sigma are penalised as -ln(1 - x); values > sigma are
        penalised linearly, keeping the function continuous at sigma.
        """
        large = np.where(x > sigma)
        small = np.where(x <= sigma)
        large = x[large]
        small = x[small]
        large = np.sum((large-sigma)/(1-sigma) - np.log(1-sigma))
        small = np.sum(-np.log(1-small))
        return (large + small)

    def forward(self, cls_preds, box_preds, cls_targets, box_targets, loss = None):
        """Compute the repulsion penalty, optionally added to a base loss.

        Returns (total_loss, cls_loss, box_loss) when a base `loss` callable is
        given, otherwise ([repulsion_loss], 0, 0).
        # assumes box_preds / box_targets are lists whose first element is an
        # NDArray of per-image boxes with x1 == x2 marking padding rows - TODO confirm
        """
        RepLoss = []
        all_box_gt = box_targets[0].asnumpy()
        all_box_pred = box_preds[0].asnumpy()
        for i in range(all_box_pred.shape[0]):
            # filter out all-zero (padding) rows: x1 == x2 marks an empty box
            nonzero_boxgt_index = np.where(all_box_gt[i][:,0] != all_box_gt[i][:,2])
            nonzero_boxpred_index = np.where(all_box_pred[i][:,0] != all_box_pred[i][:,2])
            nonzero_box_gt = all_box_gt[i][nonzero_boxgt_index][:,0:4]
            nonzero_box_pred = all_box_pred[i][nonzero_boxpred_index][:,0:4]
            # IoU of every prediction against every ground truth
            _iou = bbox_iou(nonzero_box_pred, nonzero_box_gt)
            # select positive proposals: best IoU at least iou_thresh
            pos_index = np.where(np.max(_iou, axis=1) >= self.iou_thresh)
            _iou = _iou[pos_index]
            # for each positive proposal keep its top two IoUs with targets
            # (ascending sort, so column 0 = second best, column 1 = best)
            sort_index = _iou.argsort(axis = 1)[:,-2:]
            iog = []
            for _i in range(len(sort_index)):
                tmp = _iou[_i, sort_index[_i]]
                iog.append(tmp)
            iog = np.array(iog)
            if iog.shape[0] == 0:
                # no positive proposals for this image: no repulsion penalty
                RepGT = 0
                RepBo = 0
            else:
                # RepGT: penalise overlap with the second-best ground truth
                RepGT = self.Smooth_Ln(iog[:,0], self.sigma)/iog.shape[0]
                # for each ground truth keep only the proposal with highest IoU
                pos_gt_prop_index = np.argmax(_iou, axis=0)
                # NOTE(review): both rows hold the same proposal array; the
                # duplication only feeds the curr/rest pairing below
                pos_gt_prop = np.array([nonzero_box_pred[pos_gt_prop_index], nonzero_box_pred[pos_gt_prop_index]])
                # RepBox: penalise overlap between proposals matched to
                # different ground truths (all unordered pairs)
                box_l = np.array([])
                total_iou = np.array([])
                for row in range(len(pos_gt_prop[0])-1):
                    curr = pos_gt_prop[0][row].reshape(1,-1)
                    rest = pos_gt_prop[1][row+1:]
                    _bbox_iou = bbox_iou(curr, rest)
                    box_l = np.hstack((box_l, [self.Smooth_Ln(_bbox_iou, self.sigma)]))
                    total_iou = np.hstack((total_iou, [np.sum(_bbox_iou)]))
                RepBo = np.sum(box_l) / (np.sum(total_iou) + self.epo)
            RepLoss.append(RepGT + RepBo)
        # NOTE(review): hard-codes GPU 0; fails on CPU-only hosts - consider
        # deriving the context from the input NDArrays instead
        RepLoss = [nd.array(RepLoss, ctx=mx.gpu(0))]
        if loss:
            sum_loss, cls_loss, box_loss = loss(cls_preds, box_preds, cls_targets, box_targets)  # TODO: YOLO version
            return nd.add(RepLoss[0], sum_loss[0]), cls_loss, box_loss
        else:
            return RepLoss, 0,0
import mxnet as mx
from mxnet import gluon
from gluoncv import utils
from mxnet import nd
from gluoncv.utils import bbox_iou
class RepulsionLoss(gluon.Block):
def __init__(self, iou_thresh = 0.5, sigma = 0.5, epo = 0.1, **kwargs):
super(RepulsionLoss, self).__init__(**kwargs)
self.iou_thresh = iou_thresh
self.sigma = sigma
self.epo = epo
def Smooth_Ln(self, x, sigma):
large = np.where(x > sigma)
small = np.where(x <= sigma)
large = x[large]
small = x[small]
large = np.sum((large-sigma)/(1-sigma) - np.log(1-sigma))
small = np.sum(-np.log(1-small))
return (large + small)
def forward(self, cls_preds, box_preds, cls_targets, box_targets, loss = None):
RepLoss = []
all_box_gt = box_targets[0].asnumpy()
all_box_pred = box_preds[0].asnumpy()
for i in range(all_box_pred.shape[0]):
#filter out all zero rows(mainly gt)
nonzero_boxgt_index = np.where(all_box_gt[i][:,0] != all_box_gt[i][:,2])
nonzero_boxpred_index = np.where(all_box_pred[i][:,0] != all_box_pred[i][:,2])
nonzero_box_gt = all_box_gt[i][nonzero_boxgt_index][:,0:4]
nonzero_box_pred = all_box_pred[i][nonzero_boxpred_index][:,0:4]
#calculate iou
_iou = bbox_iou(nonzero_box_pred, nonzero_box_gt)
# select positive proposals
pos_index = np.where(np.max(_iou, axis=1) >= self.iou_thresh)
_iou = _iou[pos_index]
#for each positive proposals keep its top two iou with targets
sort_index = _iou.argsort(axis = 1)[:,-2:]
iog = []
for _i in range(len(sort_index)):
tmp = _iou[_i, sort_index[_i]]
iog.append(tmp)
iog = np.array(iog)
if iog.shape[0] == 0:
RepGT = 0
RepBo = 0
else:
#RepulsionGT
RepGT = self.Smooth_Ln(iog[:,0], self.sigma)/iog.shape[0]
#for each ground truth keep only the proposal with highest iou
pos_gt_prop_index = np.argmax(_iou, axis=0)
pos_gt_prop = np.array([nonzero_box_pred[pos_gt_prop_index], nonzero_box_pred[pos_gt_prop_index]])
# RepulsionBox
box_l = np.array([])
total_iou = np.array([])
for row in range(len(pos_gt_prop[0])-1):
curr = pos_gt_prop[0][row].reshape(1,-1)
rest = pos_gt_prop[1][row+1:]
_bbox_iou = bbox_iou(curr, rest)
box_l = np.hstack((box_l, [self.Smooth_Ln(_bbox_iou, self.sigma)]))
total_iou = np.hstack((total_iou, [np.sum(_bbox_iou)]))
RepBo = np.sum(box_l) / (np.sum(total_iou) + self.epo)
RepLoss.append(RepGT + RepBo)
RepLoss = [nd.array(RepLoss, ctx=mx.gpu(0))]
if loss:
sum_loss, cls_loss, box_loss = loss(cls_preds, box_preds, cls_targets, box_targets)#TODO:YOLO-VERSION
return nd.add(RepLoss[0], sum_loss[0]), cls_loss, box_loss
else:
return RepLoss, 0,0 | en | 0.870313 | #filter out all zero rows(mainly gt) #calculate iou # select positive proposals #for each positive proposals keep its top two iou with targets #RepulsionGT #for each ground truth keep only the proposal with highest iou # RepulsionBox #TODO:YOLO-VERSION | 2.034238 | 2 |
famous_quote.py | Datapotomus/python_crash_course | 0 | 6620116 | # Printing a famous quote
print('<NAME> once said, "Talent is cheaper than table salt. What separates the talented individual from the successful one is a lot of hard work."') | # Printing a famous quote
print('<NAME> once said, "Talent is cheaper than table salt. What separates the talented individual from the successful one is a lot of hard work."') | en | 0.884813 | # Printing a famous quote | 2.458243 | 2 |
day-two/day-two.py | xoxys/adventofcode2021 | 0 | 6620117 | <gh_stars>0
#!/usr/bin/env python3
# Read the puzzle input, stripping the trailing newline from each line.
# Iterate the file object directly instead of materialising readlines().
with open("data.txt") as fh:
    lines = [line.rstrip() for line in fh]
def simple(lines=None):
    """Advent of Code 2021 day 2, part 1.

    Sums the distance per direction, prints the tallies and the answer,
    and returns (down - up) * forward.  Missing directions count as 0,
    so an empty input yields 0.

    arguments:
        lines: iterable of "<direction> <amount>" strings (default: none)

    returns:
        int: the part-1 answer
    """
    # Avoid the mutable default argument ([]) of the original signature.
    if lines is None:
        lines = []
    dimensions = {}
    for item in lines:
        parts = item.split()
        dimensions[parts[0]] = dimensions.get(parts[0], 0) + int(parts[1])
    print(dimensions)
    result = (dimensions.get("down", 0) - dimensions.get("up", 0)) * dimensions.get("forward", 0)
    print(result)
    return result
def accurate(lines=None):
    """Advent of Code 2021 day 2, part 2 (aim-based steering).

    "down"/"up" adjust the aim; "forward" advances horizontally and
    changes depth by aim * amount.  Prints the state and the answer,
    and returns horizontal * depth (0 for empty input).

    arguments:
        lines: iterable of "<direction> <amount>" strings (default: none)

    returns:
        int: the part-2 answer
    """
    # Avoid the mutable default argument ([]) of the original signature.
    if lines is None:
        lines = []
    dimensions = {"horizontal": 0, "aim": 0, "depth": 0}
    for item in lines:
        parts = item.split()
        direction, amount = parts[0], int(parts[1])
        if direction == "down":
            dimensions["aim"] += amount
        elif direction == "up":
            dimensions["aim"] -= amount
        elif direction == "forward":
            dimensions["horizontal"] += amount
            dimensions["depth"] += dimensions["aim"] * amount
    print(dimensions)
    result = dimensions["horizontal"] * dimensions["depth"]
    print(result)
    return result
simple(lines)
accurate(lines)
| #!/usr/bin/env python3
with open("data.txt") as file:
lines = [line.rstrip() for line in file.readlines()]
def simple(lines=[]):
dimensions = {}
for item in lines:
dimensions[item.split()[0]] = dimensions.get(item.split()[0], 0) + int(
item.split()[1])
print(dimensions)
print((dimensions["down"] - dimensions["up"]) * dimensions["forward"])
def accurate(lines=[]):
dimensions = {"horizontal": 0, "aim": 0, "depth": 0}
for item in lines:
if item.split()[0] == "down":
dimensions["aim"] += int(item.split()[1])
if item.split()[0] == "up":
dimensions["aim"] -= int(item.split()[1])
if item.split()[0] == "forward":
dimensions["horizontal"] += int(item.split()[1])
dimensions["depth"] += dimensions["aim"] * int(item.split()[1])
print(dimensions)
print(dimensions["horizontal"] * dimensions["depth"])
simple(lines)
accurate(lines) | fr | 0.221828 | #!/usr/bin/env python3 | 3.52633 | 4 |
astrodendro/io/fits.py | astrofrog/astrodendro | 1 | 6620118 | # Computing Astronomical Dendrograms
# Copyright (c) 2011-2012 <NAME> and <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import numpy as np
# Import and export
def dendro_export_fits(d, filename):
    """Export the dendrogram 'd' to the FITS file 'filename'.

    Not yet implemented; always raises NotImplementedError.
    """
    # Do not import pyfits before raising: on systems without the (long
    # deprecated) pyfits package the import raised ImportError and masked the
    # intended NotImplementedError.  Use astropy.io.fits when implementing.
    raise NotImplementedError("FITS export has not yet been implemented.")
def dendro_import_fits(filename):
    """Import 'filename' and construct a dendrogram from it.

    Not yet implemented; always raises NotImplementedError.
    """
    # Do not perform the pyfits / Dendrogram / Structure imports before
    # raising: a missing dependency would raise ImportError and mask the
    # intended NotImplementedError.  Use astropy.io.fits when implementing.
    raise NotImplementedError("FITS import has not yet been implemented.")
| # Computing Astronomical Dendrograms
# Copyright (c) 2011-2012 <NAME> and <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import numpy as np
# Import and export
def dendro_export_fits(d, filename):
"""Export the dendrogram 'd' to the FITS file 'filename'"""
import pyfits
raise NotImplementedError("FITS export has not yet been implemented.")
def dendro_import_fits(filename):
"""Import 'filename' and construct a dendrogram from it"""
import pyfits
from ..dendrogram import Dendrogram
from ..structure import Structure
raise NotImplementedError("FITS import has not yet been implemented.")
| en | 0.750756 | # Computing Astronomical Dendrograms # Copyright (c) 2011-2012 <NAME> and <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # Import and export Export the dendrogram 'd' to the FITS file 'filename' Import 'filename' and construct a dendrogram from it | 1.495678 | 1 |
src/memo/constants.py | Auguron/solana-py | 1 | 6620119 | <filename>src/memo/constants.py
from solana.publickey import PublicKey
MEMO_PROGRAM: PublicKey = PublicKey("<KEY>")
| <filename>src/memo/constants.py
from solana.publickey import PublicKey
MEMO_PROGRAM: PublicKey = PublicKey("<KEY>")
| none | 1 | 1.48201 | 1 | |
tests/python/test_fallback.py | ssciwr/geolib4d | 3 | 6620120 | <gh_stars>1-10
from py4dgeo.fallback import *
from py4dgeo._py4dgeo import (
cylinder_workingset_finder as cxx_cylinder_workingset_finder,
no_uncertainty as cxx_no_uncertainty,
radius_workingset_finder as cxx_radius_workingset_finder,
standard_deviation_uncertainty as cxx_standard_deviation_uncertainty,
)
from py4dgeo.m3c2 import M3C2
from . import epochs
import pytest
@pytest.mark.parametrize(
"uncertainty_callback",
[
(cxx_standard_deviation_uncertainty, standard_deviation_uncertainty),
(cxx_no_uncertainty, no_uncertainty),
],
)
@pytest.mark.parametrize(
"workingset_callback",
[
(cxx_radius_workingset_finder, radius_workingset_finder),
(cxx_cylinder_workingset_finder, cylinder_workingset_finder),
],
)
def test_fallback_implementations(epochs, uncertainty_callback, workingset_callback):
class CxxTestM3C2(M3C2):
def callback_uncertainty_calculation(self):
return uncertainty_callback[0]
def callback_workingset_finder(self):
return workingset_callback[0]
class PythonTestM3C2(M3C2):
def callback_uncertainty_calculation(self):
return uncertainty_callback[1]
def callback_workingset_finder(self):
return workingset_callback[1]
# Instantiate a fallback M3C2 instance
pym3c2 = CxxTestM3C2(
epochs=epochs,
corepoints=epochs[0].cloud,
radii=(3.0,),
scales=(2.0,),
max_cylinder_length=6.0,
)
# And a regular C++ based one
m3c2 = PythonTestM3C2(
epochs=epochs,
corepoints=epochs[0].cloud,
radii=(3.0,),
scales=(2.0,),
max_cylinder_length=6.0,
)
# The results should match
distances, uncertainties = m3c2.run()
fb_distances, fb_uncertainties = pym3c2.run()
assert np.allclose(distances, fb_distances)
assert np.allclose(uncertainties["lodetection"], fb_uncertainties["lodetection"])
assert np.allclose(uncertainties["stddev1"], fb_uncertainties["stddev1"])
assert np.allclose(uncertainties["stddev2"], fb_uncertainties["stddev2"])
assert np.allclose(uncertainties["num_samples1"], fb_uncertainties["num_samples1"])
assert np.allclose(uncertainties["num_samples2"], fb_uncertainties["num_samples2"])
def test_python_fallback_m3c2(epochs):
# Instantiate a fallback M3C2 instance
pym3c2 = PythonFallbackM3C2(
epochs=epochs, corepoints=epochs[0].cloud, radii=(3.0,), scales=(2.0,)
)
# And a regular C++ based one
m3c2 = M3C2(epochs=epochs, corepoints=epochs[0].cloud, radii=(3.0,), scales=(2.0,))
# The results should match
distances, uncertainties = m3c2.run()
fb_distances, fb_uncertainties = pym3c2.run()
assert np.allclose(distances, fb_distances)
assert np.allclose(uncertainties["lodetection"], fb_uncertainties["lodetection"])
assert np.allclose(uncertainties["stddev1"], fb_uncertainties["stddev1"])
assert np.allclose(uncertainties["stddev2"], fb_uncertainties["stddev2"])
assert np.allclose(uncertainties["num_samples1"], fb_uncertainties["num_samples1"])
assert np.allclose(uncertainties["num_samples2"], fb_uncertainties["num_samples2"])
def test_python_exception_in_callback(epochs):
# Define a fault algorithm
class ExcM3C2(M3C2):
def callback_workingset_finder(self):
def callback(*args):
1 / 0
return callback
# Instantiate it
m3c2 = ExcM3C2(
epochs=epochs, corepoints=epochs[0].cloud, radii=(3.0,), scales=(2.0,)
)
# Running it should throw the proper exception despite taking a detour
# throw multi-threaded C++ code.
with pytest.raises(ZeroDivisionError):
m3c2.run()
| from py4dgeo.fallback import *
from py4dgeo._py4dgeo import (
cylinder_workingset_finder as cxx_cylinder_workingset_finder,
no_uncertainty as cxx_no_uncertainty,
radius_workingset_finder as cxx_radius_workingset_finder,
standard_deviation_uncertainty as cxx_standard_deviation_uncertainty,
)
from py4dgeo.m3c2 import M3C2
from . import epochs
import pytest
@pytest.mark.parametrize(
"uncertainty_callback",
[
(cxx_standard_deviation_uncertainty, standard_deviation_uncertainty),
(cxx_no_uncertainty, no_uncertainty),
],
)
@pytest.mark.parametrize(
"workingset_callback",
[
(cxx_radius_workingset_finder, radius_workingset_finder),
(cxx_cylinder_workingset_finder, cylinder_workingset_finder),
],
)
def test_fallback_implementations(epochs, uncertainty_callback, workingset_callback):
class CxxTestM3C2(M3C2):
def callback_uncertainty_calculation(self):
return uncertainty_callback[0]
def callback_workingset_finder(self):
return workingset_callback[0]
class PythonTestM3C2(M3C2):
def callback_uncertainty_calculation(self):
return uncertainty_callback[1]
def callback_workingset_finder(self):
return workingset_callback[1]
# Instantiate a fallback M3C2 instance
pym3c2 = CxxTestM3C2(
epochs=epochs,
corepoints=epochs[0].cloud,
radii=(3.0,),
scales=(2.0,),
max_cylinder_length=6.0,
)
# And a regular C++ based one
m3c2 = PythonTestM3C2(
epochs=epochs,
corepoints=epochs[0].cloud,
radii=(3.0,),
scales=(2.0,),
max_cylinder_length=6.0,
)
# The results should match
distances, uncertainties = m3c2.run()
fb_distances, fb_uncertainties = pym3c2.run()
assert np.allclose(distances, fb_distances)
assert np.allclose(uncertainties["lodetection"], fb_uncertainties["lodetection"])
assert np.allclose(uncertainties["stddev1"], fb_uncertainties["stddev1"])
assert np.allclose(uncertainties["stddev2"], fb_uncertainties["stddev2"])
assert np.allclose(uncertainties["num_samples1"], fb_uncertainties["num_samples1"])
assert np.allclose(uncertainties["num_samples2"], fb_uncertainties["num_samples2"])
def test_python_fallback_m3c2(epochs):
# Instantiate a fallback M3C2 instance
pym3c2 = PythonFallbackM3C2(
epochs=epochs, corepoints=epochs[0].cloud, radii=(3.0,), scales=(2.0,)
)
# And a regular C++ based one
m3c2 = M3C2(epochs=epochs, corepoints=epochs[0].cloud, radii=(3.0,), scales=(2.0,))
# The results should match
distances, uncertainties = m3c2.run()
fb_distances, fb_uncertainties = pym3c2.run()
assert np.allclose(distances, fb_distances)
assert np.allclose(uncertainties["lodetection"], fb_uncertainties["lodetection"])
assert np.allclose(uncertainties["stddev1"], fb_uncertainties["stddev1"])
assert np.allclose(uncertainties["stddev2"], fb_uncertainties["stddev2"])
assert np.allclose(uncertainties["num_samples1"], fb_uncertainties["num_samples1"])
assert np.allclose(uncertainties["num_samples2"], fb_uncertainties["num_samples2"])
def test_python_exception_in_callback(epochs):
# Define a fault algorithm
class ExcM3C2(M3C2):
def callback_workingset_finder(self):
def callback(*args):
1 / 0
return callback
# Instantiate it
m3c2 = ExcM3C2(
epochs=epochs, corepoints=epochs[0].cloud, radii=(3.0,), scales=(2.0,)
)
# Running it should throw the proper exception despite taking a detour
# throw multi-threaded C++ code.
with pytest.raises(ZeroDivisionError):
m3c2.run() | en | 0.793838 | # Instantiate a fallback M3C2 instance # And a regular C++ based one # The results should match # Instantiate a fallback M3C2 instance # And a regular C++ based one # The results should match # Define a fault algorithm # Instantiate it # Running it should throw the proper exception despite taking a detour # throw multi-threaded C++ code. | 2.084858 | 2 |
seedpod_ground_risk/ui_resources/new_aircraft_wizard.py | Jordanjiun/cd11_seepod_ground_risk | 0 | 6620121 | <gh_stars>0
import typing
import PySide2
from PySide2.QtCore import QRegExp
from PySide2.QtGui import QRegExpValidator
from PySide2.QtWidgets import QWizard, QWizardPage, QLabel, QLineEdit, QGridLayout
from seedpod_ground_risk.ui_resources.layer_options import *
class NewAircraftInfoPage(QWizardPage):
def __init__(self, parent: typing.Optional[PySide2.QtWidgets.QWidget] = ...) -> None:
super().__init__(parent)
self.setTitle('New Aircraft Configuration')
def initializePage(self) -> None:
super().initializePage()
layout = QGridLayout()
for name, opt in AIRCRAFT_PARAMETERS.items():
regex = opt[0]
label = QLabel(name)
field = QLineEdit()
field.setValidator(QRegExpValidator(QRegExp(regex)))
label.setBuddy(field)
self.registerField(name + '*', field)
layout.addWidget(label)
layout.addWidget(field)
self.setLayout(layout)
class AircraftWizard(QWizard):
def __init__(self, parent: typing.Optional[PySide2.QtWidgets.QWidget] = ...,
flags: PySide2.QtCore.Qt.WindowFlags = ...) -> None:
super().__init__(parent, flags)
self.addPage(NewAircraftInfoPage(self))
self.setWindowTitle('Add Layer')
# TODO: Going back in wizard does not clear page fields.
# Hook into back button click and remove and re add page.
def accept(self) -> None:
super().accept()
self.aircraftKey = self.field('name')
self.opts = {}
self.d = {}
for name, opt in AIRCRAFT_PARAMETERS.items():
self.d[f'{opt[1]}'] = opt[2](self.field(name))
return self.d
| import typing
import PySide2
from PySide2.QtCore import QRegExp
from PySide2.QtGui import QRegExpValidator
from PySide2.QtWidgets import QWizard, QWizardPage, QLabel, QLineEdit, QGridLayout
from seedpod_ground_risk.ui_resources.layer_options import *
class NewAircraftInfoPage(QWizardPage):
def __init__(self, parent: typing.Optional[PySide2.QtWidgets.QWidget] = ...) -> None:
super().__init__(parent)
self.setTitle('New Aircraft Configuration')
def initializePage(self) -> None:
super().initializePage()
layout = QGridLayout()
for name, opt in AIRCRAFT_PARAMETERS.items():
regex = opt[0]
label = QLabel(name)
field = QLineEdit()
field.setValidator(QRegExpValidator(QRegExp(regex)))
label.setBuddy(field)
self.registerField(name + '*', field)
layout.addWidget(label)
layout.addWidget(field)
self.setLayout(layout)
class AircraftWizard(QWizard):
def __init__(self, parent: typing.Optional[PySide2.QtWidgets.QWidget] = ...,
flags: PySide2.QtCore.Qt.WindowFlags = ...) -> None:
super().__init__(parent, flags)
self.addPage(NewAircraftInfoPage(self))
self.setWindowTitle('Add Layer')
# TODO: Going back in wizard does not clear page fields.
# Hook into back button click and remove and re add page.
def accept(self) -> None:
super().accept()
self.aircraftKey = self.field('name')
self.opts = {}
self.d = {}
for name, opt in AIRCRAFT_PARAMETERS.items():
self.d[f'{opt[1]}'] = opt[2](self.field(name))
return self.d | en | 0.711238 | # TODO: Going back in wizard does not clear page fields. # Hook into back button click and remove and re add page. | 2.218323 | 2 |
BERT/_evaluate.py | vd1371/CBSA | 0 | 6620122 | <gh_stars>0
import numpy as np
import torch
def evaluate(model, val_dataloader, cross_entropy, **params):
device = torch.device("cuda")
print("\nEvaluating...")
# deactivate dropout layers
model.eval()
total_loss, total_accuracy = 0, 0
# empty list to save the model predictions
total_preds = []
# iterate over batches
for step,batch in enumerate(val_dataloader):
# Progress update every 50 batches.
# if step % 50 == 0 and not step == 0:
# Calculate elapsed time in minutes.
# elapsed = format_time(time.time() - t0)
# Report progress.
# print(' Batch {:>5,} of {:>5,}.'.format(step, len(val_dataloader)))
# push the batch to gpu
batch = [t.to(device) for t in batch]
sent_id, mask, labels = batch
# deactivate autograd
with torch.no_grad():
# model predictions
preds = model(sent_id, mask)
# compute the validation loss between actual and predicted values
labels = labels.unsqueeze(1).float()
loss = cross_entropy(preds,labels)
total_loss = total_loss + loss.item()
preds = preds.detach().cpu().numpy()
total_preds.append(preds)
# compute the validation loss of the epoch
avg_loss = total_loss / len(val_dataloader)
# reshape the predictions in form of (number of samples, no. of classes)
total_preds = np.concatenate(total_preds, axis = 0)
return avg_loss, total_preds | import numpy as np
import torch
def evaluate(model, val_dataloader, cross_entropy, **params):
device = torch.device("cuda")
print("\nEvaluating...")
# deactivate dropout layers
model.eval()
total_loss, total_accuracy = 0, 0
# empty list to save the model predictions
total_preds = []
# iterate over batches
for step,batch in enumerate(val_dataloader):
# Progress update every 50 batches.
# if step % 50 == 0 and not step == 0:
# Calculate elapsed time in minutes.
# elapsed = format_time(time.time() - t0)
# Report progress.
# print(' Batch {:>5,} of {:>5,}.'.format(step, len(val_dataloader)))
# push the batch to gpu
batch = [t.to(device) for t in batch]
sent_id, mask, labels = batch
# deactivate autograd
with torch.no_grad():
# model predictions
preds = model(sent_id, mask)
# compute the validation loss between actual and predicted values
labels = labels.unsqueeze(1).float()
loss = cross_entropy(preds,labels)
total_loss = total_loss + loss.item()
preds = preds.detach().cpu().numpy()
total_preds.append(preds)
# compute the validation loss of the epoch
avg_loss = total_loss / len(val_dataloader)
# reshape the predictions in form of (number of samples, no. of classes)
total_preds = np.concatenate(total_preds, axis = 0)
return avg_loss, total_preds | en | 0.674877 | # deactivate dropout layers # empty list to save the model predictions # iterate over batches # Progress update every 50 batches. # if step % 50 == 0 and not step == 0: # Calculate elapsed time in minutes. # elapsed = format_time(time.time() - t0) # Report progress. # print(' Batch {:>5,} of {:>5,}.'.format(step, len(val_dataloader))) # push the batch to gpu # deactivate autograd # model predictions # compute the validation loss between actual and predicted values # compute the validation loss of the epoch # reshape the predictions in form of (number of samples, no. of classes) | 2.57438 | 3 |
supported_frameworks/tensorflow_abalone_age_predictor_using_keras/abalone.py | smrmkt/sagemaker-notebooks | 1 | 6620123 | # -*- coding: utf-8 -*-
import numpy as np
import os
import tensorflow as tf
from tensorflow.python.estimator.export.export import build_raw_serving_input_receiver_fn
from tensorflow.python.estimator.export.export_output import PredictOutput
INPUT_TENSOR_NAME = "inputs"
SIGNATURE_NAME = "serving_default"
LEARNING_RATE = 0.001
def model_fn(features, labels, mode, params):
"""Estimator のためのモデル定義メソッド
# メソッドの構成は以下のとおり
# 1. Keras の Functional API 経由でモデルの設定を記述
# 2. Tensorflow を使って,学習・評価時の損失関数を定義
# 3. Tensorflow を使って,学習時のオペレータ・オプティマイザを定義
# 4. Tensorflow の tensors として予測値を取得
# 5. 評価用のメトリクスを生成
# 6. 予測値・損失関数・学習オペレータ・評価用メトリクスを EstimatorSpec オブジェクトとして返す"""
# 1. Keras の Functional API 経由でモデルの設定を記述
first_hidden_layer = tf.keras.layers.Dense(10, activation='relu', name='first-layer')(features[INPUT_TENSOR_NAME])
second_hidden_layer = tf.keras.layers.Dense(10, activation='relu')(first_hidden_layer)
output_layer = tf.keras.layers.Dense(1, activation='linear')(second_hidden_layer)
predictions = tf.reshape(output_layer, [-1])
# 予測モードのとき(= `ModeKeys.PREDICT`)は,こちらの EstimatorSpec
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions={"ages": predictions},
export_outputs={SIGNATURE_NAME: PredictOutput({"ages": predictions})})
# 2. Tensorflow を使って,学習・評価時の損失関数を定義
loss = tf.losses.mean_squared_error(labels, predictions)
# 3. Tensorflow を使って,学習時のオペレータ・オプティマイザを定義
train_op = tf.contrib.layers.optimize_loss(
loss=loss,
global_step=tf.contrib.framework.get_global_step(),
learning_rate=params["learning_rate"],
optimizer="SGD")
# 4. Tensorflow の tensors として予測値を取得
predictions_dict = {"ages": predictions}
# 5. 評価用のメトリクスを生成
# RMSE を追加のメトリックとして計算
eval_metric_ops = {
"rmse": tf.metrics.root_mean_squared_error(
tf.cast(labels, tf.float32), predictions)
}
# 予測値・損失関数・学習オペレータ・評価用メトリクスを EstimatorSpec オブジェクトとして返す
return tf.estimator.EstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op,
eval_metric_ops=eval_metric_ops)
def serving_input_fn(params):
tensor = tf.placeholder(tf.float32, shape=[1, 7])
return build_raw_serving_input_receiver_fn({INPUT_TENSOR_NAME: tensor})()
params = {"learning_rate": LEARNING_RATE}
def train_input_fn(training_dir, params):
return _input_fn(training_dir, 'abalone_train.csv')
def eval_input_fn(training_dir, params):
return _input_fn(training_dir, 'abalone_test.csv')
def _input_fn(training_dir, training_filename):
training_set = tf.contrib.learn.datasets.base.load_csv_without_header(
filename=os.path.join(training_dir, training_filename), target_dtype=np.int, features_dtype=np.float32)
return tf.estimator.inputs.numpy_input_fn(
x={INPUT_TENSOR_NAME: np.array(training_set.data)},
y=np.array(training_set.target),
num_epochs=None,
shuffle=True)()
| # -*- coding: utf-8 -*-
import numpy as np
import os
import tensorflow as tf
from tensorflow.python.estimator.export.export import build_raw_serving_input_receiver_fn
from tensorflow.python.estimator.export.export_output import PredictOutput
INPUT_TENSOR_NAME = "inputs"
SIGNATURE_NAME = "serving_default"
LEARNING_RATE = 0.001
def model_fn(features, labels, mode, params):
"""Estimator のためのモデル定義メソッド
# メソッドの構成は以下のとおり
# 1. Keras の Functional API 経由でモデルの設定を記述
# 2. Tensorflow を使って,学習・評価時の損失関数を定義
# 3. Tensorflow を使って,学習時のオペレータ・オプティマイザを定義
# 4. Tensorflow の tensors として予測値を取得
# 5. 評価用のメトリクスを生成
# 6. 予測値・損失関数・学習オペレータ・評価用メトリクスを EstimatorSpec オブジェクトとして返す"""
# 1. Keras の Functional API 経由でモデルの設定を記述
first_hidden_layer = tf.keras.layers.Dense(10, activation='relu', name='first-layer')(features[INPUT_TENSOR_NAME])
second_hidden_layer = tf.keras.layers.Dense(10, activation='relu')(first_hidden_layer)
output_layer = tf.keras.layers.Dense(1, activation='linear')(second_hidden_layer)
predictions = tf.reshape(output_layer, [-1])
# 予測モードのとき(= `ModeKeys.PREDICT`)は,こちらの EstimatorSpec
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions={"ages": predictions},
export_outputs={SIGNATURE_NAME: PredictOutput({"ages": predictions})})
# 2. Tensorflow を使って,学習・評価時の損失関数を定義
loss = tf.losses.mean_squared_error(labels, predictions)
# 3. Tensorflow を使って,学習時のオペレータ・オプティマイザを定義
train_op = tf.contrib.layers.optimize_loss(
loss=loss,
global_step=tf.contrib.framework.get_global_step(),
learning_rate=params["learning_rate"],
optimizer="SGD")
# 4. Tensorflow の tensors として予測値を取得
predictions_dict = {"ages": predictions}
# 5. 評価用のメトリクスを生成
# RMSE を追加のメトリックとして計算
eval_metric_ops = {
"rmse": tf.metrics.root_mean_squared_error(
tf.cast(labels, tf.float32), predictions)
}
# 予測値・損失関数・学習オペレータ・評価用メトリクスを EstimatorSpec オブジェクトとして返す
return tf.estimator.EstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op,
eval_metric_ops=eval_metric_ops)
def serving_input_fn(params):
tensor = tf.placeholder(tf.float32, shape=[1, 7])
return build_raw_serving_input_receiver_fn({INPUT_TENSOR_NAME: tensor})()
params = {"learning_rate": LEARNING_RATE}
def train_input_fn(training_dir, params):
return _input_fn(training_dir, 'abalone_train.csv')
def eval_input_fn(training_dir, params):
return _input_fn(training_dir, 'abalone_test.csv')
def _input_fn(training_dir, training_filename):
training_set = tf.contrib.learn.datasets.base.load_csv_without_header(
filename=os.path.join(training_dir, training_filename), target_dtype=np.int, features_dtype=np.float32)
return tf.estimator.inputs.numpy_input_fn(
x={INPUT_TENSOR_NAME: np.array(training_set.data)},
y=np.array(training_set.target),
num_epochs=None,
shuffle=True)()
| ja | 0.990208 | # -*- coding: utf-8 -*- Estimator のためのモデル定義メソッド # メソッドの構成は以下のとおり # 1. Keras の Functional API 経由でモデルの設定を記述 # 2. Tensorflow を使って,学習・評価時の損失関数を定義 # 3. Tensorflow を使って,学習時のオペレータ・オプティマイザを定義 # 4. Tensorflow の tensors として予測値を取得 # 5. 評価用のメトリクスを生成 # 6. 予測値・損失関数・学習オペレータ・評価用メトリクスを EstimatorSpec オブジェクトとして返す # 1. Keras の Functional API 経由でモデルの設定を記述 # 予測モードのとき(= `ModeKeys.PREDICT`)は,こちらの EstimatorSpec # 2. Tensorflow を使って,学習・評価時の損失関数を定義 # 3. Tensorflow を使って,学習時のオペレータ・オプティマイザを定義 # 4. Tensorflow の tensors として予測値を取得 # 5. 評価用のメトリクスを生成 # RMSE を追加のメトリックとして計算 # 予測値・損失関数・学習オペレータ・評価用メトリクスを EstimatorSpec オブジェクトとして返す | 2.504138 | 3 |
flask/tests/test_start.py | imsardine/learning | 0 | 6620124 | <gh_stars>0
import requests
import pytest
def test_hello_world(workspace, flask_ver):
workspace.src('hello.py', """
from flask import Flask
app = Flask(__name__)
@app.route('/')
def hello_world():
return 'Hello, World!'
""")
import flask
if flask_ver[0] == 0: # 0.x
message = """
| * Serving Flask app "hello"
| * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
"""
else: # 1.x
message = """
| * Serving Flask app "hello.py"
| * Environment: production
|\x1b[31m WARNING: Do not use the development server in a production environment.\x1b[0m
|\x1b[2m Use a production WSGI server instead.\x1b[0m
| * Debug mode: off
| * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
"""
with workspace.spawn('FLASK_APP=hello.py flask run') as p:
p.expect_exact(message)
resp = requests.get('http://localhost:5000')
assert resp.text == 'Hello, World!'
def test_hello__somebody__hello_somebody(client):
resp = client.get('/hello/Flask')
assert resp.data == b'Hello, Flask!'
def test_hello_form_view(client):
resp = client.get('/hello/')
assert resp.status_code == 200
assert b'Say hello to' in resp.data
def test_hello_form_submission__empty__rerender(client):
resp = client.post('/hello/', data=dict(name=''))
assert resp.status_code == 200
assert b'Say hello to' in resp.data
def test_hello_form_submission__not_empty__say_hello(client):
resp = client.post('/hello/', data=dict(name='Flask'))
assert resp.status_code == 302
assert resp.headers.get('Location') == 'http://localhost/hello/Flask'
| import requests
import pytest
def test_hello_world(workspace, flask_ver):
workspace.src('hello.py', """
from flask import Flask
app = Flask(__name__)
@app.route('/')
def hello_world():
return 'Hello, World!'
""")
import flask
if flask_ver[0] == 0: # 0.x
message = """
| * Serving Flask app "hello"
| * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
"""
else: # 1.x
message = """
| * Serving Flask app "hello.py"
| * Environment: production
|\x1b[31m WARNING: Do not use the development server in a production environment.\x1b[0m
|\x1b[2m Use a production WSGI server instead.\x1b[0m
| * Debug mode: off
| * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
"""
with workspace.spawn('FLASK_APP=hello.py flask run') as p:
p.expect_exact(message)
resp = requests.get('http://localhost:5000')
assert resp.text == 'Hello, World!'
def test_hello__somebody__hello_somebody(client):
resp = client.get('/hello/Flask')
assert resp.data == b'Hello, Flask!'
def test_hello_form_view(client):
resp = client.get('/hello/')
assert resp.status_code == 200
assert b'Say hello to' in resp.data
def test_hello_form_submission__empty__rerender(client):
resp = client.post('/hello/', data=dict(name=''))
assert resp.status_code == 200
assert b'Say hello to' in resp.data
def test_hello_form_submission__not_empty__say_hello(client):
resp = client.post('/hello/', data=dict(name='Flask'))
assert resp.status_code == 302
assert resp.headers.get('Location') == 'http://localhost/hello/Flask' | en | 0.639004 | from flask import Flask app = Flask(__name__) @app.route('/') def hello_world(): return 'Hello, World!' # 0.x | * Serving Flask app "hello" | * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit) # 1.x | * Serving Flask app "hello.py" | * Environment: production |\x1b[31m WARNING: Do not use the development server in a production environment.\x1b[0m |\x1b[2m Use a production WSGI server instead.\x1b[0m | * Debug mode: off | * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit) | 2.537788 | 3 |
vk_bot/mods/util/genpass.py | triangle1984/vk-bot | 3 | 6620125 | <filename>vk_bot/mods/util/genpass.py
import pyPrivnote, subprocess
from vk_bot.core.modules.basicplug import BasicPlug
class Genpass(BasicPlug):
doc = "Сгенерирует пароль"
command = ("пароль",)
def main(self):
try:
length = int(self.text[1])
except:
length = 64
if length > 999999:
length = 99999
text = f"openssl rand -base64 {length}"
result = subprocess.check_output(text, shell=True, encoding="utf-8")
url = pyPrivnote.create_note(result)
self.sendmsg(f"Пароль тута: {url} . Ссылка на сгорающую записку, которая удалится после просмотра кем либо") | <filename>vk_bot/mods/util/genpass.py
import pyPrivnote, subprocess
from vk_bot.core.modules.basicplug import BasicPlug
class Genpass(BasicPlug):
doc = "Сгенерирует пароль"
command = ("пароль",)
def main(self):
try:
length = int(self.text[1])
except:
length = 64
if length > 999999:
length = 99999
text = f"openssl rand -base64 {length}"
result = subprocess.check_output(text, shell=True, encoding="utf-8")
url = pyPrivnote.create_note(result)
self.sendmsg(f"Пароль тута: {url} . Ссылка на сгорающую записку, которая удалится после просмотра кем либо") | none | 1 | 2.150104 | 2 | |
code/preprocessing/lowercase.py | louiskhub/TweetViralityClassifier | 0 | 6620126 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Preprocessor that lowercases the original tweet text.
@author: marcelklehr
"""
from code.preprocessing.preprocessor import Preprocessor
class Lowercase(Preprocessor):
"""Preprocessor that lowercases the original tweet text"""
# constructor
def __init__(self, input_column, output_column):
# input column "tweet", new output column
super().__init__([input_column], output_column)
# don't implement _set_variables()
# get preprocessed column based on data frame and internal variables
def _get_values(self, inputs):
# lowercase column
column = inputs[0].str.lower()
return column | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Preprocessor that lowercases the original tweet text.
@author: marcelklehr
"""
from code.preprocessing.preprocessor import Preprocessor
class Lowercase(Preprocessor):
"""Preprocessor that lowercases the original tweet text"""
# constructor
def __init__(self, input_column, output_column):
# input column "tweet", new output column
super().__init__([input_column], output_column)
# don't implement _set_variables()
# get preprocessed column based on data frame and internal variables
def _get_values(self, inputs):
# lowercase column
column = inputs[0].str.lower()
return column | en | 0.478061 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- Preprocessor that lowercases the original tweet text. @author: marcelklehr Preprocessor that lowercases the original tweet text # constructor # input column "tweet", new output column # don't implement _set_variables() # get preprocessed column based on data frame and internal variables # lowercase column | 3.838938 | 4 |
ui_controller.py | MeowMeowZi/PPLTestTool | 0 | 6620127 | <reponame>MeowMeowZi/PPLTestTool<gh_stars>0
import shelve
import re
import sys
import threading
import time
import socket_temperature_connect
import socket_oscilloscope_connect
# import usb_connect
import serial_connect
from main_window import Ui_MainWindow
from PyQt5.QtWidgets import QApplication, QMainWindow, QTableWidgetItem, QMessageBox
from PyQt5.QtCore import QTimer, QThread, pyqtSignal
class MainUI(QMainWindow, Ui_MainWindow):
def __init__(self):
super(MainUI, self).__init__()
self.setupUi(self)
# 测试变量
self.test_info = False
self.test_text = ''
# 日志名字
self.log_name = ''
# 打开配置文件
self.init_scope = shelve.open('init/init_scope')
self.init_temp = shelve.open('init/init_temp')
self.init_power = shelve.open('init/init_power')
self.init_debug = shelve.open('init/init_debug')
# Oscilloscope标签页数据
self.scope_ip = ''
self.scope_setup = ''
# Temperature标签页数据
self.temp_ip = ''
self.temp_channel1_temp = ''
self.temp_channel2_temp = ''
self.temp_channel3_temp = ''
self.temp_channel4_temp = ''
self.temp_is_channel1_temp = False
self.temp_is_channel2_temp = False
self.temp_is_channel3_temp = False
self.temp_is_channel4_temp = False
# Power标签页数据
self.power_high_voltage = ''
self.power_mid_voltage = ''
self.power_low_voltage = ''
self.power_vid = ''
self.power_pid = ''
# Debug标签页数据
self.debug_port = ''
self.debug_mode = []
# 读取初始化文件并显示在软件上
self.init_setting()
self.pushbutton_signal_manage()
self.lineedit_signal_manage()
def pushbutton_signal_manage(self):
self.pushButton_info_start.clicked.connect(
lambda: self.pushbutton_slot_manage(self.pushButton_info_start)
)
def pushbutton_slot_manage(self, button):
if button == self.pushButton_info_start:
self.start()
def lineedit_signal_manage(self):
pass
# self.lineEdit_scope_ip.textChanged.connect(
# lambda: self.lineedit_slot_manage(self.lineEdit_scope_ip)
# )
def lineedit_slot_manage(self, lineedit):
pass
# regex_ip = re.compile(r'^(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}$')
# if lineedit == self.lineEdit_scope_ip:
# if not regex_ip.search(self.lineEdit_scope_ip.text()):
# QMessageBox.critical(self, 'Wrong', 'IP address format error')
#
# if lineedit == self.lineEdit_temp_ip:
# if not regex_ip.search(self.lineEdit_temp_ip.text()):
# QMessageBox.critical(self, 'Wrong', 'IP address format error')
# 关闭软件自动保存
def closeEvent(self, QCloseEvent):
self.data_save()
print('save success!')
# 开启软件时,将上一次关闭时保存的配置配置到软件上
def init_setting(self):
# Oscilloscope数据显示
try:
self.scope_ip = self.init_scope['scope_ip']
self.lineEdit_scope_ip.setText(self.scope_ip)
except KeyError:
pass
try:
self.scope_setup = self.init_scope['scope_setup']
self.lineEdit_scope_setup.setText(self.scope_setup)
except KeyError:
pass
# Temperature数据显示
try:
self.temp_ip = self.init_temp['temp_ip']
self.lineEdit_temp_ip.setText(self.temp_ip)
except KeyError:
pass
try:
self.temp_channel1_temp = self.init_temp['temp_channel1_temp']
self.lineEdit_temp_channl1.setText(self.temp_channel1_temp)
except KeyError:
pass
try:
self.temp_channel2_temp = self.init_temp['temp_channel2_temp']
self.lineEdit_temp_channl2.setText(self.temp_channel2_temp)
except KeyError:
pass
try:
self.temp_channel3_temp = self.init_temp['temp_channel3_temp']
self.lineEdit_temp_channl3.setText(self.temp_channel3_temp)
except KeyError:
pass
try:
self.temp_channel4_temp = self.init_temp['temp_channel4_temp']
self.lineEdit_temp_channl4.setText(self.temp_channel4_temp)
except KeyError:
pass
try:
self.temp_is_channel1_temp = self.init_temp['temp_is_channel1_temp']
self.checkBox_temp_channel1.setCheckState(self.temp_is_channel1_temp)
except KeyError:
pass
try:
self.temp_is_channel2_temp = self.init_temp['temp_is_channel2_temp']
self.checkBox_temp_channel2.setCheckState(self.temp_is_channel2_temp)
except KeyError:
pass
try:
self.temp_is_channel3_temp = self.init_temp['temp_is_channel3_temp']
self.checkBox_temp_channel3.setCheckState(self.temp_is_channel3_temp)
except KeyError:
pass
try:
self.temp_is_channel4_temp = self.init_temp['temp_is_channel4_temp']
self.checkBox_temp_channel4.setCheckState(self.temp_is_channel4_temp)
except KeyError:
pass
# Power数据显示
try:
self.power_high_voltage = self.init_power['power_high_voltage']
self.lineEdit_power_high_voltage.setText(self.power_high_voltage)
except KeyError:
pass
try:
self.power_mid_voltage = self.init_power['power_mid_voltage']
self.lineEdit_power_mid_voltage.setText(self.power_mid_voltage)
except KeyError:
pass
try:
self.power_low_voltage = self.init_power['power_low_voltage']
self.lineEdit_power_low_voltage.setText(self.power_low_voltage)
except KeyError:
pass
try:
self.power_vid = self.init_power['power_vid']
self.lineEdit_power_vid.setText(self.power_vid)
except KeyError:
pass
try:
self.power_pid = self.init_power['power_pid']
self.lineEdit_power_pid.setText(self.power_pid)
except KeyError:
pass
# Debug数据显示
try:
self.debug_port = self.init_debug['debug_port']
self.lineEdit_debug_port.setText(self.debug_port)
except KeyError:
pass
try:
self.debug_mode = self.init_debug['debug_mode']
for i in range(len(self.debug_mode)):
for j in range(len(self.debug_mode[0])):
self.tableWidget_debug_mode.setItem(i, j, QTableWidgetItem(self.debug_mode[i][j]))
except KeyError:
pass
# 界面数据保存到变量中,再保存到配置文件中
def data_save(self):
# 打开配置文件
self.init_scope = shelve.open('init/init_scope')
self.init_temp = shelve.open('init/init_temp')
self.init_power = shelve.open('init/init_power')
self.init_debug = shelve.open('init/init_debug')
# Oscilloscope标签页数据保存
self.scope_ip = self.lineEdit_scope_ip.text()
self.scope_setup = self.lineEdit_scope_setup.text()
self.init_scope['scope_ip'] = self.scope_ip
self.init_scope['scope_setup'] = self.scope_setup
# Temperature标签页数据保存
self.temp_ip = self.lineEdit_temp_ip.text()
self.temp_channel1_temp = self.lineEdit_temp_channl1.text()
self.temp_channel2_temp = self.lineEdit_temp_channl2.text()
self.temp_channel3_temp = self.lineEdit_temp_channl3.text()
self.temp_channel4_temp = self.lineEdit_temp_channl4.text()
self.temp_is_channel1_temp = self.checkBox_temp_channel1.checkState()
self.temp_is_channel2_temp = self.checkBox_temp_channel2.checkState()
self.temp_is_channel3_temp = self.checkBox_temp_channel3.checkState()
self.temp_is_channel4_temp = self.checkBox_temp_channel4.checkState()
self.init_temp['temp_ip'] = self.temp_ip
self.init_temp['temp_channel1_temp'] = self.temp_channel1_temp
self.init_temp['temp_channel2_temp'] = self.temp_channel2_temp
self.init_temp['temp_channel3_temp'] = self.temp_channel3_temp
self.init_temp['temp_channel4_temp'] = self.temp_channel4_temp
self.init_temp['temp_is_channel1_temp'] = self.temp_is_channel1_temp
self.init_temp['temp_is_channel2_temp'] = self.temp_is_channel2_temp
self.init_temp['temp_is_channel3_temp'] = self.temp_is_channel3_temp
self.init_temp['temp_is_channel4_temp'] = self.temp_is_channel4_temp
# Power标签页数据保存
self.power_high_voltage = self.lineEdit_power_high_voltage.text()
self.power_mid_voltage = self.lineEdit_power_mid_voltage.text()
self.power_low_voltage = self.lineEdit_power_low_voltage.text()
self.power_vid = self.lineEdit_power_vid.text()
self.power_pid = self.lineEdit_power_pid.text()
self.init_power['power_high_voltage'] = self.power_high_voltage
self.init_power['power_mid_voltage'] = self.power_mid_voltage
self.init_power['power_low_voltage'] = self.power_low_voltage
self.init_power['power_vid'] = self.power_vid
self.init_power['power_pid'] = self.power_pid
# Debug标签页数据保存
self.debug_port = self.lineEdit_debug_port.text()
debug_mode = []
try:
for i in range(self.tableWidget_debug_mode.rowCount()):
list_ = []
for j in range(self.tableWidget_debug_mode.columnCount()):
text = self.tableWidget_debug_mode.item(i, j).text()
if text == '':
break
list_.append(text)
if list_ == []:
break
debug_mode.append(list_)
except:
pass
self.init_debug['debug_port'] = self.debug_port
self.debug_mode = debug_mode
self.init_debug['debug_mode'] = debug_mode
# 关闭配置文件
self.init_scope.close()
self.init_temp.close()
self.init_power.close()
self.init_debug.close()
def start(self):
self.log_name = 'log/' + time.strftime("%Y-%m-%d %H-%M-%S", time.localtime()) + '_' + 'log.txt'
self.data_save()
threading.Thread(target=self.run).start()
    def run(self):
        """Execute the full test matrix (temperature x power x debug),
        capturing one oscilloscope result per combination.

        Spawns one monitor thread per instrument that forwards status
        messages to the log/UI, then iterates every generated task triple.
        """
        self.temp = socket_temperature_connect.Temperature()
        threading.Thread(target=self.temp_info).start()
        self.scope = socket_oscilloscope_connect.Oscilloscope()
        threading.Thread(target=self.scope_info).start()
        # NOTE(review): the visible import block comments out
        # ``import usb_connect``; if that holds for this file too, the next
        # line raises NameError -- confirm the import is restored.
        self.power = usb_connect.Power()
        threading.Thread(target=self.power_info).start()
        self.debug = serial_connect.Debug()
        threading.Thread(target=self.debug_info).start()
        # Build each instrument's task list before iterating the matrix.
        self.temp.task_generate()
        self.power.task_generate()
        self.debug.task_generate()
        self.temp.start()
        for i in self.temp.task:
            self.temp.run(i)
            for j in self.power.task:
                self.power.run(j)
                for k in self.debug.task:
                    self.debug.run(k)
                    # Result name encodes the current temp/power/debug combo;
                    # assumes task items are indexable -- TODO confirm shape.
                    name = 'temp_'+str(i[0])+'-'+'power_'+str(j[1])+'-'+'debug_'+str(k[1])
                    self.scope.run(name)
        self.temp.stop()
# 将信息打印到窗口
def temp_info(self):
while True:
if self.temp.is_info:
text = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + ' -> ' + self.temp.info
f = open(self.log_name, 'a')
f.write(text + '\n')
f.close()
self.textBrowser_info_text.append(text)
self.textBrowser_info_text.moveCursor(self.textBrowser_info_text.textCursor().End)
self.temp.is_info = False
def scope_info(self):
while True:
if self.scope.is_info:
text = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + ' -> ' + self.scope.info
f = open(self.log_name, 'a')
f.write(text + '\n')
f.close()
self.textBrowser_info_text.append(text)
self.textBrowser_info_text.moveCursor(self.textBrowser_info_text.textCursor().End)
self.scope.is_info = False
def power_info(self):
while True:
if self.power.is_info:
text = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + ' -> ' + self.power.info
f = open(self.log_name, 'a')
f.write(text + '\n')
f.close()
self.textBrowser_info_text.append(text)
self.textBrowser_info_text.moveCursor(self.textBrowser_info_text.textCursor().End)
self.power.is_info = False
def debug_info(self):
while True:
if self.debug.is_info:
text = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + ' -> ' + self.debug.info
f = open(self.log_name, 'a')
f.write(text + '\n')
f.close()
self.textBrowser_info_text.append(text)
self.textBrowser_info_text.moveCursor(self.textBrowser_info_text.textCursor().End)
self.debug.is_info = False
if __name__ == '__main__':
    # Script entry point: build the Qt application and show the main window.
    app = QApplication(sys.argv)
    window = MainUI()  # local rename avoids shadowing the class name
    window.show()
    sys.exit(app.exec_())
| import shelve
import re
import sys
import threading
import time
import socket_temperature_connect
import socket_oscilloscope_connect
# import usb_connect
import serial_connect
from main_window import Ui_MainWindow
from PyQt5.QtWidgets import QApplication, QMainWindow, QTableWidgetItem, QMessageBox
from PyQt5.QtCore import QTimer, QThread, pyqtSignal
class MainUI(QMainWindow, Ui_MainWindow):
    """Main window of the instrument-control tool.

    Persists per-tab settings in shelve files under ``init/`` and, on
    ``start()``, drives a test run over every temperature x power x debug
    task combination while capturing oscilloscope data (see ``run()``).
    """
    def __init__(self):
        super(MainUI, self).__init__()
        self.setupUi(self)
        # Test variables
        self.test_info = False
        self.test_text = ''
        # Log file name (assigned when a run starts)
        self.log_name = ''
        # Open the configuration (shelve) files
        self.init_scope = shelve.open('init/init_scope')
        self.init_temp = shelve.open('init/init_temp')
        self.init_power = shelve.open('init/init_power')
        self.init_debug = shelve.open('init/init_debug')
        # Oscilloscope tab data
        self.scope_ip = ''
        self.scope_setup = ''
        # Temperature tab data
        self.temp_ip = ''
        self.temp_channel1_temp = ''
        self.temp_channel2_temp = ''
        self.temp_channel3_temp = ''
        self.temp_channel4_temp = ''
        self.temp_is_channel1_temp = False
        self.temp_is_channel2_temp = False
        self.temp_is_channel3_temp = False
        self.temp_is_channel4_temp = False
        # Power tab data
        self.power_high_voltage = ''
        self.power_mid_voltage = ''
        self.power_low_voltage = ''
        self.power_vid = ''
        self.power_pid = ''
        # Debug tab data
        self.debug_port = ''
        self.debug_mode = []
        # Read the init files and show the saved values in the UI
        self.init_setting()
        self.pushbutton_signal_manage()
        self.lineedit_signal_manage()
    def pushbutton_signal_manage(self):
        """Wire push-button clicked signals to the shared dispatch slot."""
        self.pushButton_info_start.clicked.connect(
            lambda: self.pushbutton_slot_manage(self.pushButton_info_start)
        )
    def pushbutton_slot_manage(self, button):
        """Dispatch a button click to the matching action."""
        if button == self.pushButton_info_start:
            self.start()
    def lineedit_signal_manage(self):
        """Wire line-edit signals (currently disabled)."""
        pass
        # self.lineEdit_scope_ip.textChanged.connect(
        #     lambda: self.lineedit_slot_manage(self.lineEdit_scope_ip)
        # )
    def lineedit_slot_manage(self, lineedit):
        """Validate line-edit content (currently disabled)."""
        pass
        # regex_ip = re.compile(r'^(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}$')
        # if lineedit == self.lineEdit_scope_ip:
        #     if not regex_ip.search(self.lineEdit_scope_ip.text()):
        #         QMessageBox.critical(self, 'Wrong', 'IP address format error')
        #
        # if lineedit == self.lineEdit_temp_ip:
        #     if not regex_ip.search(self.lineEdit_temp_ip.text()):
        #         QMessageBox.critical(self, 'Wrong', 'IP address format error')
    # Auto-save the settings when the window is closed
    def closeEvent(self, QCloseEvent):
        """Qt close hook: persist the UI state before the window goes away."""
        self.data_save()
        print('save success!')
    # On startup, restore the configuration saved at last shutdown
    def init_setting(self):
        """Load each saved value from the shelves into its widget.

        Every key is read in its own try/except so a missing key (first run)
        simply leaves that widget at its default.
        """
        # Show Oscilloscope data
        try:
            self.scope_ip = self.init_scope['scope_ip']
            self.lineEdit_scope_ip.setText(self.scope_ip)
        except KeyError:
            pass
        try:
            self.scope_setup = self.init_scope['scope_setup']
            self.lineEdit_scope_setup.setText(self.scope_setup)
        except KeyError:
            pass
        # Show Temperature data
        try:
            self.temp_ip = self.init_temp['temp_ip']
            self.lineEdit_temp_ip.setText(self.temp_ip)
        except KeyError:
            pass
        try:
            self.temp_channel1_temp = self.init_temp['temp_channel1_temp']
            self.lineEdit_temp_channl1.setText(self.temp_channel1_temp)
        except KeyError:
            pass
        try:
            self.temp_channel2_temp = self.init_temp['temp_channel2_temp']
            self.lineEdit_temp_channl2.setText(self.temp_channel2_temp)
        except KeyError:
            pass
        try:
            self.temp_channel3_temp = self.init_temp['temp_channel3_temp']
            self.lineEdit_temp_channl3.setText(self.temp_channel3_temp)
        except KeyError:
            pass
        try:
            self.temp_channel4_temp = self.init_temp['temp_channel4_temp']
            self.lineEdit_temp_channl4.setText(self.temp_channel4_temp)
        except KeyError:
            pass
        try:
            self.temp_is_channel1_temp = self.init_temp['temp_is_channel1_temp']
            self.checkBox_temp_channel1.setCheckState(self.temp_is_channel1_temp)
        except KeyError:
            pass
        try:
            self.temp_is_channel2_temp = self.init_temp['temp_is_channel2_temp']
            self.checkBox_temp_channel2.setCheckState(self.temp_is_channel2_temp)
        except KeyError:
            pass
        try:
            self.temp_is_channel3_temp = self.init_temp['temp_is_channel3_temp']
            self.checkBox_temp_channel3.setCheckState(self.temp_is_channel3_temp)
        except KeyError:
            pass
        try:
            self.temp_is_channel4_temp = self.init_temp['temp_is_channel4_temp']
            self.checkBox_temp_channel4.setCheckState(self.temp_is_channel4_temp)
        except KeyError:
            pass
        # Show Power data
        try:
            self.power_high_voltage = self.init_power['power_high_voltage']
            self.lineEdit_power_high_voltage.setText(self.power_high_voltage)
        except KeyError:
            pass
        try:
            self.power_mid_voltage = self.init_power['power_mid_voltage']
            self.lineEdit_power_mid_voltage.setText(self.power_mid_voltage)
        except KeyError:
            pass
        try:
            self.power_low_voltage = self.init_power['power_low_voltage']
            self.lineEdit_power_low_voltage.setText(self.power_low_voltage)
        except KeyError:
            pass
        try:
            self.power_vid = self.init_power['power_vid']
            self.lineEdit_power_vid.setText(self.power_vid)
        except KeyError:
            pass
        try:
            self.power_pid = self.init_power['power_pid']
            self.lineEdit_power_pid.setText(self.power_pid)
        except KeyError:
            pass
        # Show Debug data
        try:
            self.debug_port = self.init_debug['debug_port']
            self.lineEdit_debug_port.setText(self.debug_port)
        except KeyError:
            pass
        try:
            self.debug_mode = self.init_debug['debug_mode']
            for i in range(len(self.debug_mode)):
                for j in range(len(self.debug_mode[0])):
                    self.tableWidget_debug_mode.setItem(i, j, QTableWidgetItem(self.debug_mode[i][j]))
        except KeyError:
            pass
    # Save the UI state into variables, then into the config files
    def data_save(self):
        """Persist every tab's widget values to the shelve files under init/."""
        # Open the configuration (shelve) files
        self.init_scope = shelve.open('init/init_scope')
        self.init_temp = shelve.open('init/init_temp')
        self.init_power = shelve.open('init/init_power')
        self.init_debug = shelve.open('init/init_debug')
        # Save Oscilloscope tab data
        self.scope_ip = self.lineEdit_scope_ip.text()
        self.scope_setup = self.lineEdit_scope_setup.text()
        self.init_scope['scope_ip'] = self.scope_ip
        self.init_scope['scope_setup'] = self.scope_setup
        # Save Temperature tab data
        self.temp_ip = self.lineEdit_temp_ip.text()
        self.temp_channel1_temp = self.lineEdit_temp_channl1.text()
        self.temp_channel2_temp = self.lineEdit_temp_channl2.text()
        self.temp_channel3_temp = self.lineEdit_temp_channl3.text()
        self.temp_channel4_temp = self.lineEdit_temp_channl4.text()
        self.temp_is_channel1_temp = self.checkBox_temp_channel1.checkState()
        self.temp_is_channel2_temp = self.checkBox_temp_channel2.checkState()
        self.temp_is_channel3_temp = self.checkBox_temp_channel3.checkState()
        self.temp_is_channel4_temp = self.checkBox_temp_channel4.checkState()
        self.init_temp['temp_ip'] = self.temp_ip
        self.init_temp['temp_channel1_temp'] = self.temp_channel1_temp
        self.init_temp['temp_channel2_temp'] = self.temp_channel2_temp
        self.init_temp['temp_channel3_temp'] = self.temp_channel3_temp
        self.init_temp['temp_channel4_temp'] = self.temp_channel4_temp
        self.init_temp['temp_is_channel1_temp'] = self.temp_is_channel1_temp
        self.init_temp['temp_is_channel2_temp'] = self.temp_is_channel2_temp
        self.init_temp['temp_is_channel3_temp'] = self.temp_is_channel3_temp
        self.init_temp['temp_is_channel4_temp'] = self.temp_is_channel4_temp
        # Save Power tab data
        self.power_high_voltage = self.lineEdit_power_high_voltage.text()
        self.power_mid_voltage = self.lineEdit_power_mid_voltage.text()
        self.power_low_voltage = self.lineEdit_power_low_voltage.text()
        self.power_vid = self.lineEdit_power_vid.text()
        self.power_pid = self.lineEdit_power_pid.text()
        self.init_power['power_high_voltage'] = self.power_high_voltage
        self.init_power['power_mid_voltage'] = self.power_mid_voltage
        self.init_power['power_low_voltage'] = self.power_low_voltage
        self.init_power['power_vid'] = self.power_vid
        self.init_power['power_pid'] = self.power_pid
        # Save Debug tab data
        self.debug_port = self.lineEdit_debug_port.text()
        debug_mode = []
        try:
            for i in range(self.tableWidget_debug_mode.rowCount()):
                list_ = []
                for j in range(self.tableWidget_debug_mode.columnCount()):
                    text = self.tableWidget_debug_mode.item(i, j).text()
                    if text == '':
                        break
                    list_.append(text)
                if list_ == []:
                    break
                debug_mode.append(list_)
        except:
            # NOTE(review): bare except hides errors; ``item()`` returning
            # ``None`` for an unset cell is the likely trigger -- confirm
            # and narrow to an explicit None check.
            pass
        self.init_debug['debug_port'] = self.debug_port
        self.debug_mode = debug_mode
        self.init_debug['debug_mode'] = debug_mode
        # Close the config files
        self.init_scope.close()
        self.init_temp.close()
        self.init_power.close()
        self.init_debug.close()
    def start(self):
        """Open a fresh timestamped log, save the UI state, and launch run() in a thread."""
        self.log_name = 'log/' + time.strftime("%Y-%m-%d %H-%M-%S", time.localtime()) + '_' + 'log.txt'
        self.data_save()
        threading.Thread(target=self.run).start()
    def run(self):
        """Execute the full test matrix (temperature x power x debug)."""
        # One monitor thread per instrument forwards messages to the log/UI.
        self.temp = socket_temperature_connect.Temperature()
        threading.Thread(target=self.temp_info).start()
        self.scope = socket_oscilloscope_connect.Oscilloscope()
        threading.Thread(target=self.scope_info).start()
        # NOTE(review): ``import usb_connect`` is commented out at file top,
        # so this line raises NameError as written -- confirm intent.
        self.power = usb_connect.Power()
        threading.Thread(target=self.power_info).start()
        self.debug = serial_connect.Debug()
        threading.Thread(target=self.debug_info).start()
        self.temp.task_generate()
        self.power.task_generate()
        self.debug.task_generate()
        self.temp.start()
        for i in self.temp.task:
            self.temp.run(i)
            for j in self.power.task:
                self.power.run(j)
                for k in self.debug.task:
                    self.debug.run(k)
                    # Result name encodes the current temp/power/debug combo;
                    # assumes task items are indexable -- TODO confirm shape.
                    name = 'temp_'+str(i[0])+'-'+'power_'+str(j[1])+'-'+'debug_'+str(k[1])
                    self.scope.run(name)
        self.temp.stop()
    # Print instrument messages to the window
    def temp_info(self):
        """Busy-wait for temperature messages; log and display them (worker thread)."""
        while True:
            if self.temp.is_info:
                text = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + ' -> ' + self.temp.info
                f = open(self.log_name, 'a')
                f.write(text + '\n')
                f.close()
                # NOTE(review): Qt widget access from a worker thread -- confirm safety.
                self.textBrowser_info_text.append(text)
                self.textBrowser_info_text.moveCursor(self.textBrowser_info_text.textCursor().End)
                self.temp.is_info = False
    def scope_info(self):
        """Busy-wait for oscilloscope messages; log and display them (worker thread)."""
        while True:
            if self.scope.is_info:
                text = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + ' -> ' + self.scope.info
                f = open(self.log_name, 'a')
                f.write(text + '\n')
                f.close()
                self.textBrowser_info_text.append(text)
                self.textBrowser_info_text.moveCursor(self.textBrowser_info_text.textCursor().End)
                self.scope.is_info = False
    def power_info(self):
        """Busy-wait for power-supply messages; log and display them (worker thread)."""
        while True:
            if self.power.is_info:
                text = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + ' -> ' + self.power.info
                f = open(self.log_name, 'a')
                f.write(text + '\n')
                f.close()
                self.textBrowser_info_text.append(text)
                self.textBrowser_info_text.moveCursor(self.textBrowser_info_text.textCursor().End)
                self.power.is_info = False
    def debug_info(self):
        """Busy-wait for debug-link messages; log and display them (worker thread)."""
        while True:
            if self.debug.is_info:
                text = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + ' -> ' + self.debug.info
                f = open(self.log_name, 'a')
                f.write(text + '\n')
                f.close()
                self.textBrowser_info_text.append(text)
                self.textBrowser_info_text.moveCursor(self.textBrowser_info_text.textCursor().End)
                self.debug.is_info = False
if __name__ == '__main__':
    # Script entry point: build the Qt application and show the main window.
    app = QApplication(sys.argv)
    window = MainUI()  # local rename avoids shadowing the class name
    window.show()
    sys.exit(app.exec_())
sdk/python/pulumi_spotinst/subscription.py | pulumi/pulumi-spotinst | 4 | 6620128 | <reponame>pulumi/pulumi-spotinst
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['SubscriptionArgs', 'Subscription']
@pulumi.input_type
class SubscriptionArgs:
    # Generated input-args class: plain storage via pulumi.set/get with
    # typed property accessors.  Do not hand-edit beyond docs (see file header).
    def __init__(__self__, *,
                 endpoint: pulumi.Input[str],
                 event_type: pulumi.Input[str],
                 protocol: pulumi.Input[str],
                 resource_id: pulumi.Input[str],
                 format: Optional[pulumi.Input[Mapping[str, Any]]] = None):
        """
        The set of arguments for constructing a Subscription resource.
        :param pulumi.Input[str] endpoint: The endpoint the notification will be sent to. url in case of `"http"`/`"https"`/`"web"`, email address in case of `"email"`/`"email-json"` and sns-topic-arn in case of `"aws-sns"`.
        :param pulumi.Input[str] event_type: The event to send the notification when triggered. Valid values: `"AWS_EC2_INSTANCE_TERMINATE"`, `"AWS_EC2_INSTANCE_TERMINATED"`, `"AWS_EC2_INSTANCE_LAUNCH"`, `"AWS_EC2_INSTANCE_READY_SIGNAL_TIMEOUT"`, `"AWS_EC2_CANT_SPIN_OD"`, `"AWS_EC2_INSTANCE_UNHEALTHY_IN_ELB"`, `"GROUP_ROLL_FAILED"`, `"GROUP_ROLL_FINISHED"`,
               `"CANT_SCALE_UP_GROUP_MAX_CAPACITY"`,
               `"GROUP_UPDATED"`,
               `"AWS_EMR_PROVISION_TIMEOUT"`,
               `"GROUP_BEANSTALK_INIT_READY"`,
               `"AZURE_VM_TERMINATED"`,
               `"AZURE_VM_TERMINATE"`,
               `"AWS_EC2_MANAGED_INSTANCE_PAUSING"`,
               `"AWS_EC2_MANAGED_INSTANCE_RESUMING"`,
               `"AWS_EC2_MANAGED_INSTANCE_RECYCLING"`,`"AWS_EC2_MANAGED_INSTANCE_DELETING"`.
               Ocean Events:`"CLUSTER_ROLL_FINISHED"`,`"GROUP_ROLL_FAILED"`.
        :param pulumi.Input[str] protocol: The protocol to send the notification. Valid values: `"email"`, `"email-json"`, `"aws-sns"`, `"web"`.
               The following values are deprecated: `"http"` , `"https"`
               You can use the generic `"web"` protocol instead.
               `"aws-sns"` is only supported with AWS provider
        :param pulumi.Input[str] resource_id: Spotinst Resource id (Elastigroup or Ocean ID).
        :param pulumi.Input[Mapping[str, Any]] format: The format of the notification content (JSON Format - Key+Value). Valid Values : `"instance-id"`, `"event"`, `"resource-id"`, `"resource-name"`, `"subnet-id"`, `"availability-zone"`, `"reason"`, `"private-ip"`, `"launchspec-id"`
               Example: {"event": `"event"`, `"resourceId"`: `"resource-id"`, `"resourceName"`: `"resource-name"`, `"myCustomKey"`: `"My content is set here"` }
               Default: {`"event"`: `"<event>"`, `"instanceId"`: `"<instance-id>"`, `"resourceId"`: `"<resource-id>"`, `"resourceName"`: `"<resource-name>"` }.
        """
        pulumi.set(__self__, "endpoint", endpoint)
        pulumi.set(__self__, "event_type", event_type)
        pulumi.set(__self__, "protocol", protocol)
        pulumi.set(__self__, "resource_id", resource_id)
        if format is not None:
            pulumi.set(__self__, "format", format)
    @property
    @pulumi.getter
    def endpoint(self) -> pulumi.Input[str]:
        """
        The endpoint the notification will be sent to. url in case of `"http"`/`"https"`/`"web"`, email address in case of `"email"`/`"email-json"` and sns-topic-arn in case of `"aws-sns"`.
        """
        return pulumi.get(self, "endpoint")
    @endpoint.setter
    def endpoint(self, value: pulumi.Input[str]):
        pulumi.set(self, "endpoint", value)
    @property
    @pulumi.getter(name="eventType")
    def event_type(self) -> pulumi.Input[str]:
        """
        The event to send the notification when triggered. Valid values: `"AWS_EC2_INSTANCE_TERMINATE"`, `"AWS_EC2_INSTANCE_TERMINATED"`, `"AWS_EC2_INSTANCE_LAUNCH"`, `"AWS_EC2_INSTANCE_READY_SIGNAL_TIMEOUT"`, `"AWS_EC2_CANT_SPIN_OD"`, `"AWS_EC2_INSTANCE_UNHEALTHY_IN_ELB"`, `"GROUP_ROLL_FAILED"`, `"GROUP_ROLL_FINISHED"`,
        `"CANT_SCALE_UP_GROUP_MAX_CAPACITY"`,
        `"GROUP_UPDATED"`,
        `"AWS_EMR_PROVISION_TIMEOUT"`,
        `"GROUP_BEANSTALK_INIT_READY"`,
        `"AZURE_VM_TERMINATED"`,
        `"AZURE_VM_TERMINATE"`,
        `"AWS_EC2_MANAGED_INSTANCE_PAUSING"`,
        `"AWS_EC2_MANAGED_INSTANCE_RESUMING"`,
        `"AWS_EC2_MANAGED_INSTANCE_RECYCLING"`,`"AWS_EC2_MANAGED_INSTANCE_DELETING"`.
        Ocean Events:`"CLUSTER_ROLL_FINISHED"`,`"GROUP_ROLL_FAILED"`.
        """
        return pulumi.get(self, "event_type")
    @event_type.setter
    def event_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "event_type", value)
    @property
    @pulumi.getter
    def protocol(self) -> pulumi.Input[str]:
        """
        The protocol to send the notification. Valid values: `"email"`, `"email-json"`, `"aws-sns"`, `"web"`.
        The following values are deprecated: `"http"` , `"https"`
        You can use the generic `"web"` protocol instead.
        `"aws-sns"` is only supported with AWS provider
        """
        return pulumi.get(self, "protocol")
    @protocol.setter
    def protocol(self, value: pulumi.Input[str]):
        pulumi.set(self, "protocol", value)
    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> pulumi.Input[str]:
        """
        Spotinst Resource id (Elastigroup or Ocean ID).
        """
        return pulumi.get(self, "resource_id")
    @resource_id.setter
    def resource_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_id", value)
    @property
    @pulumi.getter
    def format(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        The format of the notification content (JSON Format - Key+Value). Valid Values : `"instance-id"`, `"event"`, `"resource-id"`, `"resource-name"`, `"subnet-id"`, `"availability-zone"`, `"reason"`, `"private-ip"`, `"launchspec-id"`
        Example: {"event": `"event"`, `"resourceId"`: `"resource-id"`, `"resourceName"`: `"resource-name"`, `"myCustomKey"`: `"My content is set here"` }
        Default: {`"event"`: `"<event>"`, `"instanceId"`: `"<instance-id>"`, `"resourceId"`: `"<resource-id>"`, `"resourceName"`: `"<resource-name>"` }.
        """
        return pulumi.get(self, "format")
    @format.setter
    def format(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "format", value)
@pulumi.input_type
class _SubscriptionState:
    # Generated state class mirroring SubscriptionArgs but with every field
    # optional, used for lookup/filtering of existing Subscription resources.
    def __init__(__self__, *,
                 endpoint: Optional[pulumi.Input[str]] = None,
                 event_type: Optional[pulumi.Input[str]] = None,
                 format: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 protocol: Optional[pulumi.Input[str]] = None,
                 resource_id: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering Subscription resources.
        :param pulumi.Input[str] endpoint: The endpoint the notification will be sent to. url in case of `"http"`/`"https"`/`"web"`, email address in case of `"email"`/`"email-json"` and sns-topic-arn in case of `"aws-sns"`.
        :param pulumi.Input[str] event_type: The event to send the notification when triggered. Valid values: `"AWS_EC2_INSTANCE_TERMINATE"`, `"AWS_EC2_INSTANCE_TERMINATED"`, `"AWS_EC2_INSTANCE_LAUNCH"`, `"AWS_EC2_INSTANCE_READY_SIGNAL_TIMEOUT"`, `"AWS_EC2_CANT_SPIN_OD"`, `"AWS_EC2_INSTANCE_UNHEALTHY_IN_ELB"`, `"GROUP_ROLL_FAILED"`, `"GROUP_ROLL_FINISHED"`,
               `"CANT_SCALE_UP_GROUP_MAX_CAPACITY"`,
               `"GROUP_UPDATED"`,
               `"AWS_EMR_PROVISION_TIMEOUT"`,
               `"GROUP_BEANSTALK_INIT_READY"`,
               `"AZURE_VM_TERMINATED"`,
               `"AZURE_VM_TERMINATE"`,
               `"AWS_EC2_MANAGED_INSTANCE_PAUSING"`,
               `"AWS_EC2_MANAGED_INSTANCE_RESUMING"`,
               `"AWS_EC2_MANAGED_INSTANCE_RECYCLING"`,`"AWS_EC2_MANAGED_INSTANCE_DELETING"`.
               Ocean Events:`"CLUSTER_ROLL_FINISHED"`,`"GROUP_ROLL_FAILED"`.
        :param pulumi.Input[Mapping[str, Any]] format: The format of the notification content (JSON Format - Key+Value). Valid Values : `"instance-id"`, `"event"`, `"resource-id"`, `"resource-name"`, `"subnet-id"`, `"availability-zone"`, `"reason"`, `"private-ip"`, `"launchspec-id"`
               Example: {"event": `"event"`, `"resourceId"`: `"resource-id"`, `"resourceName"`: `"resource-name"`, `"myCustomKey"`: `"My content is set here"` }
               Default: {`"event"`: `"<event>"`, `"instanceId"`: `"<instance-id>"`, `"resourceId"`: `"<resource-id>"`, `"resourceName"`: `"<resource-name>"` }.
        :param pulumi.Input[str] protocol: The protocol to send the notification. Valid values: `"email"`, `"email-json"`, `"aws-sns"`, `"web"`.
               The following values are deprecated: `"http"` , `"https"`
               You can use the generic `"web"` protocol instead.
               `"aws-sns"` is only supported with AWS provider
        :param pulumi.Input[str] resource_id: Spotinst Resource id (Elastigroup or Ocean ID).
        """
        if endpoint is not None:
            pulumi.set(__self__, "endpoint", endpoint)
        if event_type is not None:
            pulumi.set(__self__, "event_type", event_type)
        if format is not None:
            pulumi.set(__self__, "format", format)
        if protocol is not None:
            pulumi.set(__self__, "protocol", protocol)
        if resource_id is not None:
            pulumi.set(__self__, "resource_id", resource_id)
    @property
    @pulumi.getter
    def endpoint(self) -> Optional[pulumi.Input[str]]:
        """
        The endpoint the notification will be sent to. url in case of `"http"`/`"https"`/`"web"`, email address in case of `"email"`/`"email-json"` and sns-topic-arn in case of `"aws-sns"`.
        """
        return pulumi.get(self, "endpoint")
    @endpoint.setter
    def endpoint(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "endpoint", value)
    @property
    @pulumi.getter(name="eventType")
    def event_type(self) -> Optional[pulumi.Input[str]]:
        """
        The event to send the notification when triggered. Valid values: `"AWS_EC2_INSTANCE_TERMINATE"`, `"AWS_EC2_INSTANCE_TERMINATED"`, `"AWS_EC2_INSTANCE_LAUNCH"`, `"AWS_EC2_INSTANCE_READY_SIGNAL_TIMEOUT"`, `"AWS_EC2_CANT_SPIN_OD"`, `"AWS_EC2_INSTANCE_UNHEALTHY_IN_ELB"`, `"GROUP_ROLL_FAILED"`, `"GROUP_ROLL_FINISHED"`,
        `"CANT_SCALE_UP_GROUP_MAX_CAPACITY"`,
        `"GROUP_UPDATED"`,
        `"AWS_EMR_PROVISION_TIMEOUT"`,
        `"GROUP_BEANSTALK_INIT_READY"`,
        `"AZURE_VM_TERMINATED"`,
        `"AZURE_VM_TERMINATE"`,
        `"AWS_EC2_MANAGED_INSTANCE_PAUSING"`,
        `"AWS_EC2_MANAGED_INSTANCE_RESUMING"`,
        `"AWS_EC2_MANAGED_INSTANCE_RECYCLING"`,`"AWS_EC2_MANAGED_INSTANCE_DELETING"`.
        Ocean Events:`"CLUSTER_ROLL_FINISHED"`,`"GROUP_ROLL_FAILED"`.
        """
        return pulumi.get(self, "event_type")
    @event_type.setter
    def event_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "event_type", value)
    @property
    @pulumi.getter
    def format(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        The format of the notification content (JSON Format - Key+Value). Valid Values : `"instance-id"`, `"event"`, `"resource-id"`, `"resource-name"`, `"subnet-id"`, `"availability-zone"`, `"reason"`, `"private-ip"`, `"launchspec-id"`
        Example: {"event": `"event"`, `"resourceId"`: `"resource-id"`, `"resourceName"`: `"resource-name"`, `"myCustomKey"`: `"My content is set here"` }
        Default: {`"event"`: `"<event>"`, `"instanceId"`: `"<instance-id>"`, `"resourceId"`: `"<resource-id>"`, `"resourceName"`: `"<resource-name>"` }.
        """
        return pulumi.get(self, "format")
    @format.setter
    def format(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "format", value)
    @property
    @pulumi.getter
    def protocol(self) -> Optional[pulumi.Input[str]]:
        """
        The protocol to send the notification. Valid values: `"email"`, `"email-json"`, `"aws-sns"`, `"web"`.
        The following values are deprecated: `"http"` , `"https"`
        You can use the generic `"web"` protocol instead.
        `"aws-sns"` is only supported with AWS provider
        """
        return pulumi.get(self, "protocol")
    @protocol.setter
    def protocol(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protocol", value)
    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> Optional[pulumi.Input[str]]:
        """
        Spotinst Resource id (Elastigroup or Ocean ID).
        """
        return pulumi.get(self, "resource_id")
    @resource_id.setter
    def resource_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_id", value)
class Subscription(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
endpoint: Optional[pulumi.Input[str]] = None,
event_type: Optional[pulumi.Input[str]] = None,
format: Optional[pulumi.Input[Mapping[str, Any]]] = None,
protocol: Optional[pulumi.Input[str]] = None,
resource_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides a Spotinst subscription resource.
## Example Usage
```python
import pulumi
import pulumi_spotinst as spotinst
# Create a Subscription
default_subscription = spotinst.Subscription("default-subscription",
endpoint="http://endpoint.com",
event_type="AWS_EC2_INSTANCE_LAUNCH",
format={
"event": "%event%",
"instance_id": "%instance-id%",
"resource_id": "%resource-id%",
"resource_name": "%resource-name%",
"tags": "foo,baz,baz",
},
protocol="http",
resource_id=spotinst_elastigroup_aws["my-eg"]["id"])
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] endpoint: The endpoint the notification will be sent to. url in case of `"http"`/`"https"`/`"web"`, email address in case of `"email"`/`"email-json"` and sns-topic-arn in case of `"aws-sns"`.
:param pulumi.Input[str] event_type: The event to send the notification when triggered. Valid values: `"AWS_EC2_INSTANCE_TERMINATE"`, `"AWS_EC2_INSTANCE_TERMINATED"`, `"AWS_EC2_INSTANCE_LAUNCH"`, `"AWS_EC2_INSTANCE_READY_SIGNAL_TIMEOUT"`, `"AWS_EC2_CANT_SPIN_OD"`, `"AWS_EC2_INSTANCE_UNHEALTHY_IN_ELB"`, `"GROUP_ROLL_FAILED"`, `"GROUP_ROLL_FINISHED"`,
`"CANT_SCALE_UP_GROUP_MAX_CAPACITY"`,
`"GROUP_UPDATED"`,
`"AWS_EMR_PROVISION_TIMEOUT"`,
`"GROUP_BEANSTALK_INIT_READY"`,
`"AZURE_VM_TERMINATED"`,
`"AZURE_VM_TERMINATE"`,
`"AWS_EC2_MANAGED_INSTANCE_PAUSING"`,
`"AWS_EC2_MANAGED_INSTANCE_RESUMING"`,
`"AWS_EC2_MANAGED_INSTANCE_RECYCLING"`,`"AWS_EC2_MANAGED_INSTANCE_DELETING"`.
Ocean Events:`"CLUSTER_ROLL_FINISHED"`,`"GROUP_ROLL_FAILED"`.
:param pulumi.Input[Mapping[str, Any]] format: The format of the notification content (JSON Format - Key+Value). Valid Values : `"instance-id"`, `"event"`, `"resource-id"`, `"resource-name"`, `"subnet-id"`, `"availability-zone"`, `"reason"`, `"private-ip"`, `"launchspec-id"`
Example: {"event": `"event"`, `"resourceId"`: `"resource-id"`, `"resourceName"`: `"resource-name"`", `"myCustomKey"`: `"My content is set here"` }
Default: {`"event"`: `"<event>"`, `"instanceId"`: `"<instance-id>"`, `"resourceId"`: `"<resource-id>"`, `"resourceName"`: `"<resource-name>"` }.
:param pulumi.Input[str] protocol: The protocol to send the notification. Valid values: `"email"`, `"email-json"`, `"aws-sns"`, `"web"`.
The following values are deprecated: `"http"` , `"https"`
You can use the generic `"web"` protocol instead.
`"aws-sns"` is only supported with AWS provider
:param pulumi.Input[str] resource_id: Spotinst Resource id (Elastigroup or Ocean ID).
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: SubscriptionArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a Spotinst subscription resource.
## Example Usage
```python
import pulumi
import pulumi_spotinst as spotinst
# Create a Subscription
default_subscription = spotinst.Subscription("default-subscription",
endpoint="http://endpoint.com",
event_type="AWS_EC2_INSTANCE_LAUNCH",
format={
"event": "%event%",
"instance_id": "%instance-id%",
"resource_id": "%resource-id%",
"resource_name": "%resource-name%",
"tags": "foo,baz,baz",
},
protocol="http",
resource_id=spotinst_elastigroup_aws["my-eg"]["id"])
```
:param str resource_name: The name of the resource.
:param SubscriptionArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SubscriptionArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 endpoint: Optional[pulumi.Input[str]] = None,
                 event_type: Optional[pulumi.Input[str]] = None,
                 format: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 protocol: Optional[pulumi.Input[str]] = None,
                 resource_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        # Shared implementation behind both __init__ overloads: normalizes the
        # resource options, builds the property bag, and registers the resource
        # with the Pulumi engine via the base-class constructor.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # No provider ID given, so this is a *create* (or URN-based lookup):
            # build the full property bag and enforce required inputs.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = SubscriptionArgs.__new__(SubscriptionArgs)
            # Required inputs are only enforced when no URN is supplied; a URN
            # lookup may legitimately omit them.
            if endpoint is None and not opts.urn:
                raise TypeError("Missing required property 'endpoint'")
            __props__.__dict__["endpoint"] = endpoint
            if event_type is None and not opts.urn:
                raise TypeError("Missing required property 'event_type'")
            __props__.__dict__["event_type"] = event_type
            __props__.__dict__["format"] = format
            if protocol is None and not opts.urn:
                raise TypeError("Missing required property 'protocol'")
            __props__.__dict__["protocol"] = protocol
            if resource_id is None and not opts.urn:
                raise TypeError("Missing required property 'resource_id'")
            __props__.__dict__["resource_id"] = resource_id
        super(Subscription, __self__).__init__(
            'spotinst:index/subscription:Subscription',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            endpoint: Optional[pulumi.Input[str]] = None,
            event_type: Optional[pulumi.Input[str]] = None,
            format: Optional[pulumi.Input[Mapping[str, Any]]] = None,
            protocol: Optional[pulumi.Input[str]] = None,
            resource_id: Optional[pulumi.Input[str]] = None) -> 'Subscription':
        """
        Get an existing Subscription resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] endpoint: The endpoint the notification will be sent to: a url for
               `"http"`/`"https"`/`"web"`, an email address for `"email"`/`"email-json"`, or an
               sns-topic-arn for `"aws-sns"`.
        :param pulumi.Input[str] event_type: The event to send the notification when triggered. Valid values:
               `"AWS_EC2_INSTANCE_TERMINATE"`, `"AWS_EC2_INSTANCE_TERMINATED"`, `"AWS_EC2_INSTANCE_LAUNCH"`,
               `"AWS_EC2_INSTANCE_READY_SIGNAL_TIMEOUT"`, `"AWS_EC2_CANT_SPIN_OD"`,
               `"AWS_EC2_INSTANCE_UNHEALTHY_IN_ELB"`, `"GROUP_ROLL_FAILED"`, `"GROUP_ROLL_FINISHED"`,
               `"CANT_SCALE_UP_GROUP_MAX_CAPACITY"`, `"GROUP_UPDATED"`, `"AWS_EMR_PROVISION_TIMEOUT"`,
               `"GROUP_BEANSTALK_INIT_READY"`, `"AZURE_VM_TERMINATED"`, `"AZURE_VM_TERMINATE"`,
               `"AWS_EC2_MANAGED_INSTANCE_PAUSING"`, `"AWS_EC2_MANAGED_INSTANCE_RESUMING"`,
               `"AWS_EC2_MANAGED_INSTANCE_RECYCLING"`, `"AWS_EC2_MANAGED_INSTANCE_DELETING"`.
               Ocean events: `"CLUSTER_ROLL_FINISHED"`, `"GROUP_ROLL_FAILED"`.
        :param pulumi.Input[Mapping[str, Any]] format: The format of the notification content (JSON
               Format - Key+Value). Valid values: `"instance-id"`, `"event"`, `"resource-id"`,
               `"resource-name"`, `"subnet-id"`, `"availability-zone"`, `"reason"`, `"private-ip"`,
               `"launchspec-id"`.
        :param pulumi.Input[str] protocol: The protocol to send the notification. Valid values: `"email"`,
               `"email-json"`, `"aws-sns"`, `"web"`. The values `"http"` and `"https"` are deprecated;
               use the generic `"web"` protocol instead. `"aws-sns"` is only supported with the AWS provider.
        :param pulumi.Input[str] resource_id: Spotinst Resource id (Elastigroup or Ocean ID).
        """
        # Pin the provider ID onto the options so the engine adopts the existing
        # resource's state rather than creating a new one.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # __new__ bypasses _SubscriptionState.__init__; every field may be None
        # in a state lookup, so values are attached to __dict__ verbatim.
        __props__ = _SubscriptionState.__new__(_SubscriptionState)
        __props__.__dict__["endpoint"] = endpoint
        __props__.__dict__["event_type"] = event_type
        __props__.__dict__["format"] = format
        __props__.__dict__["protocol"] = protocol
        __props__.__dict__["resource_id"] = resource_id
        return Subscription(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter
    def endpoint(self) -> pulumi.Output[str]:
        """
        The endpoint the notification will be sent to: a url for `"http"`/`"https"`/`"web"`, an email
        address for `"email"`/`"email-json"`, or an sns-topic-arn for `"aws-sns"`.
        """
        # Read-only output; pulumi.get resolves the value stored under "endpoint".
        return pulumi.get(self, "endpoint")
    @property
    @pulumi.getter(name="eventType")
    def event_type(self) -> pulumi.Output[str]:
        """
        The event to send the notification when triggered. Valid values:
        `"AWS_EC2_INSTANCE_TERMINATE"`, `"AWS_EC2_INSTANCE_TERMINATED"`, `"AWS_EC2_INSTANCE_LAUNCH"`,
        `"AWS_EC2_INSTANCE_READY_SIGNAL_TIMEOUT"`, `"AWS_EC2_CANT_SPIN_OD"`,
        `"AWS_EC2_INSTANCE_UNHEALTHY_IN_ELB"`, `"GROUP_ROLL_FAILED"`, `"GROUP_ROLL_FINISHED"`,
        `"CANT_SCALE_UP_GROUP_MAX_CAPACITY"`, `"GROUP_UPDATED"`, `"AWS_EMR_PROVISION_TIMEOUT"`,
        `"GROUP_BEANSTALK_INIT_READY"`, `"AZURE_VM_TERMINATED"`, `"AZURE_VM_TERMINATE"`,
        `"AWS_EC2_MANAGED_INSTANCE_PAUSING"`, `"AWS_EC2_MANAGED_INSTANCE_RESUMING"`,
        `"AWS_EC2_MANAGED_INSTANCE_RECYCLING"`, `"AWS_EC2_MANAGED_INSTANCE_DELETING"`.
        Ocean events: `"CLUSTER_ROLL_FINISHED"`, `"GROUP_ROLL_FAILED"`.
        """
        # Python attribute is snake_case; the wire/provider name is "eventType".
        return pulumi.get(self, "event_type")
    @property
    @pulumi.getter
    def format(self) -> pulumi.Output[Optional[Mapping[str, Any]]]:
        """
        The format of the notification content (JSON Format - Key+Value). Valid values:
        `"instance-id"`, `"event"`, `"resource-id"`, `"resource-name"`, `"subnet-id"`,
        `"availability-zone"`, `"reason"`, `"private-ip"`, `"launchspec-id"`.
        Example: {"event": "event", "resourceId": "resource-id", "resourceName": "resource-name",
        "myCustomKey": "My content is set here"}
        Default: {"event": "<event>", "instanceId": "<instance-id>", "resourceId": "<resource-id>",
        "resourceName": "<resource-name>"}.
        """
        # Optional input, so the resolved output may be None.
        return pulumi.get(self, "format")
    @property
    @pulumi.getter
    def protocol(self) -> pulumi.Output[str]:
        """
        The protocol to send the notification. Valid values: `"email"`, `"email-json"`,
        `"aws-sns"`, `"web"`. The values `"http"` and `"https"` are deprecated; use the
        generic `"web"` protocol instead. `"aws-sns"` is only supported with the AWS provider.
        """
        return pulumi.get(self, "protocol")
    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> pulumi.Output[str]:
        """
        Spotinst Resource id (Elastigroup or Ocean ID).
        """
        # Wire/provider name is "resourceId"; Python attribute is snake_case.
        return pulumi.get(self, "resource_id")
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['SubscriptionArgs', 'Subscription']
@pulumi.input_type
class SubscriptionArgs:
    """Typed argument bag accepted by the ``Subscription`` resource constructor."""

    def __init__(__self__, *,
                 endpoint: pulumi.Input[str],
                 event_type: pulumi.Input[str],
                 protocol: pulumi.Input[str],
                 resource_id: pulumi.Input[str],
                 format: Optional[pulumi.Input[Mapping[str, Any]]] = None):
        """
        The set of arguments for constructing a Subscription resource.

        :param pulumi.Input[str] endpoint: The endpoint the notification will be sent to: a url for
               `"http"`/`"https"`/`"web"`, an email address for `"email"`/`"email-json"`, or an
               sns-topic-arn for `"aws-sns"`.
        :param pulumi.Input[str] event_type: The event to send the notification when triggered. Valid values:
               `"AWS_EC2_INSTANCE_TERMINATE"`, `"AWS_EC2_INSTANCE_TERMINATED"`, `"AWS_EC2_INSTANCE_LAUNCH"`,
               `"AWS_EC2_INSTANCE_READY_SIGNAL_TIMEOUT"`, `"AWS_EC2_CANT_SPIN_OD"`,
               `"AWS_EC2_INSTANCE_UNHEALTHY_IN_ELB"`, `"GROUP_ROLL_FAILED"`, `"GROUP_ROLL_FINISHED"`,
               `"CANT_SCALE_UP_GROUP_MAX_CAPACITY"`, `"GROUP_UPDATED"`, `"AWS_EMR_PROVISION_TIMEOUT"`,
               `"GROUP_BEANSTALK_INIT_READY"`, `"AZURE_VM_TERMINATED"`, `"AZURE_VM_TERMINATE"`,
               `"AWS_EC2_MANAGED_INSTANCE_PAUSING"`, `"AWS_EC2_MANAGED_INSTANCE_RESUMING"`,
               `"AWS_EC2_MANAGED_INSTANCE_RECYCLING"`, `"AWS_EC2_MANAGED_INSTANCE_DELETING"`.
               Ocean events: `"CLUSTER_ROLL_FINISHED"`, `"GROUP_ROLL_FAILED"`.
        :param pulumi.Input[str] protocol: The protocol to send the notification. Valid values: `"email"`,
               `"email-json"`, `"aws-sns"`, `"web"`. The values `"http"` and `"https"` are deprecated;
               use the generic `"web"` protocol instead. `"aws-sns"` is only supported with the AWS provider.
        :param pulumi.Input[str] resource_id: Spotinst Resource id (Elastigroup or Ocean ID).
        :param pulumi.Input[Mapping[str, Any]] format: The format of the notification content (JSON
               Format - Key+Value). Valid values: `"instance-id"`, `"event"`, `"resource-id"`,
               `"resource-name"`, `"subnet-id"`, `"availability-zone"`, `"reason"`, `"private-ip"`,
               `"launchspec-id"`.
               Example: {"event": "event", "resourceId": "resource-id", "resourceName": "resource-name",
               "myCustomKey": "My content is set here"}
               Default: {"event": "<event>", "instanceId": "<instance-id>", "resourceId": "<resource-id>",
               "resourceName": "<resource-name>"}.
        """
        # The four required inputs are stored unconditionally; the optional
        # "format" mapping is only recorded when the caller supplied one.
        pulumi.set(__self__, "endpoint", endpoint)
        pulumi.set(__self__, "event_type", event_type)
        pulumi.set(__self__, "protocol", protocol)
        pulumi.set(__self__, "resource_id", resource_id)
        if format is not None:
            pulumi.set(__self__, "format", format)

    @property
    @pulumi.getter
    def endpoint(self) -> pulumi.Input[str]:
        """
        The endpoint the notification will be sent to: a url for `"http"`/`"https"`/`"web"`, an email
        address for `"email"`/`"email-json"`, or an sns-topic-arn for `"aws-sns"`.
        """
        return pulumi.get(self, "endpoint")

    @endpoint.setter
    def endpoint(self, value: pulumi.Input[str]):
        pulumi.set(self, "endpoint", value)

    @property
    @pulumi.getter(name="eventType")
    def event_type(self) -> pulumi.Input[str]:
        """
        The event to send the notification when triggered. Valid values:
        `"AWS_EC2_INSTANCE_TERMINATE"`, `"AWS_EC2_INSTANCE_TERMINATED"`, `"AWS_EC2_INSTANCE_LAUNCH"`,
        `"AWS_EC2_INSTANCE_READY_SIGNAL_TIMEOUT"`, `"AWS_EC2_CANT_SPIN_OD"`,
        `"AWS_EC2_INSTANCE_UNHEALTHY_IN_ELB"`, `"GROUP_ROLL_FAILED"`, `"GROUP_ROLL_FINISHED"`,
        `"CANT_SCALE_UP_GROUP_MAX_CAPACITY"`, `"GROUP_UPDATED"`, `"AWS_EMR_PROVISION_TIMEOUT"`,
        `"GROUP_BEANSTALK_INIT_READY"`, `"AZURE_VM_TERMINATED"`, `"AZURE_VM_TERMINATE"`,
        `"AWS_EC2_MANAGED_INSTANCE_PAUSING"`, `"AWS_EC2_MANAGED_INSTANCE_RESUMING"`,
        `"AWS_EC2_MANAGED_INSTANCE_RECYCLING"`, `"AWS_EC2_MANAGED_INSTANCE_DELETING"`.
        Ocean events: `"CLUSTER_ROLL_FINISHED"`, `"GROUP_ROLL_FAILED"`.
        """
        return pulumi.get(self, "event_type")

    @event_type.setter
    def event_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "event_type", value)

    @property
    @pulumi.getter
    def protocol(self) -> pulumi.Input[str]:
        """
        The protocol to send the notification. Valid values: `"email"`, `"email-json"`,
        `"aws-sns"`, `"web"`. The values `"http"` and `"https"` are deprecated; use the
        generic `"web"` protocol instead. `"aws-sns"` is only supported with the AWS provider.
        """
        return pulumi.get(self, "protocol")

    @protocol.setter
    def protocol(self, value: pulumi.Input[str]):
        pulumi.set(self, "protocol", value)

    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> pulumi.Input[str]:
        """
        Spotinst Resource id (Elastigroup or Ocean ID).
        """
        return pulumi.get(self, "resource_id")

    @resource_id.setter
    def resource_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_id", value)

    @property
    @pulumi.getter
    def format(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        The format of the notification content (JSON Format - Key+Value). Valid values:
        `"instance-id"`, `"event"`, `"resource-id"`, `"resource-name"`, `"subnet-id"`,
        `"availability-zone"`, `"reason"`, `"private-ip"`, `"launchspec-id"`.
        Example: {"event": "event", "resourceId": "resource-id", "resourceName": "resource-name",
        "myCustomKey": "My content is set here"}
        Default: {"event": "<event>", "instanceId": "<instance-id>", "resourceId": "<resource-id>",
        "resourceName": "<resource-name>"}.
        """
        return pulumi.get(self, "format")

    @format.setter
    def format(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "format", value)
@pulumi.input_type
class _SubscriptionState:
    """State container used by ``Subscription.get``; every field is optional."""

    def __init__(__self__, *,
                 endpoint: Optional[pulumi.Input[str]] = None,
                 event_type: Optional[pulumi.Input[str]] = None,
                 format: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 protocol: Optional[pulumi.Input[str]] = None,
                 resource_id: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering Subscription resources.

        :param pulumi.Input[str] endpoint: The endpoint the notification will be sent to: a url for
               `"http"`/`"https"`/`"web"`, an email address for `"email"`/`"email-json"`, or an
               sns-topic-arn for `"aws-sns"`.
        :param pulumi.Input[str] event_type: The event to send the notification when triggered. Valid values:
               `"AWS_EC2_INSTANCE_TERMINATE"`, `"AWS_EC2_INSTANCE_TERMINATED"`, `"AWS_EC2_INSTANCE_LAUNCH"`,
               `"AWS_EC2_INSTANCE_READY_SIGNAL_TIMEOUT"`, `"AWS_EC2_CANT_SPIN_OD"`,
               `"AWS_EC2_INSTANCE_UNHEALTHY_IN_ELB"`, `"GROUP_ROLL_FAILED"`, `"GROUP_ROLL_FINISHED"`,
               `"CANT_SCALE_UP_GROUP_MAX_CAPACITY"`, `"GROUP_UPDATED"`, `"AWS_EMR_PROVISION_TIMEOUT"`,
               `"GROUP_BEANSTALK_INIT_READY"`, `"AZURE_VM_TERMINATED"`, `"AZURE_VM_TERMINATE"`,
               `"AWS_EC2_MANAGED_INSTANCE_PAUSING"`, `"AWS_EC2_MANAGED_INSTANCE_RESUMING"`,
               `"AWS_EC2_MANAGED_INSTANCE_RECYCLING"`, `"AWS_EC2_MANAGED_INSTANCE_DELETING"`.
               Ocean events: `"CLUSTER_ROLL_FINISHED"`, `"GROUP_ROLL_FAILED"`.
        :param pulumi.Input[Mapping[str, Any]] format: The format of the notification content (JSON
               Format - Key+Value). Valid values: `"instance-id"`, `"event"`, `"resource-id"`,
               `"resource-name"`, `"subnet-id"`, `"availability-zone"`, `"reason"`, `"private-ip"`,
               `"launchspec-id"`.
        :param pulumi.Input[str] protocol: The protocol to send the notification. Valid values: `"email"`,
               `"email-json"`, `"aws-sns"`, `"web"`. The values `"http"` and `"https"` are deprecated;
               use the generic `"web"` protocol instead. `"aws-sns"` is only supported with the AWS provider.
        :param pulumi.Input[str] resource_id: Spotinst Resource id (Elastigroup or Ocean ID).
        """
        # Unlike SubscriptionArgs, nothing is required here: only the fields the
        # caller actually provided are recorded.
        if endpoint is not None:
            pulumi.set(__self__, "endpoint", endpoint)
        if event_type is not None:
            pulumi.set(__self__, "event_type", event_type)
        if format is not None:
            pulumi.set(__self__, "format", format)
        if protocol is not None:
            pulumi.set(__self__, "protocol", protocol)
        if resource_id is not None:
            pulumi.set(__self__, "resource_id", resource_id)

    @property
    @pulumi.getter
    def endpoint(self) -> Optional[pulumi.Input[str]]:
        """
        The endpoint the notification will be sent to: a url for `"http"`/`"https"`/`"web"`, an email
        address for `"email"`/`"email-json"`, or an sns-topic-arn for `"aws-sns"`.
        """
        return pulumi.get(self, "endpoint")

    @endpoint.setter
    def endpoint(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "endpoint", value)

    @property
    @pulumi.getter(name="eventType")
    def event_type(self) -> Optional[pulumi.Input[str]]:
        """
        The event to send the notification when triggered. Valid values:
        `"AWS_EC2_INSTANCE_TERMINATE"`, `"AWS_EC2_INSTANCE_TERMINATED"`, `"AWS_EC2_INSTANCE_LAUNCH"`,
        `"AWS_EC2_INSTANCE_READY_SIGNAL_TIMEOUT"`, `"AWS_EC2_CANT_SPIN_OD"`,
        `"AWS_EC2_INSTANCE_UNHEALTHY_IN_ELB"`, `"GROUP_ROLL_FAILED"`, `"GROUP_ROLL_FINISHED"`,
        `"CANT_SCALE_UP_GROUP_MAX_CAPACITY"`, `"GROUP_UPDATED"`, `"AWS_EMR_PROVISION_TIMEOUT"`,
        `"GROUP_BEANSTALK_INIT_READY"`, `"AZURE_VM_TERMINATED"`, `"AZURE_VM_TERMINATE"`,
        `"AWS_EC2_MANAGED_INSTANCE_PAUSING"`, `"AWS_EC2_MANAGED_INSTANCE_RESUMING"`,
        `"AWS_EC2_MANAGED_INSTANCE_RECYCLING"`, `"AWS_EC2_MANAGED_INSTANCE_DELETING"`.
        Ocean events: `"CLUSTER_ROLL_FINISHED"`, `"GROUP_ROLL_FAILED"`.
        """
        return pulumi.get(self, "event_type")

    @event_type.setter
    def event_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "event_type", value)

    @property
    @pulumi.getter
    def format(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        The format of the notification content (JSON Format - Key+Value). Valid values:
        `"instance-id"`, `"event"`, `"resource-id"`, `"resource-name"`, `"subnet-id"`,
        `"availability-zone"`, `"reason"`, `"private-ip"`, `"launchspec-id"`.
        Example: {"event": "event", "resourceId": "resource-id", "resourceName": "resource-name",
        "myCustomKey": "My content is set here"}
        Default: {"event": "<event>", "instanceId": "<instance-id>", "resourceId": "<resource-id>",
        "resourceName": "<resource-name>"}.
        """
        return pulumi.get(self, "format")

    @format.setter
    def format(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "format", value)

    @property
    @pulumi.getter
    def protocol(self) -> Optional[pulumi.Input[str]]:
        """
        The protocol to send the notification. Valid values: `"email"`, `"email-json"`,
        `"aws-sns"`, `"web"`. The values `"http"` and `"https"` are deprecated; use the
        generic `"web"` protocol instead. `"aws-sns"` is only supported with the AWS provider.
        """
        return pulumi.get(self, "protocol")

    @protocol.setter
    def protocol(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protocol", value)

    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> Optional[pulumi.Input[str]]:
        """
        Spotinst Resource id (Elastigroup or Ocean ID).
        """
        return pulumi.get(self, "resource_id")

    @resource_id.setter
    def resource_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_id", value)
class Subscription(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
endpoint: Optional[pulumi.Input[str]] = None,
event_type: Optional[pulumi.Input[str]] = None,
format: Optional[pulumi.Input[Mapping[str, Any]]] = None,
protocol: Optional[pulumi.Input[str]] = None,
resource_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides a Spotinst subscription resource.
## Example Usage
```python
import pulumi
import pulumi_spotinst as spotinst
# Create a Subscription
default_subscription = spotinst.Subscription("default-subscription",
endpoint="http://endpoint.com",
event_type="AWS_EC2_INSTANCE_LAUNCH",
format={
"event": "%event%",
"instance_id": "%instance-id%",
"resource_id": "%resource-id%",
"resource_name": "%resource-name%",
"tags": "foo,baz,baz",
},
protocol="http",
resource_id=spotinst_elastigroup_aws["my-eg"]["id"])
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] endpoint: The endpoint the notification will be sent to. url in case of `"http"`/`"https"`/`"web"`, email address in case of `"email"`/`"email-json"` and sns-topic-arn in case of `"aws-sns"`.
:param pulumi.Input[str] event_type: The event to send the notification when triggered. Valid values: `"AWS_EC2_INSTANCE_TERMINATE"`, `"AWS_EC2_INSTANCE_TERMINATED"`, `"AWS_EC2_INSTANCE_LAUNCH"`, `"AWS_EC2_INSTANCE_READY_SIGNAL_TIMEOUT"`, `"AWS_EC2_CANT_SPIN_OD"`, `"AWS_EC2_INSTANCE_UNHEALTHY_IN_ELB"`, `"GROUP_ROLL_FAILED"`, `"GROUP_ROLL_FINISHED"`,
`"CANT_SCALE_UP_GROUP_MAX_CAPACITY"`,
`"GROUP_UPDATED"`,
`"AWS_EMR_PROVISION_TIMEOUT"`,
`"GROUP_BEANSTALK_INIT_READY"`,
`"AZURE_VM_TERMINATED"`,
`"AZURE_VM_TERMINATE"`,
`"AWS_EC2_MANAGED_INSTANCE_PAUSING"`,
`"AWS_EC2_MANAGED_INSTANCE_RESUMING"`,
`"AWS_EC2_MANAGED_INSTANCE_RECYCLING"`,`"AWS_EC2_MANAGED_INSTANCE_DELETING"`.
Ocean Events:`"CLUSTER_ROLL_FINISHED"`,`"GROUP_ROLL_FAILED"`.
:param pulumi.Input[Mapping[str, Any]] format: The format of the notification content (JSON Format - Key+Value). Valid Values : `"instance-id"`, `"event"`, `"resource-id"`, `"resource-name"`, `"subnet-id"`, `"availability-zone"`, `"reason"`, `"private-ip"`, `"launchspec-id"`
Example: {"event": `"event"`, `"resourceId"`: `"resource-id"`, `"resourceName"`: `"resource-name"`", `"myCustomKey"`: `"My content is set here"` }
Default: {`"event"`: `"<event>"`, `"instanceId"`: `"<instance-id>"`, `"resourceId"`: `"<resource-id>"`, `"resourceName"`: `"<resource-name>"` }.
:param pulumi.Input[str] protocol: The protocol to send the notification. Valid values: `"email"`, `"email-json"`, `"aws-sns"`, `"web"`.
The following values are deprecated: `"http"` , `"https"`
You can use the generic `"web"` protocol instead.
`"aws-sns"` is only supported with AWS provider
:param pulumi.Input[str] resource_id: Spotinst Resource id (Elastigroup or Ocean ID).
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: SubscriptionArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a Spotinst subscription resource.
## Example Usage
```python
import pulumi
import pulumi_spotinst as spotinst
# Create a Subscription
default_subscription = spotinst.Subscription("default-subscription",
endpoint="http://endpoint.com",
event_type="AWS_EC2_INSTANCE_LAUNCH",
format={
"event": "%event%",
"instance_id": "%instance-id%",
"resource_id": "%resource-id%",
"resource_name": "%resource-name%",
"tags": "foo,baz,baz",
},
protocol="http",
resource_id=spotinst_elastigroup_aws["my-eg"]["id"])
```
:param str resource_name: The name of the resource.
:param SubscriptionArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SubscriptionArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
endpoint: Optional[pulumi.Input[str]] = None,
event_type: Optional[pulumi.Input[str]] = None,
format: Optional[pulumi.Input[Mapping[str, Any]]] = None,
protocol: Optional[pulumi.Input[str]] = None,
resource_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = SubscriptionArgs.__new__(SubscriptionArgs)
if endpoint is None and not opts.urn:
raise TypeError("Missing required property 'endpoint'")
__props__.__dict__["endpoint"] = endpoint
if event_type is None and not opts.urn:
raise TypeError("Missing required property 'event_type'")
__props__.__dict__["event_type"] = event_type
__props__.__dict__["format"] = format
if protocol is None and not opts.urn:
raise TypeError("Missing required property 'protocol'")
__props__.__dict__["protocol"] = protocol
if resource_id is None and not opts.urn:
raise TypeError("Missing required property 'resource_id'")
__props__.__dict__["resource_id"] = resource_id
super(Subscription, __self__).__init__(
'spotinst:index/subscription:Subscription',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
endpoint: Optional[pulumi.Input[str]] = None,
event_type: Optional[pulumi.Input[str]] = None,
format: Optional[pulumi.Input[Mapping[str, Any]]] = None,
protocol: Optional[pulumi.Input[str]] = None,
resource_id: Optional[pulumi.Input[str]] = None) -> 'Subscription':
"""
Get an existing Subscription resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] endpoint: The endpoint the notification will be sent to. url in case of `"http"`/`"https"`/`"web"`, email address in case of `"email"`/`"email-json"` and sns-topic-arn in case of `"aws-sns"`.
:param pulumi.Input[str] event_type: The event to send the notification when triggered. Valid values: `"AWS_EC2_INSTANCE_TERMINATE"`, `"AWS_EC2_INSTANCE_TERMINATED"`, `"AWS_EC2_INSTANCE_LAUNCH"`, `"AWS_EC2_INSTANCE_READY_SIGNAL_TIMEOUT"`, `"AWS_EC2_CANT_SPIN_OD"`, `"AWS_EC2_INSTANCE_UNHEALTHY_IN_ELB"`, `"GROUP_ROLL_FAILED"`, `"GROUP_ROLL_FINISHED"`,
`"CANT_SCALE_UP_GROUP_MAX_CAPACITY"`,
`"GROUP_UPDATED"`,
`"AWS_EMR_PROVISION_TIMEOUT"`,
`"GROUP_BEANSTALK_INIT_READY"`,
`"AZURE_VM_TERMINATED"`,
`"AZURE_VM_TERMINATE"`,
`"AWS_EC2_MANAGED_INSTANCE_PAUSING"`,
`"AWS_EC2_MANAGED_INSTANCE_RESUMING"`,
`"AWS_EC2_MANAGED_INSTANCE_RECYCLING"`,`"AWS_EC2_MANAGED_INSTANCE_DELETING"`.
Ocean Events:`"CLUSTER_ROLL_FINISHED"`,`"GROUP_ROLL_FAILED"`.
:param pulumi.Input[Mapping[str, Any]] format: The format of the notification content (JSON Format - Key+Value). Valid Values : `"instance-id"`, `"event"`, `"resource-id"`, `"resource-name"`, `"subnet-id"`, `"availability-zone"`, `"reason"`, `"private-ip"`, `"launchspec-id"`
Example: {"event": `"event"`, `"resourceId"`: `"resource-id"`, `"resourceName"`: `"resource-name"`", `"myCustomKey"`: `"My content is set here"` }
Default: {`"event"`: `"<event>"`, `"instanceId"`: `"<instance-id>"`, `"resourceId"`: `"<resource-id>"`, `"resourceName"`: `"<resource-name>"` }.
:param pulumi.Input[str] protocol: The protocol to send the notification. Valid values: `"email"`, `"email-json"`, `"aws-sns"`, `"web"`.
The following values are deprecated: `"http"` , `"https"`
You can use the generic `"web"` protocol instead.
`"aws-sns"` is only supported with AWS provider
:param pulumi.Input[str] resource_id: Spotinst Resource id (Elastigroup or Ocean ID).
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _SubscriptionState.__new__(_SubscriptionState)
__props__.__dict__["endpoint"] = endpoint
__props__.__dict__["event_type"] = event_type
__props__.__dict__["format"] = format
__props__.__dict__["protocol"] = protocol
__props__.__dict__["resource_id"] = resource_id
return Subscription(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def endpoint(self) -> pulumi.Output[str]:
"""
The endpoint the notification will be sent to. url in case of `"http"`/`"https"`/`"web"`, email address in case of `"email"`/`"email-json"` and sns-topic-arn in case of `"aws-sns"`.
"""
return pulumi.get(self, "endpoint")
@property
@pulumi.getter(name="eventType")
def event_type(self) -> pulumi.Output[str]:
"""
The event to send the notification when triggered. Valid values: `"AWS_EC2_INSTANCE_TERMINATE"`, `"AWS_EC2_INSTANCE_TERMINATED"`, `"AWS_EC2_INSTANCE_LAUNCH"`, `"AWS_EC2_INSTANCE_READY_SIGNAL_TIMEOUT"`, `"AWS_EC2_CANT_SPIN_OD"`, `"AWS_EC2_INSTANCE_UNHEALTHY_IN_ELB"`, `"GROUP_ROLL_FAILED"`, `"GROUP_ROLL_FINISHED"`,
`"CANT_SCALE_UP_GROUP_MAX_CAPACITY"`,
`"GROUP_UPDATED"`,
`"AWS_EMR_PROVISION_TIMEOUT"`,
`"GROUP_BEANSTALK_INIT_READY"`,
`"AZURE_VM_TERMINATED"`,
`"AZURE_VM_TERMINATE"`,
`"AWS_EC2_MANAGED_INSTANCE_PAUSING"`,
`"AWS_EC2_MANAGED_INSTANCE_RESUMING"`,
`"AWS_EC2_MANAGED_INSTANCE_RECYCLING"`,`"AWS_EC2_MANAGED_INSTANCE_DELETING"`.
Ocean Events:`"CLUSTER_ROLL_FINISHED"`,`"GROUP_ROLL_FAILED"`.
"""
return pulumi.get(self, "event_type")
@property
@pulumi.getter
def format(self) -> pulumi.Output[Optional[Mapping[str, Any]]]:
"""
The format of the notification content (JSON Format - Key+Value). Valid Values : `"instance-id"`, `"event"`, `"resource-id"`, `"resource-name"`, `"subnet-id"`, `"availability-zone"`, `"reason"`, `"private-ip"`, `"launchspec-id"`
Example: {"event": `"event"`, `"resourceId"`: `"resource-id"`, `"resourceName"`: `"resource-name"`", `"myCustomKey"`: `"My content is set here"` }
Default: {`"event"`: `"<event>"`, `"instanceId"`: `"<instance-id>"`, `"resourceId"`: `"<resource-id>"`, `"resourceName"`: `"<resource-name>"` }.
"""
return pulumi.get(self, "format")
@property
@pulumi.getter
def protocol(self) -> pulumi.Output[str]:
"""
The protocol to send the notification. Valid values: `"email"`, `"email-json"`, `"aws-sns"`, `"web"`.
The following values are deprecated: `"http"` , `"https"`
You can use the generic `"web"` protocol instead.
`"aws-sns"` is only supported with AWS provider
"""
return pulumi.get(self, "protocol")
@property
@pulumi.getter(name="resourceId")
def resource_id(self) -> pulumi.Output[str]:
"""
Spotinst Resource id (Elastigroup or Ocean ID).
"""
return pulumi.get(self, "resource_id") | en | 0.304508 | # coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** The set of arguments for constructing a Subscription resource. :param pulumi.Input[str] endpoint: The endpoint the notification will be sent to. url in case of `"http"`/`"https"`/`"web"`, email address in case of `"email"`/`"email-json"` and sns-topic-arn in case of `"aws-sns"`. :param pulumi.Input[str] event_type: The event to send the notification when triggered. Valid values: `"AWS_EC2_INSTANCE_TERMINATE"`, `"AWS_EC2_INSTANCE_TERMINATED"`, `"AWS_EC2_INSTANCE_LAUNCH"`, `"AWS_EC2_INSTANCE_READY_SIGNAL_TIMEOUT"`, `"AWS_EC2_CANT_SPIN_OD"`, `"AWS_EC2_INSTANCE_UNHEALTHY_IN_ELB"`, `"GROUP_ROLL_FAILED"`, `"GROUP_ROLL_FINISHED"`, `"CANT_SCALE_UP_GROUP_MAX_CAPACITY"`, `"GROUP_UPDATED"`, `"AWS_EMR_PROVISION_TIMEOUT"`, `"GROUP_BEANSTALK_INIT_READY"`, `"AZURE_VM_TERMINATED"`, `"AZURE_VM_TERMINATE"`, `"AWS_EC2_MANAGED_INSTANCE_PAUSING"`, `"AWS_EC2_MANAGED_INSTANCE_RESUMING"`, `"AWS_EC2_MANAGED_INSTANCE_RECYCLING"`,`"AWS_EC2_MANAGED_INSTANCE_DELETING"`. Ocean Events:`"CLUSTER_ROLL_FINISHED"`,`"GROUP_ROLL_FAILED"`. :param pulumi.Input[str] protocol: The protocol to send the notification. Valid values: `"email"`, `"email-json"`, `"aws-sns"`, `"web"`. The following values are deprecated: `"http"` , `"https"` You can use the generic `"web"` protocol instead. `"aws-sns"` is only supported with AWS provider :param pulumi.Input[str] resource_id: Spotinst Resource id (Elastigroup or Ocean ID). :param pulumi.Input[Mapping[str, Any]] format: The format of the notification content (JSON Format - Key+Value). 
Valid Values : `"instance-id"`, `"event"`, `"resource-id"`, `"resource-name"`, `"subnet-id"`, `"availability-zone"`, `"reason"`, `"private-ip"`, `"launchspec-id"` Example: {"event": `"event"`, `"resourceId"`: `"resource-id"`, `"resourceName"`: `"resource-name"`", `"myCustomKey"`: `"My content is set here"` } Default: {`"event"`: `"<event>"`, `"instanceId"`: `"<instance-id>"`, `"resourceId"`: `"<resource-id>"`, `"resourceName"`: `"<resource-name>"` }. The endpoint the notification will be sent to. url in case of `"http"`/`"https"`/`"web"`, email address in case of `"email"`/`"email-json"` and sns-topic-arn in case of `"aws-sns"`. The event to send the notification when triggered. Valid values: `"AWS_EC2_INSTANCE_TERMINATE"`, `"AWS_EC2_INSTANCE_TERMINATED"`, `"AWS_EC2_INSTANCE_LAUNCH"`, `"AWS_EC2_INSTANCE_READY_SIGNAL_TIMEOUT"`, `"AWS_EC2_CANT_SPIN_OD"`, `"AWS_EC2_INSTANCE_UNHEALTHY_IN_ELB"`, `"GROUP_ROLL_FAILED"`, `"GROUP_ROLL_FINISHED"`, `"CANT_SCALE_UP_GROUP_MAX_CAPACITY"`, `"GROUP_UPDATED"`, `"AWS_EMR_PROVISION_TIMEOUT"`, `"GROUP_BEANSTALK_INIT_READY"`, `"AZURE_VM_TERMINATED"`, `"AZURE_VM_TERMINATE"`, `"AWS_EC2_MANAGED_INSTANCE_PAUSING"`, `"AWS_EC2_MANAGED_INSTANCE_RESUMING"`, `"AWS_EC2_MANAGED_INSTANCE_RECYCLING"`,`"AWS_EC2_MANAGED_INSTANCE_DELETING"`. Ocean Events:`"CLUSTER_ROLL_FINISHED"`,`"GROUP_ROLL_FAILED"`. The protocol to send the notification. Valid values: `"email"`, `"email-json"`, `"aws-sns"`, `"web"`. The following values are deprecated: `"http"` , `"https"` You can use the generic `"web"` protocol instead. `"aws-sns"` is only supported with AWS provider Spotinst Resource id (Elastigroup or Ocean ID). The format of the notification content (JSON Format - Key+Value). 
Valid Values : `"instance-id"`, `"event"`, `"resource-id"`, `"resource-name"`, `"subnet-id"`, `"availability-zone"`, `"reason"`, `"private-ip"`, `"launchspec-id"` Example: {"event": `"event"`, `"resourceId"`: `"resource-id"`, `"resourceName"`: `"resource-name"`", `"myCustomKey"`: `"My content is set here"` } Default: {`"event"`: `"<event>"`, `"instanceId"`: `"<instance-id>"`, `"resourceId"`: `"<resource-id>"`, `"resourceName"`: `"<resource-name>"` }. Input properties used for looking up and filtering Subscription resources. :param pulumi.Input[str] endpoint: The endpoint the notification will be sent to. url in case of `"http"`/`"https"`/`"web"`, email address in case of `"email"`/`"email-json"` and sns-topic-arn in case of `"aws-sns"`. :param pulumi.Input[str] event_type: The event to send the notification when triggered. Valid values: `"AWS_EC2_INSTANCE_TERMINATE"`, `"AWS_EC2_INSTANCE_TERMINATED"`, `"AWS_EC2_INSTANCE_LAUNCH"`, `"AWS_EC2_INSTANCE_READY_SIGNAL_TIMEOUT"`, `"AWS_EC2_CANT_SPIN_OD"`, `"AWS_EC2_INSTANCE_UNHEALTHY_IN_ELB"`, `"GROUP_ROLL_FAILED"`, `"GROUP_ROLL_FINISHED"`, `"CANT_SCALE_UP_GROUP_MAX_CAPACITY"`, `"GROUP_UPDATED"`, `"AWS_EMR_PROVISION_TIMEOUT"`, `"GROUP_BEANSTALK_INIT_READY"`, `"AZURE_VM_TERMINATED"`, `"AZURE_VM_TERMINATE"`, `"AWS_EC2_MANAGED_INSTANCE_PAUSING"`, `"AWS_EC2_MANAGED_INSTANCE_RESUMING"`, `"AWS_EC2_MANAGED_INSTANCE_RECYCLING"`,`"AWS_EC2_MANAGED_INSTANCE_DELETING"`. Ocean Events:`"CLUSTER_ROLL_FINISHED"`,`"GROUP_ROLL_FAILED"`. :param pulumi.Input[Mapping[str, Any]] format: The format of the notification content (JSON Format - Key+Value). 
Valid Values : `"instance-id"`, `"event"`, `"resource-id"`, `"resource-name"`, `"subnet-id"`, `"availability-zone"`, `"reason"`, `"private-ip"`, `"launchspec-id"` Example: {"event": `"event"`, `"resourceId"`: `"resource-id"`, `"resourceName"`: `"resource-name"`", `"myCustomKey"`: `"My content is set here"` } Default: {`"event"`: `"<event>"`, `"instanceId"`: `"<instance-id>"`, `"resourceId"`: `"<resource-id>"`, `"resourceName"`: `"<resource-name>"` }. :param pulumi.Input[str] protocol: The protocol to send the notification. Valid values: `"email"`, `"email-json"`, `"aws-sns"`, `"web"`. The following values are deprecated: `"http"` , `"https"` You can use the generic `"web"` protocol instead. `"aws-sns"` is only supported with AWS provider :param pulumi.Input[str] resource_id: Spotinst Resource id (Elastigroup or Ocean ID). The endpoint the notification will be sent to. url in case of `"http"`/`"https"`/`"web"`, email address in case of `"email"`/`"email-json"` and sns-topic-arn in case of `"aws-sns"`. The event to send the notification when triggered. Valid values: `"AWS_EC2_INSTANCE_TERMINATE"`, `"AWS_EC2_INSTANCE_TERMINATED"`, `"AWS_EC2_INSTANCE_LAUNCH"`, `"AWS_EC2_INSTANCE_READY_SIGNAL_TIMEOUT"`, `"AWS_EC2_CANT_SPIN_OD"`, `"AWS_EC2_INSTANCE_UNHEALTHY_IN_ELB"`, `"GROUP_ROLL_FAILED"`, `"GROUP_ROLL_FINISHED"`, `"CANT_SCALE_UP_GROUP_MAX_CAPACITY"`, `"GROUP_UPDATED"`, `"AWS_EMR_PROVISION_TIMEOUT"`, `"GROUP_BEANSTALK_INIT_READY"`, `"AZURE_VM_TERMINATED"`, `"AZURE_VM_TERMINATE"`, `"AWS_EC2_MANAGED_INSTANCE_PAUSING"`, `"AWS_EC2_MANAGED_INSTANCE_RESUMING"`, `"AWS_EC2_MANAGED_INSTANCE_RECYCLING"`,`"AWS_EC2_MANAGED_INSTANCE_DELETING"`. Ocean Events:`"CLUSTER_ROLL_FINISHED"`,`"GROUP_ROLL_FAILED"`. The format of the notification content (JSON Format - Key+Value). 
Valid Values : `"instance-id"`, `"event"`, `"resource-id"`, `"resource-name"`, `"subnet-id"`, `"availability-zone"`, `"reason"`, `"private-ip"`, `"launchspec-id"` Example: {"event": `"event"`, `"resourceId"`: `"resource-id"`, `"resourceName"`: `"resource-name"`", `"myCustomKey"`: `"My content is set here"` } Default: {`"event"`: `"<event>"`, `"instanceId"`: `"<instance-id>"`, `"resourceId"`: `"<resource-id>"`, `"resourceName"`: `"<resource-name>"` }. The protocol to send the notification. Valid values: `"email"`, `"email-json"`, `"aws-sns"`, `"web"`. The following values are deprecated: `"http"` , `"https"` You can use the generic `"web"` protocol instead. `"aws-sns"` is only supported with AWS provider Spotinst Resource id (Elastigroup or Ocean ID). Provides a Spotinst subscription resource. ## Example Usage ```python import pulumi import pulumi_spotinst as spotinst # Create a Subscription default_subscription = spotinst.Subscription("default-subscription", endpoint="http://endpoint.com", event_type="AWS_EC2_INSTANCE_LAUNCH", format={ "event": "%event%", "instance_id": "%instance-id%", "resource_id": "%resource-id%", "resource_name": "%resource-name%", "tags": "foo,baz,baz", }, protocol="http", resource_id=spotinst_elastigroup_aws["my-eg"]["id"]) ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] endpoint: The endpoint the notification will be sent to. url in case of `"http"`/`"https"`/`"web"`, email address in case of `"email"`/`"email-json"` and sns-topic-arn in case of `"aws-sns"`. :param pulumi.Input[str] event_type: The event to send the notification when triggered. 
Valid values: `"AWS_EC2_INSTANCE_TERMINATE"`, `"AWS_EC2_INSTANCE_TERMINATED"`, `"AWS_EC2_INSTANCE_LAUNCH"`, `"AWS_EC2_INSTANCE_READY_SIGNAL_TIMEOUT"`, `"AWS_EC2_CANT_SPIN_OD"`, `"AWS_EC2_INSTANCE_UNHEALTHY_IN_ELB"`, `"GROUP_ROLL_FAILED"`, `"GROUP_ROLL_FINISHED"`, `"CANT_SCALE_UP_GROUP_MAX_CAPACITY"`, `"GROUP_UPDATED"`, `"AWS_EMR_PROVISION_TIMEOUT"`, `"GROUP_BEANSTALK_INIT_READY"`, `"AZURE_VM_TERMINATED"`, `"AZURE_VM_TERMINATE"`, `"AWS_EC2_MANAGED_INSTANCE_PAUSING"`, `"AWS_EC2_MANAGED_INSTANCE_RESUMING"`, `"AWS_EC2_MANAGED_INSTANCE_RECYCLING"`,`"AWS_EC2_MANAGED_INSTANCE_DELETING"`. Ocean Events:`"CLUSTER_ROLL_FINISHED"`,`"GROUP_ROLL_FAILED"`. :param pulumi.Input[Mapping[str, Any]] format: The format of the notification content (JSON Format - Key+Value). Valid Values : `"instance-id"`, `"event"`, `"resource-id"`, `"resource-name"`, `"subnet-id"`, `"availability-zone"`, `"reason"`, `"private-ip"`, `"launchspec-id"` Example: {"event": `"event"`, `"resourceId"`: `"resource-id"`, `"resourceName"`: `"resource-name"`", `"myCustomKey"`: `"My content is set here"` } Default: {`"event"`: `"<event>"`, `"instanceId"`: `"<instance-id>"`, `"resourceId"`: `"<resource-id>"`, `"resourceName"`: `"<resource-name>"` }. :param pulumi.Input[str] protocol: The protocol to send the notification. Valid values: `"email"`, `"email-json"`, `"aws-sns"`, `"web"`. The following values are deprecated: `"http"` , `"https"` You can use the generic `"web"` protocol instead. `"aws-sns"` is only supported with AWS provider :param pulumi.Input[str] resource_id: Spotinst Resource id (Elastigroup or Ocean ID). Provides a Spotinst subscription resource. 
## Example Usage ```python import pulumi import pulumi_spotinst as spotinst # Create a Subscription default_subscription = spotinst.Subscription("default-subscription", endpoint="http://endpoint.com", event_type="AWS_EC2_INSTANCE_LAUNCH", format={ "event": "%event%", "instance_id": "%instance-id%", "resource_id": "%resource-id%", "resource_name": "%resource-name%", "tags": "foo,baz,baz", }, protocol="http", resource_id=spotinst_elastigroup_aws["my-eg"]["id"]) ``` :param str resource_name: The name of the resource. :param SubscriptionArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. Get an existing Subscription resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] endpoint: The endpoint the notification will be sent to. url in case of `"http"`/`"https"`/`"web"`, email address in case of `"email"`/`"email-json"` and sns-topic-arn in case of `"aws-sns"`. :param pulumi.Input[str] event_type: The event to send the notification when triggered. Valid values: `"AWS_EC2_INSTANCE_TERMINATE"`, `"AWS_EC2_INSTANCE_TERMINATED"`, `"AWS_EC2_INSTANCE_LAUNCH"`, `"AWS_EC2_INSTANCE_READY_SIGNAL_TIMEOUT"`, `"AWS_EC2_CANT_SPIN_OD"`, `"AWS_EC2_INSTANCE_UNHEALTHY_IN_ELB"`, `"GROUP_ROLL_FAILED"`, `"GROUP_ROLL_FINISHED"`, `"CANT_SCALE_UP_GROUP_MAX_CAPACITY"`, `"GROUP_UPDATED"`, `"AWS_EMR_PROVISION_TIMEOUT"`, `"GROUP_BEANSTALK_INIT_READY"`, `"AZURE_VM_TERMINATED"`, `"AZURE_VM_TERMINATE"`, `"AWS_EC2_MANAGED_INSTANCE_PAUSING"`, `"AWS_EC2_MANAGED_INSTANCE_RESUMING"`, `"AWS_EC2_MANAGED_INSTANCE_RECYCLING"`,`"AWS_EC2_MANAGED_INSTANCE_DELETING"`. Ocean Events:`"CLUSTER_ROLL_FINISHED"`,`"GROUP_ROLL_FAILED"`. 
:param pulumi.Input[Mapping[str, Any]] format: The format of the notification content (JSON Format - Key+Value). Valid Values : `"instance-id"`, `"event"`, `"resource-id"`, `"resource-name"`, `"subnet-id"`, `"availability-zone"`, `"reason"`, `"private-ip"`, `"launchspec-id"` Example: {"event": `"event"`, `"resourceId"`: `"resource-id"`, `"resourceName"`: `"resource-name"`", `"myCustomKey"`: `"My content is set here"` } Default: {`"event"`: `"<event>"`, `"instanceId"`: `"<instance-id>"`, `"resourceId"`: `"<resource-id>"`, `"resourceName"`: `"<resource-name>"` }. :param pulumi.Input[str] protocol: The protocol to send the notification. Valid values: `"email"`, `"email-json"`, `"aws-sns"`, `"web"`. The following values are deprecated: `"http"` , `"https"` You can use the generic `"web"` protocol instead. `"aws-sns"` is only supported with AWS provider :param pulumi.Input[str] resource_id: Spotinst Resource id (Elastigroup or Ocean ID). The endpoint the notification will be sent to. url in case of `"http"`/`"https"`/`"web"`, email address in case of `"email"`/`"email-json"` and sns-topic-arn in case of `"aws-sns"`. The event to send the notification when triggered. Valid values: `"AWS_EC2_INSTANCE_TERMINATE"`, `"AWS_EC2_INSTANCE_TERMINATED"`, `"AWS_EC2_INSTANCE_LAUNCH"`, `"AWS_EC2_INSTANCE_READY_SIGNAL_TIMEOUT"`, `"AWS_EC2_CANT_SPIN_OD"`, `"AWS_EC2_INSTANCE_UNHEALTHY_IN_ELB"`, `"GROUP_ROLL_FAILED"`, `"GROUP_ROLL_FINISHED"`, `"CANT_SCALE_UP_GROUP_MAX_CAPACITY"`, `"GROUP_UPDATED"`, `"AWS_EMR_PROVISION_TIMEOUT"`, `"GROUP_BEANSTALK_INIT_READY"`, `"AZURE_VM_TERMINATED"`, `"AZURE_VM_TERMINATE"`, `"AWS_EC2_MANAGED_INSTANCE_PAUSING"`, `"AWS_EC2_MANAGED_INSTANCE_RESUMING"`, `"AWS_EC2_MANAGED_INSTANCE_RECYCLING"`,`"AWS_EC2_MANAGED_INSTANCE_DELETING"`. Ocean Events:`"CLUSTER_ROLL_FINISHED"`,`"GROUP_ROLL_FAILED"`. The format of the notification content (JSON Format - Key+Value). 
Valid Values : `"instance-id"`, `"event"`, `"resource-id"`, `"resource-name"`, `"subnet-id"`, `"availability-zone"`, `"reason"`, `"private-ip"`, `"launchspec-id"` Example: {"event": `"event"`, `"resourceId"`: `"resource-id"`, `"resourceName"`: `"resource-name"`", `"myCustomKey"`: `"My content is set here"` } Default: {`"event"`: `"<event>"`, `"instanceId"`: `"<instance-id>"`, `"resourceId"`: `"<resource-id>"`, `"resourceName"`: `"<resource-name>"` }. The protocol to send the notification. Valid values: `"email"`, `"email-json"`, `"aws-sns"`, `"web"`. The following values are deprecated: `"http"` , `"https"` You can use the generic `"web"` protocol instead. `"aws-sns"` is only supported with AWS provider Spotinst Resource id (Elastigroup or Ocean ID). | 1.899107 | 2 |
modes/printcolorlist.py | k4cg/k4cglicht | 6 | 6620129 | from mode import Mode
class PrintColorList(Mode):
    """Mode bound to the "farben" command: dumps the full color table."""

    @staticmethod
    def get_params():
        # Command keyword and its (absent) argument specification.
        return ('farben', None)

    @staticmethod
    def execute(light_utils, argument=None):
        # The argument is ignored; simply print every known color.
        light_utils.print_all_colors()
| from mode import Mode
class PrintColorList(Mode):
@staticmethod
def get_params():
return ('farben', None)
@staticmethod
def execute(light_utils, argument=None):
light_utils.print_all_colors()
| none | 1 | 2.293352 | 2 | |
elements.py | frolov-pchem/ffconv | 0 | 6620130 | <reponame>frolov-pchem/ffconv<gh_stars>0
# <NAME>, Jan 2014, ISC RAS, Ivanovo, Russia
#
# Copyright 2014 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from molecule_class import *
# Atomic masses (g/mol) and periodic-table numbers for the supported elements.
Elements = {
    'H':  {'Mass': 1.0079,     'PeriodicTableNum': 1},
    'C':  {'Mass': 12.011,     'PeriodicTableNum': 6},
    'O':  {'Mass': 15.999,     'PeriodicTableNum': 8},
    'N':  {'Mass': 14.007,     'PeriodicTableNum': 7},
    'P':  {'Mass': 30.973762,  'PeriodicTableNum': 15},
    'S':  {'Mass': 32.07,      'PeriodicTableNum': 16},
    'F':  {'Mass': 18.9984032, 'PeriodicTableNum': 9},
    'CL': {'Mass': 35.453,     'PeriodicTableNum': 17},
    'BR': {'Mass': 79.904,     'PeriodicTableNum': 35},
    'I':  {'Mass': 126.90447,  'PeriodicTableNum': 53},
    'AL': {'Mass': 26.9815386, 'PeriodicTableNum': 13},
}


def TrimElementName(s):
    """Return the upper-case element symbol at the start of *s*, or 'none'.

    Tries a two-letter symbol first (e.g. 'CL'), then a one-letter one
    (e.g. 'C'); writes a warning to stderr when neither is recognized.
    """
    symbol = s[:2].upper()
    if symbol in Elements:
        return symbol
    symbol = symbol[:1]
    if symbol in Elements:
        return symbol
    sys.stderr.write("--- !Warning in "+inspect.stack()[0][3]+": could not get an element name from the string ["+str(s)+"]. Maybe extend the elements list? Known elements: ["+str(Elements)+"]. Returning none. \n")
    return 'none'
| # <NAME>, Jan 2014, ISC RAS, Ivanovo, Russia
#
# Copyright 2014 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from molecule_class import *
# Atomic masses (g/mol) and periodic-table numbers for the supported elements.
Elements = {
    'H':  {'Mass': 1.0079,     'PeriodicTableNum': 1},
    'C':  {'Mass': 12.011,     'PeriodicTableNum': 6},
    'O':  {'Mass': 15.999,     'PeriodicTableNum': 8},
    'N':  {'Mass': 14.007,     'PeriodicTableNum': 7},
    'P':  {'Mass': 30.973762,  'PeriodicTableNum': 15},
    'S':  {'Mass': 32.07,      'PeriodicTableNum': 16},
    'F':  {'Mass': 18.9984032, 'PeriodicTableNum': 9},
    'CL': {'Mass': 35.453,     'PeriodicTableNum': 17},
    'BR': {'Mass': 79.904,     'PeriodicTableNum': 35},
    'I':  {'Mass': 126.90447,  'PeriodicTableNum': 53},
    'AL': {'Mass': 26.9815386, 'PeriodicTableNum': 13},
}


def TrimElementName(s):
    """Return the upper-case element symbol at the start of *s*, or 'none'.

    Tries a two-letter symbol first (e.g. 'CL'), then a one-letter one
    (e.g. 'C'); writes a warning to stderr when neither is recognized.
    """
    symbol = s[:2].upper()
    if symbol in Elements:
        return symbol
    symbol = symbol[:1]
    if symbol in Elements:
        return symbol
    sys.stderr.write("--- !Warning in "+inspect.stack()[0][3]+": could not get an element name from the string ["+str(s)+"]. Maybe extend the elements list? Known elements: ["+str(Elements)+"]. Returning none. \n")
    return 'none'
pydisp/pydisp.py | dimatura/pydisplay | 2 | 6620131 | <filename>pydisp/pydisp.py
# -*- coding: utf-8 -*-
import cStringIO as StringIO
import base64
import json
import uuid
import os
from PIL import Image
import matplotlib as mpl
import matplotlib.cm as cm
import numpy as np
import requests
__all__ = ['image',
'dyplot',
'send',
'text',
'pylab',
'pane',
'b64_encode',
'is_valid_image_mime_type',
'CONFIG',
]
VALID_IMAGE_MIME_TYPES = {'png','gif','bmp','webp','jpeg'}
class CONFIG(object):
    """Process-wide display-server settings, optionally overridden by
    ~/.display/config.json (keys: "port", "hostname")."""

    PORT = 8000
    HOSTNAME = 'localhost'

    @staticmethod
    def load_config():
        # TODO what is the right way (TM)
        cfg_path = os.path.join(os.environ['HOME'], '.display', 'config.json')
        if os.path.exists(cfg_path):
            with open(cfg_path, 'r') as fh:
                settings = json.load(fh)
            CONFIG.PORT = int(settings['port'])
            CONFIG.HOSTNAME = settings['hostname']

    @staticmethod
    def display_url():
        """URL of the display server's event endpoint."""
        return "http://{:s}:{:d}/events".format(CONFIG.HOSTNAME, CONFIG.PORT)
CONFIG.load_config()
def send(**command):
    """POST a command dict (JSON-encoded) to the display server.

    Returns True unless the response carried no body.
    """
    payload = json.dumps(command)
    resp = requests.post(CONFIG.display_url(),
                         headers={'Content-Type': 'application/text'},
                         data=payload.encode('ascii'))
    return resp.content is not None
def uid():
    """Return a fresh, globally unique pane identifier."""
    return 'pane_' + str(uuid.uuid4())
def pane(panetype, win, title, content):
    """Create (or update) a pane; returns its id, generating one when win is None."""
    pane_id = win if win is not None else uid()
    send(command='pane', type=panetype, id=pane_id, title=title, content=content)
    return pane_id
def is_valid_image_mime_type(mt):
    """True when *mt* is one of the supported raster-image MIME subtypes."""
    return mt in VALID_IMAGE_MIME_TYPES
def scalar_preprocess(img, **kwargs):
    """Map a scalar (2-d) image to a uint8 RGB array via a colormap.

    Recognized kwargs: vmin, vmax, clip, cmap (default 'jet').
    """
    # TODO customization
    norm = mpl.colors.Normalize(kwargs.get('vmin'),
                                kwargs.get('vmax'),
                                kwargs.get('clip'))
    mapper = cm.get_cmap(kwargs.get('cmap', 'jet'))
    rgb = mapper(norm(img))[:, :, :3]  # drop the alpha channel
    return (255 * rgb).astype(np.uint8)
def rgb_preprocess(img):
    """Convert a 3-channel image array to uint8.

    Float arrays are assumed to lie in [0, 1] and are scaled to [0, 255];
    uint8 arrays pass through unchanged; any other dtype is rejected.
    """
    # Bug fix: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # np.floating matches all float dtypes exactly as the old check intended.
    if np.issubdtype(img.dtype, np.floating):
        # assuming 0., 1. range
        return (img * 255).clip(0, 255).astype(np.uint8)
    if img.dtype != np.uint8:
        raise ValueError('only uint8 or float for 3-channel images')
    return img
def img_encode(img, encoding):
    """Encode a uint8 image array to compressed bytes via PIL."""
    # ret, data = cv2.imencode('.'+encoding, img)
    fmt = 'jpeg' if encoding == 'jpg' else encoding  # PIL spells it 'jpeg'
    buf = StringIO.StringIO()
    Image.fromarray(img).save(buf, format=fmt)
    try:
        return buf.getvalue()
    finally:
        buf.close()
def b64_encode(data, encoding):
    """Wrap raw image bytes in a base64 data-URI string."""
    payload = base64.b64encode(data).decode('ascii')
    return 'data:image/{};base64,{}'.format(encoding, payload)
def pylab(fig, **kwargs):
    """Display a matplotlib figure.

    Renders *fig* to PNG in memory and ships it as an image pane.
    Returns the pane id.
    """
    # save figure to buffer
    buf = StringIO.StringIO()
    fig.savefig(buf, format='png')
    data = buf.getvalue()
    buf.close()
    encoded = b64_encode(data, 'png')
    # Bug fix: the original called the module-qualified pydisp.pane (a
    # NameError inside this module, which never imports itself) and then
    # returned the undefined local `win`; pane() already returns the id.
    return pane('image',
                kwargs.get('win'),
                kwargs.get('title'),
                content={
                    'src': encoded,
                    'width': kwargs.get('width'),
                })
def image(img, **kwargs):
    """Display an image given as a numpy array.

    image(img, [win, title, labels, width, kwargs])
    to_bgr: swap blue and red channels (default False)
    encoding: 'jpg' (default) or 'png'
    Remaining kwargs are forwarded to the scalar preprocessing step.
    """
    if img.ndim not in (2, 3):
        raise ValueError('image should be 2 (gray) or 3 (rgb) dimensional')
    assert img.ndim == 2 or img.ndim == 3
    # 3-d arrays are treated as RGB, 2-d ones as scalar fields to colormap.
    if img.ndim == 3:
        prepared = rgb_preprocess(img)
    else:
        prepared = scalar_preprocess(img, **kwargs)
    if kwargs.get('to_bgr', False):
        prepared = prepared[..., [2, 1, 0]]
    encoding = kwargs.get('encoding', 'jpg')
    encoded = b64_encode(img_encode(prepared, encoding), encoding)
    return pane('image',
                kwargs.get('win'),
                kwargs.get('title'),
                content={
                    'src': encoded,
                    'labels': kwargs.get('labels'),
                    'width': kwargs.get('width'),
                })
def text(txt, **kwargs):
    """Display plain text in a pane; returns the pane id."""
    return pane('text',
                kwargs.get('win') or uid(),
                kwargs.get('title') or 'text',
                content=txt)
def dyplot(data, **kwargs):
    """Plot data as a line chart with dygraph.

    Params:
        data: a 2-d numpy array or a list of lists.
        win: pane id
        labels: series names; the first series is always the X-axis.
        Any other option from http://dygraphs.com/options.html is
        forwarded verbatim.
    """
    # NOTE(review): this id is computed but never passed on — pane() below
    # receives kwargs.get('win') instead; kept for behavioral parity.
    win = kwargs.get('win') or uid()
    if type(data).__module__ == np.__name__:
        dataset = data.tolist()
    else:
        dataset = data
    # Shallow-copy kwargs so the caller's dict stays untouched.
    options = dict(kwargs)
    options['file'] = dataset
    labels = options.get('labels')
    if labels:
        options['xlabel'] = labels[0]
    # Don't pass our pane id to dygraphs.
    options.pop('win', None)
    return pane('plot', kwargs.get('win'), kwargs.get('title'), content=options)
| <filename>pydisp/pydisp.py
# -*- coding: utf-8 -*-
import cStringIO as StringIO
import base64
import json
import uuid
import os
from PIL import Image
import matplotlib as mpl
import matplotlib.cm as cm
import numpy as np
import requests
__all__ = ['image',
'dyplot',
'send',
'text',
'pylab',
'pane',
'b64_encode',
'is_valid_image_mime_type',
'CONFIG',
]
VALID_IMAGE_MIME_TYPES = {'png','gif','bmp','webp','jpeg'}
class CONFIG(object):
PORT = 8000
HOSTNAME = 'localhost'
@staticmethod
def load_config():
# TODO what is the right way (TM)
fname = os.path.join(os.environ['HOME'], '.display', 'config.json')
if os.path.exists(fname):
with open(fname, 'r') as f:
cfg = json.load(f)
CONFIG.PORT = int(cfg['port'])
CONFIG.HOSTNAME = cfg['hostname']
@staticmethod
def display_url():
return "http://{:s}:{:d}/events".format(CONFIG.HOSTNAME, CONFIG.PORT)
CONFIG.load_config()
def send(**command):
""" send command to server """
command = json.dumps(command)
headers = {'Content-Type': 'application/text'}
req = requests.post(CONFIG.display_url(), headers=headers, data=command.encode('ascii'))
resp = req.content
return resp is not None
def uid():
""" return a unique id for a pane """
return 'pane_{}'.format(uuid.uuid4())
def pane(panetype, win, title, content):
""" create a pane (formerly window) """
if win is None:
win = uid()
send(command='pane', type=panetype, id=win, title=title, content=content)
return win
def is_valid_image_mime_type(mt):
return mt in VALID_IMAGE_MIME_TYPES
def scalar_preprocess(img, **kwargs):
""" vmin, vmax, clip, cmap """
vmin = kwargs.get('vmin')
vmax = kwargs.get('vmax')
clip = kwargs.get('clip')
cmap = kwargs.get('cmap', 'jet')
# TODO customization
normalizer = mpl.colors.Normalize(vmin, vmax, clip)
nimg = normalizer(img)
cmap = cm.get_cmap(cmap)
cimg = cmap(nimg)[:, :, :3] # ignore alpha
simg = (255*cimg).astype(np.uint8)
return simg
def rgb_preprocess(img):
    """Convert a 3-channel image array to uint8.

    Float arrays are assumed to lie in [0, 1] and are scaled to [0, 255];
    uint8 arrays pass through unchanged; any other dtype is rejected.
    """
    # Bug fix: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # np.floating matches all float dtypes exactly as the old check intended.
    if np.issubdtype(img.dtype, np.floating):
        # assuming 0., 1. range
        return (img * 255).clip(0, 255).astype(np.uint8)
    if img.dtype != np.uint8:
        raise ValueError('only uint8 or float for 3-channel images')
    return img
def img_encode(img, encoding):
# ret, data = cv2.imencode('.'+encoding, img)
if encoding=='jpg':
encoding = 'jpeg'
buf = StringIO.StringIO()
Image.fromarray(img).save(buf, format=encoding)
data = buf.getvalue()
buf.close()
return data
def b64_encode(data, encoding):
b64data = ('data:image/{};base64,{}'
.format(encoding, base64.b64encode(data).decode('ascii')))
return b64data
def pylab(fig, **kwargs):
    """Display a matplotlib figure.

    Renders *fig* to PNG in memory and ships it as an image pane.
    Returns the pane id.
    """
    # save figure to buffer
    buf = StringIO.StringIO()
    fig.savefig(buf, format='png')
    data = buf.getvalue()
    buf.close()
    encoded = b64_encode(data, 'png')
    # Bug fix: the original called the module-qualified pydisp.pane (a
    # NameError inside this module, which never imports itself) and then
    # returned the undefined local `win`; pane() already returns the id.
    return pane('image',
                kwargs.get('win'),
                kwargs.get('title'),
                content={
                    'src': encoded,
                    'width': kwargs.get('width'),
                })
def image(img, **kwargs):
""" Display image encoded as an array.
image(img, [win, title, labels, width, kwargs])
to_bgr: swap blue and red channels (default False)
encoding: 'jpg' (default) or 'png'
kwargs is argument for scalar preprocessing
"""
to_bgr = kwargs.get('to_bgr', False)
if img.ndim not in (2, 3):
raise ValueError('image should be 2 (gray) or 3 (rgb) dimensional')
assert img.ndim == 2 or img.ndim == 3
if img.ndim == 3:
img = rgb_preprocess(img)
else:
img = scalar_preprocess(img, **kwargs)
if to_bgr:
img = img[...,[2, 1, 0]]
encoding = kwargs.get('encoding', 'jpg')
data = img_encode(img, encoding)
encoded = b64_encode(data, encoding)
return pane('image',
kwargs.get('win'),
kwargs.get('title'),
content={
'src': encoded,
'labels': kwargs.get('labels'),
'width': kwargs.get('width'),
})
def text(txt, **kwargs):
win = kwargs.get('win') or uid()
title = kwargs.get('title') or 'text'
return pane('text',
win,
title,
content=txt)
def dyplot(data, **kwargs):
""" Plot data as line chart with dygraph
Params:
data: either a 2-d numpy array or a list of lists.
win: pane id
labels: list of series names, first series is always the X-axis
see http://dygraphs.com/options.html for other supported options
"""
win = kwargs.get('win') or uid()
dataset = {}
if type(data).__module__ == np.__name__:
dataset = data.tolist()
else:
dataset = data
# clone kwargs into options
options = dict(kwargs)
options['file'] = dataset
if options.get('labels'):
options['xlabel'] = options['labels'][0]
# Don't pass our options to dygraphs.
options.pop('win', None)
return pane('plot', kwargs.get('win'), kwargs.get('title'), content=options)
| en | 0.688485 | # -*- coding: utf-8 -*- # TODO what is the right way (TM) send command to server return a unique id for a pane create a pane (formerly window) vmin, vmax, clip, cmap # TODO customization # ignore alpha # assuming 0., 1. range # ret, data = cv2.imencode('.'+encoding, img) Display a matplotlib figure. # save figure to buffer Display image encoded as an array. image(img, [win, title, labels, width, kwargs]) to_bgr: swap blue and red channels (default False) encoding: 'jpg' (default) or 'png' kwargs is argument for scalar preprocessing Plot data as line chart with dygraph Params: data: either a 2-d numpy array or a list of lists. win: pane id labels: list of series names, first series is always the X-axis see http://dygraphs.com/options.html for other supported options # clone kwargs into options # Don't pass our options to dygraphs. | 2.617756 | 3 |
Python/maximum-frequency-stack.py | RideGreg/LeetCode | 1 | 6620132 | <reponame>RideGreg/LeetCode
# Time: O(1)
# Space: O(n)
# 895
# Implement FreqStack,
# a class which simulates the operation of a stack-like data structure.
#
# FreqStack has two functions:
#
# push(int x), which pushes an integer x onto the stack.
# pop(), which removes and returns the most frequent element in the stack.
# If there is a tie for most frequent element,
# the element closest to the top of the stack is removed and returned.
#
# Example 1:
#
# Input:
# ["FreqStack","push","push","push","push","push","push","pop","pop","pop","pop"],
# [[],[5],[7],[5],[7],[4],[5],[],[],[],[]]
# Output: [null,null,null,null,null,null,null,5,7,5,4]
# Explanation:
# After making six .push operations, the stack is [5,7,5,7,4,5] from bottom to top. Then:
#
# pop() -> returns 5, as 5 is the most frequent.
# The stack becomes [5,7,5,7,4].
#
# pop() -> returns 7, as 5 and 7 is the most frequent, but 7 is closest to the top.
# The stack becomes [5,7,5,4].
#
# pop() -> returns 5.
# The stack becomes [5,7,4].
#
# pop() -> returns 4.
# The stack becomes [5,7].
#
# Note:
# - Calls to FreqStack.push(int x) will be such that 0 <= x <= 10^9.
# - It is guaranteed that FreqStack.pop() won't be called if the stack has zero elements.
# - The total number of FreqStack.push calls will not exceed 10000 in a single test case.
# - The total number of FreqStack.pop calls will not exceed 10000 in a single test case.
# - The total number of FreqStack.push and
# FreqStack.pop calls will not exceed 150000 across all test cases.
import collections
# Very good: 1. Multiple stacks: maintain a mapping from 'freq' key to STACK of values, the stack remembers insertion order.
# for example, push 5,7,5,7,4,5, we store
# freq 1 : [5,7,4]
# freq 2 : [5,7]
# freq 3 : [5]
# 2. Also store maxFreq, so don't re-count on every pop.
# 3. Obviously maintain another mapping of value to freq which is basic for this problem.
class FreqStack(object):
    """Stack-like structure whose pop() removes the most frequent element,
    breaking ties in favour of the most recently pushed one.

    Keeps one stack per frequency level plus the current maximum frequency,
    so both operations run in O(1).
    """

    def __init__(self):
        self._count = collections.Counter()           # value -> current frequency
        self._stacks = collections.defaultdict(list)  # frequency -> values, in push order
        self._top = 0                                 # highest frequency in use

    def push(self, x):
        """
        :type x: int
        :rtype: void
        """
        self._count[x] += 1
        level = self._count[x]
        if level > self._top:
            self._top = level
        # x stays on the lower-frequency stacks too, which is what lets
        # pop() fall back to them without any re-insertion.
        self._stacks[level].append(x)

    def pop(self):
        """
        :rtype: int
        """
        x = self._stacks[self._top].pop()
        if not self._stacks[self._top]:
            # Maintaining _top here avoids re-counting on every pop.
            self._top -= 1
        self._count[x] -= 1
        return x
# Time bad: if not maintain maxFreq, then TLE due to calculate maxFreq every pop.
# Space bad: store every insert (ids for each x value)
class FreqStack_ming(object):
    """Alternative FreqStack: stores per-value push timestamps and scans
    all values on every pop (slower, and keeps every id around)."""

    def __init__(self):
        # self.h = []
        self.pos = collections.defaultdict(list)  # value -> ascending push ids
        self.id = 0                               # monotonically increasing stamp

    def push(self, x):
        """
        :type x: int
        :rtype: void
        """
        self.id += 1
        self.pos[x].append(self.id)

    def pop(self):
        """
        :rtype: int
        """
        # Most frequent wins; ties go to the value pushed most recently.
        best = max(self.pos, key=lambda v: (len(self.pos[v]), self.pos[v][-1]))
        self.pos[best].pop()
        if not self.pos[best]:
            del self.pos[best]
        return best
# Ad-hoc smoke test mirroring the examples in the header comment.
# Uses range (not the Python-2-only xrange) so the script also runs
# unchanged under Python 3; iteration behaviour is identical.
obj = FreqStack()
obj.push(4)
obj.push(0)
obj.push(9)
obj.push(3)
obj.push(4)
obj.push(2)
print(obj.pop()) # 4
obj.push(6)
print(obj.pop()) # 6
obj.push(1)
print(obj.pop()) # 1
obj.push(1)
print(obj.pop()) # 1
obj.push(4)
for _ in range(6):
    print(obj.pop()) # 4,2,3,9,0,4
obj = FreqStack()
obj.push(5)
obj.push(7)
obj.push(5)
obj.push(7)
obj.push(4)
obj.push(5)
for _ in range(4):
    print(obj.pop()) # 5,7,5,4
| # Time: O(1)
# Space: O(n)
# 895
# Implement FreqStack,
# a class which simulates the operation of a stack-like data structure.
#
# FreqStack has two functions:
#
# push(int x), which pushes an integer x onto the stack.
# pop(), which removes and returns the most frequent element in the stack.
# If there is a tie for most frequent element,
# the element closest to the top of the stack is removed and returned.
#
# Example 1:
#
# Input:
# ["FreqStack","push","push","push","push","push","push","pop","pop","pop","pop"],
# [[],[5],[7],[5],[7],[4],[5],[],[],[],[]]
# Output: [null,null,null,null,null,null,null,5,7,5,4]
# Explanation:
# After making six .push operations, the stack is [5,7,5,7,4,5] from bottom to top. Then:
#
# pop() -> returns 5, as 5 is the most frequent.
# The stack becomes [5,7,5,7,4].
#
# pop() -> returns 7, as 5 and 7 is the most frequent, but 7 is closest to the top.
# The stack becomes [5,7,5,4].
#
# pop() -> returns 5.
# The stack becomes [5,7,4].
#
# pop() -> returns 4.
# The stack becomes [5,7].
#
# Note:
# - Calls to FreqStack.push(int x) will be such that 0 <= x <= 10^9.
# - It is guaranteed that FreqStack.pop() won't be called if the stack has zero elements.
# - The total number of FreqStack.push calls will not exceed 10000 in a single test case.
# - The total number of FreqStack.pop calls will not exceed 10000 in a single test case.
# - The total number of FreqStack.push and
# FreqStack.pop calls will not exceed 150000 across all test cases.
import collections
# Very good: 1. Multiple stacks: maintain a mapping from 'freq' key to STACK of values, the stack remembers insertion order.
# for example, push 5,7,5,7,4,5, we store
# freq 1 : [5,7,4]
# freq 2 : [5,7]
# freq 3 : [5]
# 2. Also store maxFreq, so don't re-count on every pop.
# 3. Obviously maintain another mapping of value to freq which is basic for this problem.
class FreqStack(object):
    """Stack whose pop() returns the most frequent element, breaking ties in
    favour of the element closest to the top. O(1) push and pop via one
    value-stack per frequency level plus a running maximum frequency."""
    def __init__(self):
        self.__freq = collections.Counter()  # value -> its current frequency
        self.__group = collections.defaultdict(list) # list is treated as a stack to remember insertion order.
        self.__maxfreq = 0  # highest frequency currently present in the stack
    def push(self, x):
        """
        :type x: int
        :rtype: void
        """
        self.__freq[x] += 1
        f = self.__freq[x]
        self.__maxfreq = max(self.__maxfreq, f)
        self.__group[f].append(x) # don't remove it from f-1 stack, otherwise in pop we need to insert back to f-1 stack
    def pop(self):
        """
        :rtype: int
        """
        x = self.__group[self.__maxfreq].pop() # list pop by index
        if not self.__group[self.__maxfreq]:
            # self.__group.pop(self.__maxfreq) # no need to cleanup, maintain maxfreq is enough
            self.__maxfreq -= 1
        self.__freq[x] -= 1
        return x
# Time bad: if not maintain maxFreq, then TLE due to calculate maxFreq every pop.
# Space bad: store every insert (ids for each x value)
class FreqStack_ming(object):
    """Straightforward alternative kept for reference: pop() scans every
    distinct value, so it is O(n) per call (TLE-prone, per the note above)."""
    def __init__(self):
        # self.h = []
        self.pos = collections.defaultdict(list)  # value -> list of insertion ids, oldest first
        self.id = 0  # monotonically increasing counter identifying each push
    def push(self, x):
        """
        :type x: int
        :rtype: void
        """
        self.id += 1
        self.pos[x].append(self.id)
    def pop(self):
        """
        :rtype: int
        """
        # Most frequent value wins; ties broken by the most recent insertion id.
        ans = max(self.pos, key=lambda x: (len(self.pos[x]), self.pos[x][-1]))
        self.pos[ans].pop()
        if not self.pos[ans]:
            del self.pos[ans]
        return ans
# Ad-hoc smoke test mirroring the examples in the header comment.
# NOTE(review): xrange below (and further down) is Python 2 only; under
# Python 3 this script raises NameError.
obj = FreqStack()
obj.push(4)
obj.push(0)
obj.push(9)
obj.push(3)
obj.push(4)
obj.push(2)
print(obj.pop()) # 4
obj.push(6)
print(obj.pop()) # 6
obj.push(1)
print(obj.pop()) # 1
obj.push(1)
print(obj.pop()) # 1
obj.push(4)
for _ in xrange(6):
    print(obj.pop()) # 4,2,3,9,0,4
obj = FreqStack()
obj.push(5)
obj.push(7)
obj.push(5)
obj.push(7)
obj.push(4)
obj.push(5)
for _ in xrange(4):
print(obj.pop()) # 5,7,5,4 | en | 0.844737 | # Time: O(1) # Space: O(n) # 895 # Implement FreqStack, # a class which simulates the operation of a stack-like data structure. # # FreqStack has two functions: # # push(int x), which pushes an integer x onto the stack. # pop(), which removes and returns the most frequent element in the stack. # If there is a tie for most frequent element, # the element closest to the top of the stack is removed and returned. # # Example 1: # # Input: # ["FreqStack","push","push","push","push","push","push","pop","pop","pop","pop"], # [[],[5],[7],[5],[7],[4],[5],[],[],[],[]] # Output: [null,null,null,null,null,null,null,5,7,5,4] # Explanation: # After making six .push operations, the stack is [5,7,5,7,4,5] from bottom to top. Then: # # pop() -> returns 5, as 5 is the most frequent. # The stack becomes [5,7,5,7,4]. # # pop() -> returns 7, as 5 and 7 is the most frequent, but 7 is closest to the top. # The stack becomes [5,7,5,4]. # # pop() -> returns 5. # The stack becomes [5,7,4]. # # pop() -> returns 4. # The stack becomes [5,7]. # # Note: # - Calls to FreqStack.push(int x) will be such that 0 <= x <= 10^9. # - It is guaranteed that FreqStack.pop() won't be called if the stack has zero elements. # - The total number of FreqStack.push calls will not exceed 10000 in a single test case. # - The total number of FreqStack.pop calls will not exceed 10000 in a single test case. # - The total number of FreqStack.push and # FreqStack.pop calls will not exceed 150000 across all test cases. # Very good: 1. Multiple stacks: maintain a mapping from 'freq' key to STACK of values, the stack remembers insertion order. # for example, push 5,7,5,7,4,5, we store # freq 1 : [5,7,4] # freq 2 : [5,7] # freq 3 : [5] # 2. Also store maxFreq, so don't re-count on every pop. # 3. Obviously maintain another mapping of value to freq which is basic for this problem. # list is treated as a stack to remember insertion order. 
:type x: int :rtype: void # don't remove it from f-1 stack, otherwise in pop we need to insert back to f-1 stack :rtype: int # list pop by index # self.__group.pop(self.__maxfreq) # no need to cleanup, maintain maxfreq is enough # Time bad: if not maintain maxFreq, then TLE due to calculate maxFreq every pop. # Space bad: store every insert (ids for each x value) # self.h = [] :type x: int :rtype: void :rtype: int # 4 # 6 # 1 # 1 # 4,2,3,9,0,4 # 5,7,5,4 | 3.6271 | 4 |
twitter_br_lms/split_data.py | huberemanuel/twitter-br | 0 | 6620133 | <filename>twitter_br_lms/split_data.py
import argparse
from collections import defaultdict
from pathlib import Path
import pandas as pd
from tqdm.auto import tqdm
from twitter_br_lms.args import SmartFormatter
MAX_TWEETS_DATASET = 30_000_000  # Max tweets to keep per dataset (CSVs are grouped by parent directory in main()).
def main():
    """Split the interim tweet CSVs into train/validation CSVs.

    Reads every ``*.csv`` under ``--data_path`` (one sub-directory per
    dataset), caps each dataset at ``MAX_TWEETS_DATASET`` tweets, pools
    everything, optionally de-duplicates, then writes ``train.csv`` and
    ``val.csv`` into ``--output_path``.

    Raises:
        ValueError: if ``--data_path`` or ``--output_path`` does not exist.
    """
    parser = argparse.ArgumentParser(
        "Split interim datasets into train and validation sets", formatter_class=SmartFormatter
    )
    parser.add_argument(
        "--data_path",
        type=str,
        help="""R|Path to the input data. The directory should have the following structure:
        data_path/
            dataset1/
                train.csv
            dataset2/
                file.csv
            datasetn/
                random_name.csv""",
    )
    parser.add_argument(
        "--output_path",
        type=str,
        help="Output path that processed CSVs are going to be stored.",
        default=".",
    )
    parser.add_argument(
        "--train_frac",
        type=float,
        help="Fraction of the dataset to be set as the training set. The (1 - `train_frac`)"
        " will be used as the test size.",
        default=0.9,
    )
    parser.add_argument(
        "--drop_duplicates",
        action="store_true",
        default=False,
        help="If set the pandas.drop_duplicates will be executed, this may take a while to finish",
    )
    parser.add_argument(
        "--seed", type=int, help="Default seed used in pandas random state", default=42
    )
    args = parser.parse_args()
    data_path = Path(args.data_path)
    output_path = Path(args.output_path)
    if not data_path.exists():
        raise ValueError("data_path {} does not exist".format(args.data_path))
    if not output_path.exists():
        raise ValueError("output_path {} does not exist".format(args.output_path))
    input_files = list(data_path.glob("**/*.csv"))
    samples = defaultdict(list)
    # Group tweets by the dataset (parent directory) each file came from.
    # NOTE(review): header=0 together with names= discards each file's first
    # row as a header; confirm the interim CSVs really carry a header row.
    for input_file in tqdm(input_files, desc="Splitting interim data into train and val sets"):
        dataset_name = Path(input_file).parent
        df = pd.read_csv(input_file, header=0, names=["text"])
        samples[dataset_name] += df["text"].to_list()
    # Cap over-represented datasets before pooling everything together.
    sentences = []
    for dataset, tweets in samples.items():
        if len(tweets) > MAX_TWEETS_DATASET:
            samples[dataset] = pd.DataFrame(tweets).sample(MAX_TWEETS_DATASET)[0].to_list()
        sentences += samples[dataset]
    df = pd.DataFrame(sentences, columns=["text"])
    if args.drop_duplicates:
        print("Dropping duplicates... go grab a ☕")
        df = df.drop_duplicates(subset=["text"])
    # Deterministic split: sample the train rows, the remainder is validation.
    train_df = df.sample(frac=args.train_frac, random_state=args.seed)
    val_df = df.drop(train_df.index)
    # index=False / header=False: emit one bare tweet per line (equivalent to
    # the previous index=None / header=0, just spelled explicitly).
    train_df.to_csv(output_path.joinpath("train.csv"), index=False, header=False)
    val_df.to_csv(output_path.joinpath("val.csv"), index=False, header=False)
if __name__ == "__main__":
    main()
| <filename>twitter_br_lms/split_data.py
import argparse
from collections import defaultdict
from pathlib import Path
import pandas as pd
from tqdm.auto import tqdm
from twitter_br_lms.args import SmartFormatter
MAX_TWEETS_DATASET = 30_000_000  # Max tweets to keep per dataset (CSVs are grouped by parent directory in main()).
def main():
    """Split the interim tweet CSVs into train/validation CSVs.

    Reads every ``*.csv`` under ``--data_path`` (one sub-directory per
    dataset), caps each dataset at ``MAX_TWEETS_DATASET`` tweets, pools
    everything, optionally de-duplicates, then writes ``train.csv`` and
    ``val.csv`` into ``--output_path``.

    Raises:
        ValueError: if ``--data_path`` or ``--output_path`` does not exist.
    """
    parser = argparse.ArgumentParser(
        "Split interim datasets into train and validation sets", formatter_class=SmartFormatter
    )
    parser.add_argument(
        "--data_path",
        type=str,
        help="""R|Path to the input data. The directory should have the following structure:
        data_path/
            dataset1/
                train.csv
            dataset2/
                file.csv
            datasetn/
                random_name.csv""",
    )
    parser.add_argument(
        "--output_path",
        type=str,
        help="Output path that processed CSVs are going to be stored.",
        default=".",
    )
    parser.add_argument(
        "--train_frac",
        type=float,
        help="Fraction of the dataset to be set as the training set. The (1 - `train_frac`)"
        " will be used as the test size.",
        default=0.9,
    )
    parser.add_argument(
        "--drop_duplicates",
        action="store_true",
        default=False,
        help="If set the pandas.drop_duplicates will be executed, this may take a while to finish",
    )
    parser.add_argument(
        "--seed", type=int, help="Default seed used in pandas random state", default=42
    )
    args = parser.parse_args()
    data_path = Path(args.data_path)
    output_path = Path(args.output_path)
    if not data_path.exists():
        raise ValueError("data_path {} does not exist".format(args.data_path))
    if not output_path.exists():
        raise ValueError("output_path {} does not exist".format(args.output_path))
    input_files = list(data_path.glob("**/*.csv"))
    samples = defaultdict(list)
    # Group tweets by the dataset (parent directory) each file came from.
    # NOTE(review): header=0 together with names= discards each file's first
    # row as a header; confirm the interim CSVs really carry a header row.
    for input_file in tqdm(input_files, desc="Splitting interim data into train and val sets"):
        dataset_name = Path(input_file).parent
        df = pd.read_csv(input_file, header=0, names=["text"])
        samples[dataset_name] += df["text"].to_list()
    # Cap over-represented datasets before pooling everything together.
    sentences = []
    for dataset, tweets in samples.items():
        if len(tweets) > MAX_TWEETS_DATASET:
            samples[dataset] = pd.DataFrame(tweets).sample(MAX_TWEETS_DATASET)[0].to_list()
        sentences += samples[dataset]
    df = pd.DataFrame(sentences, columns=["text"])
    if args.drop_duplicates:
        print("Dropping duplicates... go grab a ☕")
        df = df.drop_duplicates(subset=["text"])
    # Deterministic split: sample the train rows, the remainder is validation.
    train_df = df.sample(frac=args.train_frac, random_state=args.seed)
    val_df = df.drop(train_df.index)
    # index=False / header=False: emit one bare tweet per line (equivalent to
    # the previous index=None / header=0, just spelled explicitly).
    train_df.to_csv(output_path.joinpath("train.csv"), index=False, header=False)
    val_df.to_csv(output_path.joinpath("val.csv"), index=False, header=False)
if __name__ == "__main__":
    main()
| en | 0.713577 | # Max tweets to get from a single file. R|Path to the input data. The directory should have the following structure: data_path/ dataset1/ train.csv dataset2/ file.csv datasetn/ random_name.csv | 3.189611 | 3 |
Sketchbots/sw/labqueue/lask/services/data_watchdog_svc.py | rlugojr/ChromeWebLab | 306 | 6620134 | <filename>Sketchbots/sw/labqueue/lask/services/data_watchdog_svc.py<gh_stars>100-1000
# Copyright 2013 Google Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Various service objects used to monitor and maintain data integrity,
such as deleting things which are obsolete.
"""
import logging
import time
from google.appengine.ext import db
from support.modeling import *
import datetime
from lask import core
import config
# from static_data import country_data
# from static_data import map_areas
# from static_data import test_lab_tag_ids
# from static_data import countries_to_continents
from math import floor
# Tuning knobs for the cursor-driven datastore sweeps implemented below.
MAX_RECORDS_PER_ITERATIVE_FETCH = 200
"The maximum number of records to retrieve per call to Query.fetch() when iterating over potentially large batches of results"
MAX_RECORDS_CUMULATIVE_FETCH = 2000
"The maximum culumative number of records to retrieve over an iterative series of Query.fetch() calls"
# MAX_AGE_OF_TEMP_LDCS_SEC = 10 # test
MAX_AGE_OF_TEMP_LDCS_SEC = 7200 # production
"The maximum age of an LDC, in seconds"
X_DELETE_BLOBS = False
"Experimental! Whether or not to delete blobs explicitly when deleting their parent LDC's"
class TaskWatchdog(object):
    """ Keeps an eye on Task objects
    """
    @classmethod
    def clean_tasks(cls):
        """ Clears out "zombie" tasks.

        A zombie is a Task still in the RESERVATION state whose reservation
        is older than config.TASK_RESERVATION_MAX_HOLD_TIME_SEC; each one
        found has its reservation cancelled on behalf of the API worker.
        Records are fetched in cursor-driven batches, capped at
        MAX_RECORDS_CUMULATIVE_FETCH per invocation. Always returns True.
        """
        logging.info('TaskWatchdog: Running clean_tasks()')
        # Reservations created before this instant have been held too long.
        timeout = modeling_utcnow() - datetime.timedelta(seconds=config.TASK_RESERVATION_MAX_HOLD_TIME_SEC)
        q = db.Query(core.model.Task)
        q.filter('state =', core.model.TaskStateProperty.RESERVATION)
        q.filter('created_at <', timeout)
        num_records_fetched = 0
        run = True
        while run:
            # get more records to inspect
            # limit shrinks as the cumulative cap is approached; 0 ends the sweep.
            limit = min(MAX_RECORDS_PER_ITERATIVE_FETCH, MAX_RECORDS_CUMULATIVE_FETCH - num_records_fetched)
            num_records_fetched += limit
            if limit > 0:
                #logging.info('Fetching new batch of records, limit='+str(limit))
                recs = q.fetch(limit=limit)
            else:
                recs = []
            if len(recs) > 0:
                for rec in recs:
                    logging.info('TaskWatchdog: Found expired reserved Task %s in Topic %s (created by %s at %s), cancelling' % (str(rec.get_task_id()), rec.topic_name, rec.created_by, str(rec.created_at)))
                    rec.cancel_reservation(config.API_WORKER_GUID)
                q.with_cursor(q.cursor()) # use a cursor to make the next fetch pick up where this one ended
            else:
                # no more records
                run = False
        logging.info('TaskWatchdog: Finished clean_tasks()')
        return True
class LabDataContainerWatchdog(object):
    """ Keeps an eye on LabDataContainer objects
    """
    @classmethod
    def propogate_moderation(cls):
        """ Goes through the datastore looking for LDC's which have the mod_propogate
        flag set to True and copies those LDC's mod_flagged and mod_rejected property
        values to all child LDCs.

        Always returns True (cron/taskqueue handler convention).
        """
        logging.info('LabDataContainerWatchdog: Starting propogate_moderation()')
        q = db.Query(core.model.LabDataContainer)
        q.filter('mod_propogate =', True)
        num_records_fetched = 0
        run = True
        while run:
            # get more records to inspect
            limit = min(MAX_RECORDS_PER_ITERATIVE_FETCH, MAX_RECORDS_CUMULATIVE_FETCH - num_records_fetched)
            num_records_fetched += limit
            if limit > 0:
                #logging.info('Fetching new batch of records, limit='+str(limit))
                recs = q.fetch(limit=limit)
            else:
                recs = []
            if len(recs) > 0:
                for rec in recs:
                    # if rec.mod_flagged or rec.mod_rejected:
                    # only do this for records that are flagged or rejected
                    #
                    # change all of this LDC's children with deleted=False to deleted=True
                    #
                    logging.info('LabDataContainerWatchdog: Propoagting moderation properties from LabDataContainer '+rec.key().id_or_name()+' to all of its children')
                    q2 = db.Query(core.model.LabDataContainer)
                    q2.filter('ancestors =', rec.key())
                    #q2.filter('mod_propogate = ', False)
                    # NOTE(review): num_records_fetched2 is never incremented below,
                    # so MAX_RECORDS_CUMULATIVE_FETCH is not actually enforced for
                    # this inner sweep.
                    num_records_fetched2 = 0
                    run2 = True
                    while run2:
                        limit2 = min(MAX_RECORDS_PER_ITERATIVE_FETCH, MAX_RECORDS_CUMULATIVE_FETCH - num_records_fetched2)
                        logging.info(limit2)
                        if limit2 > 0:
                            recs2 = q2.fetch(limit=limit2)
                        else:
                            recs2 = []
                        if len(recs2) > 0:
                            for rec2 in recs2:
                                if rec2.key() != rec.key():
                                    # delete blob if necessary (do this first, before marking the record as culled)
                                    # mark this child record as culled
                                    rec2.mod_rejected = rec.mod_rejected
                                    rec2.mod_approved = rec.mod_approved
                                    rec2.mod_flagged = rec.mod_flagged
                                    rec2.mod_rejected_at = rec.mod_rejected_at
                                    rec2.mod_approved_at = rec.mod_approved_at
                                    rec2.mod_flagged_at = rec.mod_flagged_at
                                    # NOTE(review): this clears the flag on the parent
                                    # (rec) inside the child loop; rec2.mod_propogate
                                    # is presumably what was intended — confirm.
                                    rec.mod_propogate = False
                                    rec2.put()
                                    logging.info('LabDataContainerWatchdog: LabDataContainer with key name '+rec2.key().id_or_name()+' set with mod_flagged = %s, mod_rejected = %s, mod_approved = %s' % (str(rec2.mod_flagged), str(rec2.mod_rejected), str(rec2.mod_approved)))
                            q2.with_cursor(q2.cursor()) # use a cursor to make the next fetch pick up where this one ended
                        else:
                            # no more records
                            run2 = False
                    #
                    # done
                    #
                    rec.mod_propogate = False
                    rec.put()
                    logging.info('LabDataContainerWatchdog: Done propogating moderation properties of LabDataContainer with key name '+rec.key().id_or_name()+'.')
                q.with_cursor(q.cursor()) # use a cursor to make the next fetch pick up where this one ended
            else:
                # no more records
                run = False
        logging.info('LabDataContainerWatchdog: Finished propogate_moderation()')
        return True
    @classmethod
    def cull(cls):
        """ Goes through the datastore looking for LDC's which have their
        deleted flag set to True, and makes sure that child LDC's are also
        marked deleted.

        Always returns True (cron/taskqueue handler convention).
        """
        logging.info('LabDataContainerWatchdog: Starting cull()')
        q = db.Query(core.model.LabDataContainer)
        q.filter('deleted =', True)
        q.filter('culled =', False)
        num_records_fetched = 0
        run = True
        while run:
            # get more records to inspect
            limit = min(MAX_RECORDS_PER_ITERATIVE_FETCH, MAX_RECORDS_CUMULATIVE_FETCH - num_records_fetched)
            num_records_fetched += limit
            if limit > 0:
                #logging.info('Fetching new batch of records, limit='+str(limit))
                recs = q.fetch(limit=limit)
            else:
                recs = []
            if len(recs) > 0:
                for rec in recs:
                    #
                    # change all of this LDC's children with deleted=False to deleted=True
                    #
                    logging.info('LabDataContainerWatchdog: LabDataContainer '+rec.key().id_or_name()+' is to be deleted, making sure child LDCs will also be deleted.')
                    q2 = db.Query(core.model.LabDataContainer)
                    q2.filter('ancestors = ', rec.key())
                    q2.filter('deleted = ', False)
                    # NOTE(review): num_records_fetched2 is never incremented below,
                    # so the cumulative cap is not enforced for this inner sweep.
                    num_records_fetched2 = 0
                    run2 = True
                    while run2:
                        limit2 = min(MAX_RECORDS_PER_ITERATIVE_FETCH, MAX_RECORDS_CUMULATIVE_FETCH - num_records_fetched2)
                        if limit2 > 0:
                            recs2 = q2.fetch(limit=limit2)
                        else:
                            recs2 = []
                        if len(recs2) > 0:
                            for rec2 in recs2:
                                if rec2.key() != rec.key():
                                    # delete blob if necessary (do this first, before marking the record as culled)
                                    if X_DELETE_BLOBS and rec2.content_blob is not None:
                                        rec2.content_blob.delete()
                                    # mark this child record as culled
                                    rec2.deleted = True
                                    rec2.culled = True
                                    rec2.put()
                                    logging.info('LabDataContainerWatchdog: LabDataContainer with key name '+rec2.key().id_or_name()+' marked deleted & culled')
                            q2.with_cursor(q2.cursor()) # use a cursor to make the next fetch pick up where this one ended
                        else:
                            # no more records
                            run2 = False
                    #
                    # done
                    #
                    # delete blob if necessary (do this first, before marking the record as culled)
                    if X_DELETE_BLOBS and rec.content_blob is not None:
                        rec.content_blob.delete()
                    # mark this record as culled
                    rec.culled = True
                    rec.put()
                    logging.info('LabDataContainerWatchdog: LabDataContainer with key name '+rec.key().id_or_name()+' marked deleted & culled')
                q.with_cursor(q.cursor()) # use a cursor to make the next fetch pick up where this one ended
            else:
                # no more records
                run = False
        logging.info('LabDataContainerWatchdog: Finished cull()')
        return True
    @classmethod
    def clean_temp(cls):
        """ Cleans out items from the /temp tree which are over a certain maximum age.

        "Over age" means updated_at older than MAX_AGE_OF_TEMP_LDCS_SEC.
        Unlike cull(), matching records are deleted outright.
        Always returns True (cron/taskqueue handler convention).
        """
        logging.info('LabDataContainerWatchdog: Starting clean_temp()')
        timeout = modeling_utcnow() - datetime.timedelta(seconds=MAX_AGE_OF_TEMP_LDCS_SEC)
        q = db.Query(core.model.LabDataContainer)
        k = core.model.LabDataPath('temp').get_key()
        # logging.info(k.name())
        # logging.info(timeout)
        q.filter('ancestors =', k)
        q.filter('deleted =', False)
        q.filter('updated_at <', timeout)
        num_records_fetched = 0
        run = True
        while run:
            # get more records to inspect
            limit = min(MAX_RECORDS_PER_ITERATIVE_FETCH, MAX_RECORDS_CUMULATIVE_FETCH - num_records_fetched)
            num_records_fetched += limit
            if limit > 0:
                logging.info('Fetching new batch of records, limit='+str(limit))
                recs = q.fetch(limit=limit)
            else:
                recs = []
            logging.info('Found '+str(len(recs))+' records')
            if len(recs) > 0:
                for rec in recs:
                    # delete blob if necessary (do this first, before marking the record as culled)
                    if X_DELETE_BLOBS and rec.content_blob is not None:
                        logging.info('LabDataContainerWatchdog: clean_temp() is deleting blob '+str(rec.content_blob)+' for LabDataContainer with key name '+rec.key().id_or_name())
                        rec.content_blob.delete()
                        rec.content_blob = None
                    # and flag the content for deletion by the regular deletion by LabDataContainerWatchdog.cull()
                    # rec.end_user_delete()
                    # actually delete the object
                    rec.delete()
                q.with_cursor(q.cursor()) # use a cursor to make the next fetch pick up where this one ended
            else:
                # no more records
                run = False
        logging.info('LabDataContainerWatchdog: Finished clean_temp()')
        return True
| <filename>Sketchbots/sw/labqueue/lask/services/data_watchdog_svc.py<gh_stars>100-1000
# Copyright 2013 Google Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Various service objects used to monitor and maintain data integrity,
such as deleting things which are obsolete.
"""
import logging
import time
from google.appengine.ext import db
from support.modeling import *
import datetime
from lask import core
import config
# from static_data import country_data
# from static_data import map_areas
# from static_data import test_lab_tag_ids
# from static_data import countries_to_continents
from math import floor
# Tuning knobs for the cursor-driven datastore sweeps implemented below.
MAX_RECORDS_PER_ITERATIVE_FETCH = 200
"The maximum number of records to retrieve per call to Query.fetch() when iterating over potentially large batches of results"
MAX_RECORDS_CUMULATIVE_FETCH = 2000
"The maximum culumative number of records to retrieve over an iterative series of Query.fetch() calls"
# MAX_AGE_OF_TEMP_LDCS_SEC = 10 # test
MAX_AGE_OF_TEMP_LDCS_SEC = 7200 # production
"The maximum age of an LDC, in seconds"
X_DELETE_BLOBS = False
"Experimental! Whether or not to delete blobs explicitly when deleting their parent LDC's"
class TaskWatchdog(object):
    """ Keeps an eye on Task objects
    """
    @classmethod
    def clean_tasks(cls):
        """ Clears out "zombie" tasks.

        A zombie is a Task still in the RESERVATION state whose reservation
        is older than config.TASK_RESERVATION_MAX_HOLD_TIME_SEC; each one
        found has its reservation cancelled on behalf of the API worker.
        Records are fetched in cursor-driven batches, capped at
        MAX_RECORDS_CUMULATIVE_FETCH per invocation. Always returns True.
        """
        logging.info('TaskWatchdog: Running clean_tasks()')
        # Reservations created before this instant have been held too long.
        timeout = modeling_utcnow() - datetime.timedelta(seconds=config.TASK_RESERVATION_MAX_HOLD_TIME_SEC)
        q = db.Query(core.model.Task)
        q.filter('state =', core.model.TaskStateProperty.RESERVATION)
        q.filter('created_at <', timeout)
        num_records_fetched = 0
        run = True
        while run:
            # get more records to inspect
            # limit shrinks as the cumulative cap is approached; 0 ends the sweep.
            limit = min(MAX_RECORDS_PER_ITERATIVE_FETCH, MAX_RECORDS_CUMULATIVE_FETCH - num_records_fetched)
            num_records_fetched += limit
            if limit > 0:
                #logging.info('Fetching new batch of records, limit='+str(limit))
                recs = q.fetch(limit=limit)
            else:
                recs = []
            if len(recs) > 0:
                for rec in recs:
                    logging.info('TaskWatchdog: Found expired reserved Task %s in Topic %s (created by %s at %s), cancelling' % (str(rec.get_task_id()), rec.topic_name, rec.created_by, str(rec.created_at)))
                    rec.cancel_reservation(config.API_WORKER_GUID)
                q.with_cursor(q.cursor()) # use a cursor to make the next fetch pick up where this one ended
            else:
                # no more records
                run = False
        logging.info('TaskWatchdog: Finished clean_tasks()')
        return True
class LabDataContainerWatchdog(object):
    """ Keeps an eye on LabDataContainer objects
    """
    @classmethod
    def propogate_moderation(cls):
        """ Goes through the datastore looking for LDC's which have the mod_propogate
        flag set to True and copies those LDC's mod_flagged and mod_rejected property
        values to all child LDCs.

        Always returns True (cron/taskqueue handler convention).
        """
        logging.info('LabDataContainerWatchdog: Starting propogate_moderation()')
        q = db.Query(core.model.LabDataContainer)
        q.filter('mod_propogate =', True)
        num_records_fetched = 0
        run = True
        while run:
            # get more records to inspect
            limit = min(MAX_RECORDS_PER_ITERATIVE_FETCH, MAX_RECORDS_CUMULATIVE_FETCH - num_records_fetched)
            num_records_fetched += limit
            if limit > 0:
                #logging.info('Fetching new batch of records, limit='+str(limit))
                recs = q.fetch(limit=limit)
            else:
                recs = []
            if len(recs) > 0:
                for rec in recs:
                    # if rec.mod_flagged or rec.mod_rejected:
                    # only do this for records that are flagged or rejected
                    #
                    # change all of this LDC's children with deleted=False to deleted=True
                    #
                    logging.info('LabDataContainerWatchdog: Propoagting moderation properties from LabDataContainer '+rec.key().id_or_name()+' to all of its children')
                    q2 = db.Query(core.model.LabDataContainer)
                    q2.filter('ancestors =', rec.key())
                    #q2.filter('mod_propogate = ', False)
                    # NOTE(review): num_records_fetched2 is never incremented below,
                    # so MAX_RECORDS_CUMULATIVE_FETCH is not actually enforced for
                    # this inner sweep.
                    num_records_fetched2 = 0
                    run2 = True
                    while run2:
                        limit2 = min(MAX_RECORDS_PER_ITERATIVE_FETCH, MAX_RECORDS_CUMULATIVE_FETCH - num_records_fetched2)
                        logging.info(limit2)
                        if limit2 > 0:
                            recs2 = q2.fetch(limit=limit2)
                        else:
                            recs2 = []
                        if len(recs2) > 0:
                            for rec2 in recs2:
                                if rec2.key() != rec.key():
                                    # delete blob if necessary (do this first, before marking the record as culled)
                                    # mark this child record as culled
                                    rec2.mod_rejected = rec.mod_rejected
                                    rec2.mod_approved = rec.mod_approved
                                    rec2.mod_flagged = rec.mod_flagged
                                    rec2.mod_rejected_at = rec.mod_rejected_at
                                    rec2.mod_approved_at = rec.mod_approved_at
                                    rec2.mod_flagged_at = rec.mod_flagged_at
                                    # NOTE(review): this clears the flag on the parent
                                    # (rec) inside the child loop; rec2.mod_propogate
                                    # is presumably what was intended — confirm.
                                    rec.mod_propogate = False
                                    rec2.put()
                                    logging.info('LabDataContainerWatchdog: LabDataContainer with key name '+rec2.key().id_or_name()+' set with mod_flagged = %s, mod_rejected = %s, mod_approved = %s' % (str(rec2.mod_flagged), str(rec2.mod_rejected), str(rec2.mod_approved)))
                            q2.with_cursor(q2.cursor()) # use a cursor to make the next fetch pick up where this one ended
                        else:
                            # no more records
                            run2 = False
                    #
                    # done
                    #
                    rec.mod_propogate = False
                    rec.put()
                    logging.info('LabDataContainerWatchdog: Done propogating moderation properties of LabDataContainer with key name '+rec.key().id_or_name()+'.')
                q.with_cursor(q.cursor()) # use a cursor to make the next fetch pick up where this one ended
            else:
                # no more records
                run = False
        logging.info('LabDataContainerWatchdog: Finished propogate_moderation()')
        return True
    @classmethod
    def cull(cls):
        """ Goes through the datastore looking for LDC's which have their
        deleted flag set to True, and makes sure that child LDC's are also
        marked deleted.

        Always returns True (cron/taskqueue handler convention).
        """
        logging.info('LabDataContainerWatchdog: Starting cull()')
        q = db.Query(core.model.LabDataContainer)
        q.filter('deleted =', True)
        q.filter('culled =', False)
        num_records_fetched = 0
        run = True
        while run:
            # get more records to inspect
            limit = min(MAX_RECORDS_PER_ITERATIVE_FETCH, MAX_RECORDS_CUMULATIVE_FETCH - num_records_fetched)
            num_records_fetched += limit
            if limit > 0:
                #logging.info('Fetching new batch of records, limit='+str(limit))
                recs = q.fetch(limit=limit)
            else:
                recs = []
            if len(recs) > 0:
                for rec in recs:
                    #
                    # change all of this LDC's children with deleted=False to deleted=True
                    #
                    logging.info('LabDataContainerWatchdog: LabDataContainer '+rec.key().id_or_name()+' is to be deleted, making sure child LDCs will also be deleted.')
                    q2 = db.Query(core.model.LabDataContainer)
                    q2.filter('ancestors = ', rec.key())
                    q2.filter('deleted = ', False)
                    # NOTE(review): num_records_fetched2 is never incremented below,
                    # so the cumulative cap is not enforced for this inner sweep.
                    num_records_fetched2 = 0
                    run2 = True
                    while run2:
                        limit2 = min(MAX_RECORDS_PER_ITERATIVE_FETCH, MAX_RECORDS_CUMULATIVE_FETCH - num_records_fetched2)
                        if limit2 > 0:
                            recs2 = q2.fetch(limit=limit2)
                        else:
                            recs2 = []
                        if len(recs2) > 0:
                            for rec2 in recs2:
                                if rec2.key() != rec.key():
                                    # delete blob if necessary (do this first, before marking the record as culled)
                                    if X_DELETE_BLOBS and rec2.content_blob is not None:
                                        rec2.content_blob.delete()
                                    # mark this child record as culled
                                    rec2.deleted = True
                                    rec2.culled = True
                                    rec2.put()
                                    logging.info('LabDataContainerWatchdog: LabDataContainer with key name '+rec2.key().id_or_name()+' marked deleted & culled')
                            q2.with_cursor(q2.cursor()) # use a cursor to make the next fetch pick up where this one ended
                        else:
                            # no more records
                            run2 = False
                    #
                    # done
                    #
                    # delete blob if necessary (do this first, before marking the record as culled)
                    if X_DELETE_BLOBS and rec.content_blob is not None:
                        rec.content_blob.delete()
                    # mark this record as culled
                    rec.culled = True
                    rec.put()
                    logging.info('LabDataContainerWatchdog: LabDataContainer with key name '+rec.key().id_or_name()+' marked deleted & culled')
                q.with_cursor(q.cursor()) # use a cursor to make the next fetch pick up where this one ended
            else:
                # no more records
                run = False
        logging.info('LabDataContainerWatchdog: Finished cull()')
        return True
    @classmethod
    def clean_temp(cls):
        """ Cleans out items from the /temp tree which are over a certain maximum age.

        "Over age" means updated_at older than MAX_AGE_OF_TEMP_LDCS_SEC.
        Unlike cull(), matching records are deleted outright.
        Always returns True (cron/taskqueue handler convention).
        """
        logging.info('LabDataContainerWatchdog: Starting clean_temp()')
        timeout = modeling_utcnow() - datetime.timedelta(seconds=MAX_AGE_OF_TEMP_LDCS_SEC)
        q = db.Query(core.model.LabDataContainer)
        k = core.model.LabDataPath('temp').get_key()
        # logging.info(k.name())
        # logging.info(timeout)
        q.filter('ancestors =', k)
        q.filter('deleted =', False)
        q.filter('updated_at <', timeout)
        num_records_fetched = 0
        run = True
        while run:
            # get more records to inspect
            limit = min(MAX_RECORDS_PER_ITERATIVE_FETCH, MAX_RECORDS_CUMULATIVE_FETCH - num_records_fetched)
            num_records_fetched += limit
            if limit > 0:
                logging.info('Fetching new batch of records, limit='+str(limit))
                recs = q.fetch(limit=limit)
            else:
                recs = []
            logging.info('Found '+str(len(recs))+' records')
            if len(recs) > 0:
                for rec in recs:
                    # delete blob if necessary (do this first, before marking the record as culled)
                    if X_DELETE_BLOBS and rec.content_blob is not None:
                        logging.info('LabDataContainerWatchdog: clean_temp() is deleting blob '+str(rec.content_blob)+' for LabDataContainer with key name '+rec.key().id_or_name())
                        rec.content_blob.delete()
                        rec.content_blob = None
                    # and flag the content for deletion by the regular deletion by LabDataContainerWatchdog.cull()
                    # rec.end_user_delete()
                    # actually delete the object
                    rec.delete()
                q.with_cursor(q.cursor()) # use a cursor to make the next fetch pick up where this one ended
            else:
                # no more records
                run = False
        logging.info('LabDataContainerWatchdog: Finished clean_temp()')
        return True
| en | 0.877237 | # Copyright 2013 Google Inc # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Various service objects used to monitor and maintain data integrity, such as deleting things which are obsolete. # from static_data import country_data # from static_data import map_areas # from static_data import test_lab_tag_ids # from static_data import countries_to_continents # MAX_AGE_OF_TEMP_LDCS_SEC = 10 # test # production Keeps an eye on Task objects Clears out "zombie" tasks # get more records to inspect #logging.info('Fetching new batch of records, limit='+str(limit)) # use a cursor to make the next fetch pick up where this one ended # no more records Keeps an eye on LabDataContainer objects Goes through the datastore looking for LDC's which have the mod_propogate flag set to True and copies those LDC's mod_flagged and mod_rejected property values to all child LDCs. 
# get more records to inspect #logging.info('Fetching new batch of records, limit='+str(limit)) # if rec.mod_flagged or rec.mod_rejected: # only do this for records that are flagged or rejected # # change all of this LDC's children with deleted=False to deleted=True # #q2.filter('mod_propogate = ', False) # delete blob if necessary (do this first, before marking the record as culled) # mark this child record as culled # use a cursor to make the next fetch pick up where this one ended # no more records # # done # # use a cursor to make the next fetch pick up where this one ended # no more records Goes through the datastore looking for LDC's which have their deleted flag set to True, and makes sure that child LDC's are also marked deleted. # get more records to inspect #logging.info('Fetching new batch of records, limit='+str(limit)) # # change all of this LDC's children with deleted=False to deleted=True # # delete blob if necessary (do this first, before marking the record as culled) # mark this child record as culled # use a cursor to make the next fetch pick up where this one ended # no more records # # done # # delete blob if necessary (do this first, before marking the record as culled) # mark this record as culled # use a cursor to make the next fetch pick up where this one ended # no more records Cleans out items from the /temp tree which are over a certain maximum age. # logging.info(k.name()) # logging.info(timeout) # get more records to inspect # delete blob if necessary (do this first, before marking the record as culled) # and flag the content for deletion by the regular deletion by LabDataContainerWatchdog.cull() # rec.end_user_delete() # actually delete the object # use a cursor to make the next fetch pick up where this one ended # no more records | 2.045713 | 2 |
pyuniqid/uniqid.py | boriskurikhin/pyuniqid | 1 | 6620135 | """Unique ID module.
Consists of the unique id generator, and necessary but hidden
utility functions.
Typical usage example:
# Simple.
uniqid()
# With a prefix.
uniqid('hello-')
# With a prefix and a postfix.
uniqid('hello-', '-goodbye')
# With only a postfix.
uniqid('', '-goodbye')
"""
import os
import time
import netifaces
from numpy import base_repr
def __get_pid():
return os.getpid()
def __get_netifaces():
"""Retrieves an appropriate MAC address.
Returns:
A string containing the MAC address.
"""
network_interfaces = netifaces.interfaces()
for ni in network_interfaces:
nif = netifaces.ifaddresses(ni)
if ni == 'lo' or netifaces.AF_LINK not in nif:
continue
return netifaces.ifaddresses(ni)[netifaces.AF_LINK][0]['addr']
return '0'
def __get_mac():
"""Returns MAC address as an integer.
Strips all non-integer characters from the MAC address and returns the
result.
Returns:
Integer version of the MAC address.
"""
mac = __get_netifaces()
return int(''.join(list(filter(lambda x: x.isdigit(), list(mac)))))
def __get_time():
return int(time.time() * 1000)
def __tob36(item):
"""Converts an item to base 36.
Args:
item: The item to convert, ideally integer.
Returns:
The item converted into base 36 format.
"""
item_int = int(item)
return base_repr(item_int, 36)
def uniqid(prefix='', postfix=''):
"""Generates a unique id.
Combination of MAC address, process ID and time to generate a unique id.
Args:
prefix: Optional string prefix for the ID, appearing at the beginning.
postfix: Optional string postfix for the ID, appearing at the end.
Returns:
A unique ID, as a string.
"""
return ''.join([
prefix,
__tob36(__get_mac()),
__tob36(__get_pid()),
__tob36(__get_time()),
postfix
]).lower()
| """Unique ID module.
Consists of the unique id generator, and necessary but hidden
utility functions.
Typical usage example:
# Simple.
uniqid()
# With a prefix.
uniqid('hello-')
# With a prefix and a postfix.
uniqid('hello-', '-goodbye')
# With only a postfix.
uniqid('', '-goodbye')
"""
import os
import time
import netifaces
from numpy import base_repr
def __get_pid():
return os.getpid()
def __get_netifaces():
"""Retrieves an appropriate MAC address.
Returns:
A string containing the MAC address.
"""
network_interfaces = netifaces.interfaces()
for ni in network_interfaces:
nif = netifaces.ifaddresses(ni)
if ni == 'lo' or netifaces.AF_LINK not in nif:
continue
return netifaces.ifaddresses(ni)[netifaces.AF_LINK][0]['addr']
return '0'
def __get_mac():
"""Returns MAC address as an integer.
Strips all non-integer characters from the MAC address and returns the
result.
Returns:
Integer version of the MAC address.
"""
mac = __get_netifaces()
return int(''.join(list(filter(lambda x: x.isdigit(), list(mac)))))
def __get_time():
return int(time.time() * 1000)
def __tob36(item):
"""Converts an item to base 36.
Args:
item: The item to convert, ideally integer.
Returns:
The item converted into base 36 format.
"""
item_int = int(item)
return base_repr(item_int, 36)
def uniqid(prefix='', postfix=''):
"""Generates a unique id.
Combination of MAC address, process ID and time to generate a unique id.
Args:
prefix: Optional string prefix for the ID, appearing at the beginning.
postfix: Optional string postfix for the ID, appearing at the end.
Returns:
A unique ID, as a string.
"""
return ''.join([
prefix,
__tob36(__get_mac()),
__tob36(__get_pid()),
__tob36(__get_time()),
postfix
]).lower()
| en | 0.66361 | Unique ID module. Consists of the unique id generator, and necessary but hidden utility functions. Typical usage example: # Simple. uniqid() # With a prefix. uniqid('hello-') # With a prefix and a postfix. uniqid('hello-', '-goodbye') # With only a postfix. uniqid('', '-goodbye') Retrieves an appropriate MAC address. Returns: A string containing the MAC address. Returns MAC address as an integer. Strips all non-integer characters from the MAC address and returns the result. Returns: Integer version of the MAC address. Converts an item to base 36. Args: item: The item to convert, ideally integer. Returns: The item converted into base 36 format. Generates a unique id. Combination of MAC address, process ID and time to generate a unique id. Args: prefix: Optional string prefix for the ID, appearing at the beginning. postfix: Optional string postfix for the ID, appearing at the end. Returns: A unique ID, as a string. | 3.34672 | 3 |
CursoEmVideoPython/desafio70.py | miguelabreuss/scripts_python | 0 | 6620136 | <reponame>miguelabreuss/scripts_python
nome = flag = mais_barato = ''
preco = total = count_prod = 0
menor_preco = 999999999999
while True:
print('-' * 30)
print('REGISTRAR NOVO PRODUTO')
print('-' * 30)
nome = str(input('Qual o nome do produto? '))
preco = float(input(f'Qual o preço do produto {nome}? '))
total += preco
if preco >= 1000:
count_prod +=1
if preco < menor_preco:
mais_barato = nome
menor_preco = preco
while True:
flag = str(input('Deseja continuar [S/N]? '))
if flag in 'SsNn':
break
if flag in 'Nn':
break
print('-' * 30)
print('''RESULTADO DA COMPRA
''')
print(f'O total gasto na compra foi de \33[:31mR${total:.2f}\33[m.')
print(f'\33[:34m{count_prod}\33[m produtos encontrados acima de \33[4mR$1.000,00\33[m')
print(f'O produto mais barato foi \33[:32m{mais_barato}\33[m, que custa \33[:32mR${menor_preco:.2f}\33[m.') | nome = flag = mais_barato = ''
preco = total = count_prod = 0
menor_preco = 999999999999
while True:
print('-' * 30)
print('REGISTRAR NOVO PRODUTO')
print('-' * 30)
nome = str(input('Qual o nome do produto? '))
preco = float(input(f'Qual o preço do produto {nome}? '))
total += preco
if preco >= 1000:
count_prod +=1
if preco < menor_preco:
mais_barato = nome
menor_preco = preco
while True:
flag = str(input('Deseja continuar [S/N]? '))
if flag in 'SsNn':
break
if flag in 'Nn':
break
print('-' * 30)
print('''RESULTADO DA COMPRA
''')
print(f'O total gasto na compra foi de \33[:31mR${total:.2f}\33[m.')
print(f'\33[:34m{count_prod}\33[m produtos encontrados acima de \33[4mR$1.000,00\33[m')
print(f'O produto mais barato foi \33[:32m{mais_barato}\33[m, que custa \33[:32mR${menor_preco:.2f}\33[m.') | es | 0.353852 | RESULTADO DA COMPRA | 3.429438 | 3 |
tests/test_client.py | MLAide/python-client | 1 | 6620137 | <reponame>MLAide/python-client
from pytest_mock.plugin import MockerFixture
import pytest
from mlaide import MLAideClient, ConnectionOptions, ModelStage
@pytest.fixture
def mock_authenticated_client(mocker: MockerFixture):
return mocker.patch('mlaide.client.AuthenticatedClient')
@pytest.fixture
def mock_active_run(mocker: MockerFixture):
return mocker.patch('mlaide.client.ActiveRun')
@pytest.fixture
def mock_active_artifact(mocker: MockerFixture):
return mocker.patch('mlaide.client.ActiveArtifact')
@pytest.fixture
def mock_get_git_metadata(mocker: MockerFixture):
return mocker.patch('mlaide.client.get_git_metadata')
def test_init_should_raise_value_error_if_project_key_is_none():
with pytest.raises(ValueError):
# noinspection PyTypeChecker
MLAideClient(None)
def test_init_should_use_default_options_if_no_options_provided(monkeypatch):
# arrange
monkeypatch.setenv('MLAIDE_API_KEY', 'the api key')
# act
client = MLAideClient('project key', options=None)
# assert
options = client.options
assert options.api_key == 'the api key'
assert options.server_url == 'http://localhost:9000/api/v1'
def test_init_should_use_merge_provided_options_with_default_options(monkeypatch):
# arrange
monkeypatch.setenv('MLAIDE_API_KEY', 'the api key')
# act
client = MLAideClient('project key', options=ConnectionOptions(server_url='http://my-server.com'))
# assert
options = client.options
assert options.api_key == 'the api key'
assert options.server_url == 'http://my-server.com'
def test_init_should_create_authenticated_client(mock_authenticated_client):
# act
client = MLAideClient('project key', options=ConnectionOptions(server_url='http://my-server.com', api_key='the key'))
# assert
mock_authenticated_client.assert_called_once_with(base_url='http://my-server.com', api_key='the key')
assert client.api_client == mock_authenticated_client.return_value
def test_start_new_run_should_instantiate_new_active_run_with_correct_arguments(
mock_authenticated_client, mock_active_run, mock_get_git_metadata):
# arrange
client = MLAideClient('project key')
used_artifacts = []
# act
active_run = client.start_new_run('experiment key', 'run name', used_artifacts)
# assert
mock_active_run.assert_called_once_with(
api_client=mock_authenticated_client.return_value,
project_key='project key',
run_name='run name',
git=mock_get_git_metadata.return_value,
experiment_key='experiment key',
used_artifacts=used_artifacts,
auto_create_experiment=True)
assert active_run == mock_active_run.return_value
def test_start_new_run_and_do_not_auto_create_experiment_should_instantiate_new_active_run_with_correct_arguments(
mock_authenticated_client, mock_active_run, mock_get_git_metadata):
# arrange
client = MLAideClient('project key')
used_artifacts = []
# act
active_run = client.start_new_run('experiment key', 'run name', used_artifacts, False)
# assert
mock_active_run.assert_called_once_with(
api_client=mock_authenticated_client.return_value,
project_key='project key',
run_name='run name',
git=mock_get_git_metadata.return_value,
experiment_key='experiment key',
used_artifacts=used_artifacts,
auto_create_experiment=False)
assert active_run == mock_active_run.return_value
def test_start_new_run_and_do_auto_create_experiment_should_instantiate_new_active_run_with_correct_arguments(
mock_authenticated_client, mock_active_run, mock_get_git_metadata):
# arrange
client = MLAideClient('project key')
used_artifacts = []
# act
active_run = client.start_new_run('experiment key', 'run name', used_artifacts, True)
# assert
mock_active_run.assert_called_once_with(
api_client=mock_authenticated_client.return_value,
project_key='project key',
run_name='run name',
git=mock_get_git_metadata.return_value,
experiment_key='experiment key',
used_artifacts=used_artifacts,
auto_create_experiment=True)
assert active_run == mock_active_run.return_value
def test_get_artifact_should_instantiate_new_active_artifact_with_correct_arguments(
mock_authenticated_client, mock_active_artifact):
# arrange
client = MLAideClient('project key')
# act
active_artifact = client.get_artifact('a name', 5)
# assert
mock_active_artifact.assert_called_once_with(
mock_authenticated_client.return_value, 'project key', 'a name', 5)
assert active_artifact == mock_active_artifact.return_value
def test_load_model_should_instantiate_new_active_artifact_with_correct_arguments_and_return_result_of_load_model(
mock_authenticated_client, mock_active_artifact):
# arrange
client = MLAideClient('project key')
mock_active_artifact.return_value.load_model.return_value = "the deserialized model"
# act
model = client.load_model('model name', 7)
# assert
mock_active_artifact.assert_called_once_with(
mock_authenticated_client.return_value, 'project key', 'model name', 7, None)
assert model == "the deserialized model"
def test_load_model_should_pass_stage_to_active_artifact(mock_authenticated_client, mock_active_artifact):
# arrange
client = MLAideClient('project key')
# act
client.load_model('model name', stage=ModelStage.PRODUCTION)
# assert
mock_active_artifact.assert_called_once_with(
mock_authenticated_client.return_value, 'project key', 'model name', None, ModelStage.PRODUCTION)
def test_load_model_should_raise_error_when_version_and_stage_are_defined():
# arrange
client = MLAideClient('project key')
# act
with pytest.raises(ValueError):
client.load_model('model name', version=3, stage=ModelStage.PRODUCTION)
| from pytest_mock.plugin import MockerFixture
import pytest
from mlaide import MLAideClient, ConnectionOptions, ModelStage
@pytest.fixture
def mock_authenticated_client(mocker: MockerFixture):
return mocker.patch('mlaide.client.AuthenticatedClient')
@pytest.fixture
def mock_active_run(mocker: MockerFixture):
return mocker.patch('mlaide.client.ActiveRun')
@pytest.fixture
def mock_active_artifact(mocker: MockerFixture):
return mocker.patch('mlaide.client.ActiveArtifact')
@pytest.fixture
def mock_get_git_metadata(mocker: MockerFixture):
return mocker.patch('mlaide.client.get_git_metadata')
def test_init_should_raise_value_error_if_project_key_is_none():
with pytest.raises(ValueError):
# noinspection PyTypeChecker
MLAideClient(None)
def test_init_should_use_default_options_if_no_options_provided(monkeypatch):
# arrange
monkeypatch.setenv('MLAIDE_API_KEY', 'the api key')
# act
client = MLAideClient('project key', options=None)
# assert
options = client.options
assert options.api_key == 'the api key'
assert options.server_url == 'http://localhost:9000/api/v1'
def test_init_should_use_merge_provided_options_with_default_options(monkeypatch):
# arrange
monkeypatch.setenv('MLAIDE_API_KEY', 'the api key')
# act
client = MLAideClient('project key', options=ConnectionOptions(server_url='http://my-server.com'))
# assert
options = client.options
assert options.api_key == 'the api key'
assert options.server_url == 'http://my-server.com'
def test_init_should_create_authenticated_client(mock_authenticated_client):
# act
client = MLAideClient('project key', options=ConnectionOptions(server_url='http://my-server.com', api_key='the key'))
# assert
mock_authenticated_client.assert_called_once_with(base_url='http://my-server.com', api_key='the key')
assert client.api_client == mock_authenticated_client.return_value
def test_start_new_run_should_instantiate_new_active_run_with_correct_arguments(
mock_authenticated_client, mock_active_run, mock_get_git_metadata):
# arrange
client = MLAideClient('project key')
used_artifacts = []
# act
active_run = client.start_new_run('experiment key', 'run name', used_artifacts)
# assert
mock_active_run.assert_called_once_with(
api_client=mock_authenticated_client.return_value,
project_key='project key',
run_name='run name',
git=mock_get_git_metadata.return_value,
experiment_key='experiment key',
used_artifacts=used_artifacts,
auto_create_experiment=True)
assert active_run == mock_active_run.return_value
def test_start_new_run_and_do_not_auto_create_experiment_should_instantiate_new_active_run_with_correct_arguments(
mock_authenticated_client, mock_active_run, mock_get_git_metadata):
# arrange
client = MLAideClient('project key')
used_artifacts = []
# act
active_run = client.start_new_run('experiment key', 'run name', used_artifacts, False)
# assert
mock_active_run.assert_called_once_with(
api_client=mock_authenticated_client.return_value,
project_key='project key',
run_name='run name',
git=mock_get_git_metadata.return_value,
experiment_key='experiment key',
used_artifacts=used_artifacts,
auto_create_experiment=False)
assert active_run == mock_active_run.return_value
def test_start_new_run_and_do_auto_create_experiment_should_instantiate_new_active_run_with_correct_arguments(
mock_authenticated_client, mock_active_run, mock_get_git_metadata):
# arrange
client = MLAideClient('project key')
used_artifacts = []
# act
active_run = client.start_new_run('experiment key', 'run name', used_artifacts, True)
# assert
mock_active_run.assert_called_once_with(
api_client=mock_authenticated_client.return_value,
project_key='project key',
run_name='run name',
git=mock_get_git_metadata.return_value,
experiment_key='experiment key',
used_artifacts=used_artifacts,
auto_create_experiment=True)
assert active_run == mock_active_run.return_value
def test_get_artifact_should_instantiate_new_active_artifact_with_correct_arguments(
mock_authenticated_client, mock_active_artifact):
# arrange
client = MLAideClient('project key')
# act
active_artifact = client.get_artifact('a name', 5)
# assert
mock_active_artifact.assert_called_once_with(
mock_authenticated_client.return_value, 'project key', 'a name', 5)
assert active_artifact == mock_active_artifact.return_value
def test_load_model_should_instantiate_new_active_artifact_with_correct_arguments_and_return_result_of_load_model(
mock_authenticated_client, mock_active_artifact):
# arrange
client = MLAideClient('project key')
mock_active_artifact.return_value.load_model.return_value = "the deserialized model"
# act
model = client.load_model('model name', 7)
# assert
mock_active_artifact.assert_called_once_with(
mock_authenticated_client.return_value, 'project key', 'model name', 7, None)
assert model == "the deserialized model"
def test_load_model_should_pass_stage_to_active_artifact(mock_authenticated_client, mock_active_artifact):
# arrange
client = MLAideClient('project key')
# act
client.load_model('model name', stage=ModelStage.PRODUCTION)
# assert
mock_active_artifact.assert_called_once_with(
mock_authenticated_client.return_value, 'project key', 'model name', None, ModelStage.PRODUCTION)
def test_load_model_should_raise_error_when_version_and_stage_are_defined():
# arrange
client = MLAideClient('project key')
# act
with pytest.raises(ValueError):
client.load_model('model name', version=3, stage=ModelStage.PRODUCTION) | en | 0.700609 | # noinspection PyTypeChecker # arrange # act # assert # arrange # act # assert # act # assert # arrange # act # assert # arrange # act # assert # arrange # act # assert # arrange # act # assert # arrange # act # assert # arrange # act # assert # arrange # act | 1.993231 | 2 |
python/plot_voronoi3d.py | yuyttenhove/cVoronoi | 0 | 6620138 | <filename>python/plot_voronoi3d.py
import mpl_toolkits.mplot3d as a3
from matplotlib import pylab as pl
import numpy as np
def plot_voronoi(generators, vertices):
fig = pl.figure()
axes = fig.add_subplot(111, projection='3d')
# axes = a3.Axes3D(pl.figure())
poly3dcollection = a3.art3d.Poly3DCollection(vertices, facecolors="g", linewidth=1, alpha=0.3)
poly3dcollection.set_edgecolor("k")
# poly3dcollection.set_alpha(1)
# poly3dcollection.set_color('grey')
axes.add_collection3d(poly3dcollection)
axes.plot(generators[:, 0], generators[:, 1], generators[:, 2], 'ko')
# axes.set_axis_off()
axes.set_xlim([-.1, 1.1])
axes.set_ylim([-.1, 1.1])
axes.set_zlim([-.1, 1.1])
pl.show()
def main(fname):
with open(fname, "r") as file:
lines = file.readlines()
lines = [line[:-1].split("\t") for line in lines]
generators = np.stack([np.array(line[1:]) for line in lines if line[0] == "G"]).astype(float)
centroids = [line for line in lines if line[0] == "C"]
volumes = np.array([np.array(line[-2]) for line in centroids]).astype(float)
n_neighbours = np.array([np.array(line[-1]) for line in centroids]).astype(int)
centroids = np.stack([np.array(line[1:-2]) for line in centroids]).astype(float)
faces = [line[1:] for line in lines if line[0] == "F"]
sid = np.array([np.array(line[0]) for line in faces]).astype(int)
areas = np.array([np.array(line[1]) for line in faces]).astype(float)
midpoints = np.stack([np.array(line[2:5]) for line in faces]).astype(float)
vertices = [np.stack([np.array(c[1:-1].split(", ")) for c in line[5:]]).astype(float) for line in faces]
plot_voronoi(generators, vertices)
if __name__ == "__main__":
main("vtest001.txt")
| <filename>python/plot_voronoi3d.py
import mpl_toolkits.mplot3d as a3
from matplotlib import pylab as pl
import numpy as np
def plot_voronoi(generators, vertices):
fig = pl.figure()
axes = fig.add_subplot(111, projection='3d')
# axes = a3.Axes3D(pl.figure())
poly3dcollection = a3.art3d.Poly3DCollection(vertices, facecolors="g", linewidth=1, alpha=0.3)
poly3dcollection.set_edgecolor("k")
# poly3dcollection.set_alpha(1)
# poly3dcollection.set_color('grey')
axes.add_collection3d(poly3dcollection)
axes.plot(generators[:, 0], generators[:, 1], generators[:, 2], 'ko')
# axes.set_axis_off()
axes.set_xlim([-.1, 1.1])
axes.set_ylim([-.1, 1.1])
axes.set_zlim([-.1, 1.1])
pl.show()
def main(fname):
with open(fname, "r") as file:
lines = file.readlines()
lines = [line[:-1].split("\t") for line in lines]
generators = np.stack([np.array(line[1:]) for line in lines if line[0] == "G"]).astype(float)
centroids = [line for line in lines if line[0] == "C"]
volumes = np.array([np.array(line[-2]) for line in centroids]).astype(float)
n_neighbours = np.array([np.array(line[-1]) for line in centroids]).astype(int)
centroids = np.stack([np.array(line[1:-2]) for line in centroids]).astype(float)
faces = [line[1:] for line in lines if line[0] == "F"]
sid = np.array([np.array(line[0]) for line in faces]).astype(int)
areas = np.array([np.array(line[1]) for line in faces]).astype(float)
midpoints = np.stack([np.array(line[2:5]) for line in faces]).astype(float)
vertices = [np.stack([np.array(c[1:-1].split(", ")) for c in line[5:]]).astype(float) for line in faces]
plot_voronoi(generators, vertices)
if __name__ == "__main__":
main("vtest001.txt")
| en | 0.094706 | # axes = a3.Axes3D(pl.figure()) # poly3dcollection.set_alpha(1) # poly3dcollection.set_color('grey') # axes.set_axis_off() | 3.039329 | 3 |
scripts/cam_visualizations.py | mmaaz60/ssl_for_fgvc | 10 | 6620139 | import sys
import os
import argparse
import torch
import numpy as np
from PIL import Image
from torchvision import transforms
from pytorch_grad_cam import GradCAM, ScoreCAM, GradCAMPlusPlus, AblationCAM, XGradCAM
from pytorch_grad_cam.utils.image import show_cam_on_image
# Add the root folder (ssl_for_fgvc) as the path
sys.path.append(f"{'/'.join(os.getcwd().split('/')[:-1])}")
from config.config import Configuration as config
from dataloader.common import Dataloader
from model.common import Model
from utils.util import get_object_from_path
class CAMVisualization:
"""
The class implements the process of getting a cam visualization of an image for a specified model.
"""
def __init__(self, model, model_name, cam_method='GradCAM'):
"""
Constructor, the function initializes the class variables.
:param model: Model to be used for the CAM visualization
:param model_name: Model name (as per config.yml)
:param cam_method: The method to be used for CAM calculation. Available options are
"GradCAM, ScoreCAM, GradCAMPlusPlus, AblationCAM, XGradCAM"
"""
self.model = model.eval() # Put the model in the evaluation mode
self.model_name = model_name # The model name (as per the config.yml)
self.cam_method = cam_method # The cam method
self.target_layer = None # The target layer used to calculate the CAM
self.cam = None # The calculated CAM
self._set_target_layer() # Set the target layer as per the specified model name
self._set_cam() # Set cam as per the specified cam method
def _set_target_layer(self):
"""
The function selects the target layer as per the specified model name.
"""
if self.model_name == "torchvision" or self.model_name == "torchvision_ssl_rotation":
self.target_layer = self.model.model.layer4[-1]
elif self.model_name == "torchvision_ssl_pirl":
self.target_layer = self.model.feature_extractor[-2][-1]
elif self.model_name == "dcl":
self.target_layer = self.model.feature_extractor[-1][-1]
else:
print(f"Given model ({self.model_name}) is not supported. Exiting!")
sys.exit(1)
def _set_cam(self):
"""
The function selects the cam visualization method specified by cam_method
"""
if self.cam_method == "GradCAM":
self.cam = GradCAM(model=self.model, target_layer=self.target_layer, use_cuda=True)
elif self.cam_method == "GradCAMPlusPlus":
self.cam = GradCAMPlusPlus(model=self.model, target_layer=self.target_layer, use_cuda=True)
elif self.cam_method == "ScoreCAM":
self.cam = ScoreCAM(model=self.model, target_layer=self.target_layer, use_cuda=True)
elif self.cam_method == "AblationCAM":
self.cam = AblationCAM(model=self.model, target_layer=self.target_layer, use_cuda=True)
elif self.cam_method == "XGradCAM":
self.cam = XGradCAM(model=self.model, target_layer=self.target_layer, use_cuda=True)
else:
self.cam = GradCAM(model=self.model, target_layer=self.target_layer, use_cuda=True)
def get_cam_image(self, x, x_orig):
"""
The function interpolates the class activation maps and return an image of required size
:param x: Batch of images (b, c, h, w)
"""
grayscale_cam = self.cam(input_tensor=x, target_category=1)
visualization = show_cam_on_image(np.array(x_orig, dtype=np.float32) / 255.0, grayscale_cam, use_rgb=True)
pil_image = Image.fromarray(visualization)
# Get the classification label
cls_scores = self.model(x)
_, label = torch.max(cls_scores, 1)
return pil_image, int(label.detach())
def parse_arguments():
"""
Parse the command line arguments
"""
ap = argparse.ArgumentParser()
ap.add_argument("-config", "--config_path", required=True,
help="The path to the pipeline .yml configuration file.")
ap.add_argument("-cam", "--cam_method", required=False, default='GradCAM',
help="Cam method to use. Possible options are "
"[GradCAM, ScoreCAM, GradCAMPlusPlus, AblationCAM, XGradCAM]")
ap.add_argument("-checkpoints", "--model_checkpoints", required=True,
help="The path to model checkpoints.")
ap.add_argument("-dataset", "--root_dataset_path", required=False, default="./data/CUB_200_2011",
help="The path to the dataset root directory. "
"The program will download the dataset if not present locally.")
ap.add_argument("-save", "--output_directory", required=True,
help="The path to output directory to save the visualizations.")
ap.add_argument("-dim", "--output_dim", type=int, required=False, default=448,
help="The output dimensions of the images overlayed with CAMs.")
ap.add_argument("-d", "--device", required=False, default='cuda',
help="The computation device to perform operations ('cpu', 'cuda')")
args = vars(ap.parse_args())
return args
def main():
"""
Implements the main flow, i.e. load the dataset & model, generate cam visualizations and save the visualizations
"""
args = parse_arguments() # Parse arguments
# Create the output directory if not exists
if not os.path.exists(args["output_directory"]):
os.makedirs(args["output_directory"])
if not os.path.exists(f"{args['output_directory']}/correct_predictions"):
os.mkdir(f"{args['output_directory']}/correct_predictions")
if not os.path.exists(f"{args['output_directory']}/wrong_predictions"):
os.mkdir(f"{args['output_directory']}/wrong_predictions")
config.load_config(args["config_path"]) # Load configuration
config.cfg["dataloader"]["root_directory_path"] = args["root_dataset_path"] # Set the dataset path
_, test_loader = Dataloader(config=config).get_loader() # Create dataloader
# Get the required attributes from the dataset
data = test_loader.dataset.data.values
data = data[np.argsort(data[:, 0])]
image_ids = data[:, 0]
test_image_paths = data[:, 1]
test_image_labels = data[:, 2]
# Create the model
model = Model(config=config).get_model()
model = model.to(args["device"])
# Load pretrained weights
checkpoints_path = args["model_checkpoints"]
checkpoints = torch.load(checkpoints_path)
model.load_state_dict(checkpoints["state_dict"], strict=True)
# Create CAM visualizer object
visualizer = CAMVisualization(model, config.cfg["model"]["name"], cam_method=args["cam_method"])
# Create transforms for performing inference
resize_dim = (config.cfg["dataloader"]["resize_width"], config.cfg["dataloader"]["resize_height"])
infer_dim = args["output_dim"]
test_transforms = config.cfg["dataloader"]["transforms"]["test"]
test_transform = transforms.Compose(
[
get_object_from_path(test_transforms[i]['path'])(**test_transforms[i]['param'])
if 'param' in test_transforms[i].keys()
else get_object_from_path(test_transforms[i]['path'])() for i in test_transforms.keys()
]
)
# Iterate over the dataset
for image_info in zip(image_ids, test_image_paths, test_image_labels):
image_id, image_path, image_label = image_info
full_path = os.path.join(config.cfg["dataloader"]["root_directory_path"],
"CUB_200_2011/images", image_path)
input = Image.open(full_path).convert('RGB')
input = input.resize(resize_dim, Image.ANTIALIAS)
input_trans = test_transform(input) # Transform the image
input_trans = torch.unsqueeze(input_trans, 0)
input_trans = input_trans.to(args["device"])
# Get the cam image
output_image, predicted_label = visualizer.get_cam_image(input_trans,
input.resize((infer_dim, infer_dim), Image.ANTIALIAS))
# Write the cam images to the disc
predicted_label += 1
if predicted_label == image_label:
# Save the PIL image
output_image.save(f"{args['output_directory']}/correct_predictions/"
f"{image_id}_{image_label}_{predicted_label}.jpg")
else:
# Save the PIL image
output_image.save(f"{args['output_directory']}/wrong_predictions/"
f"{image_id}_{image_label}_{predicted_label}.jpg")
if __name__ == "__main__":
main()
| import sys
import os
import argparse
import torch
import numpy as np
from PIL import Image
from torchvision import transforms
from pytorch_grad_cam import GradCAM, ScoreCAM, GradCAMPlusPlus, AblationCAM, XGradCAM
from pytorch_grad_cam.utils.image import show_cam_on_image
# Add the root folder (ssl_for_fgvc) as the path
sys.path.append(f"{'/'.join(os.getcwd().split('/')[:-1])}")
from config.config import Configuration as config
from dataloader.common import Dataloader
from model.common import Model
from utils.util import get_object_from_path
class CAMVisualization:
"""
The class implements the process of getting a cam visualization of an image for a specified model.
"""
def __init__(self, model, model_name, cam_method='GradCAM'):
"""
Constructor, the function initializes the class variables.
:param model: Model to be used for the CAM visualization
:param model_name: Model name (as per config.yml)
:param cam_method: The method to be used for CAM calculation. Available options are
"GradCAM, ScoreCAM, GradCAMPlusPlus, AblationCAM, XGradCAM"
"""
self.model = model.eval() # Put the model in the evaluation mode
self.model_name = model_name # The model name (as per the config.yml)
self.cam_method = cam_method # The cam method
self.target_layer = None # The target layer used to calculate the CAM
self.cam = None # The calculated CAM
self._set_target_layer() # Set the target layer as per the specified model name
self._set_cam() # Set cam as per the specified cam method
def _set_target_layer(self):
"""
The function selects the target layer as per the specified model name.
"""
if self.model_name == "torchvision" or self.model_name == "torchvision_ssl_rotation":
self.target_layer = self.model.model.layer4[-1]
elif self.model_name == "torchvision_ssl_pirl":
self.target_layer = self.model.feature_extractor[-2][-1]
elif self.model_name == "dcl":
self.target_layer = self.model.feature_extractor[-1][-1]
else:
print(f"Given model ({self.model_name}) is not supported. Exiting!")
sys.exit(1)
def _set_cam(self):
"""
The function selects the cam visualization method specified by cam_method
"""
if self.cam_method == "GradCAM":
self.cam = GradCAM(model=self.model, target_layer=self.target_layer, use_cuda=True)
elif self.cam_method == "GradCAMPlusPlus":
self.cam = GradCAMPlusPlus(model=self.model, target_layer=self.target_layer, use_cuda=True)
elif self.cam_method == "ScoreCAM":
self.cam = ScoreCAM(model=self.model, target_layer=self.target_layer, use_cuda=True)
elif self.cam_method == "AblationCAM":
self.cam = AblationCAM(model=self.model, target_layer=self.target_layer, use_cuda=True)
elif self.cam_method == "XGradCAM":
self.cam = XGradCAM(model=self.model, target_layer=self.target_layer, use_cuda=True)
else:
self.cam = GradCAM(model=self.model, target_layer=self.target_layer, use_cuda=True)
def get_cam_image(self, x, x_orig):
"""
The function interpolates the class activation maps and return an image of required size
:param x: Batch of images (b, c, h, w)
"""
grayscale_cam = self.cam(input_tensor=x, target_category=1)
visualization = show_cam_on_image(np.array(x_orig, dtype=np.float32) / 255.0, grayscale_cam, use_rgb=True)
pil_image = Image.fromarray(visualization)
# Get the classification label
cls_scores = self.model(x)
_, label = torch.max(cls_scores, 1)
return pil_image, int(label.detach())
def parse_arguments():
"""
Parse the command line arguments
"""
ap = argparse.ArgumentParser()
ap.add_argument("-config", "--config_path", required=True,
help="The path to the pipeline .yml configuration file.")
ap.add_argument("-cam", "--cam_method", required=False, default='GradCAM',
help="Cam method to use. Possible options are "
"[GradCAM, ScoreCAM, GradCAMPlusPlus, AblationCAM, XGradCAM]")
ap.add_argument("-checkpoints", "--model_checkpoints", required=True,
help="The path to model checkpoints.")
ap.add_argument("-dataset", "--root_dataset_path", required=False, default="./data/CUB_200_2011",
help="The path to the dataset root directory. "
"The program will download the dataset if not present locally.")
ap.add_argument("-save", "--output_directory", required=True,
help="The path to output directory to save the visualizations.")
ap.add_argument("-dim", "--output_dim", type=int, required=False, default=448,
help="The output dimensions of the images overlayed with CAMs.")
ap.add_argument("-d", "--device", required=False, default='cuda',
help="The computation device to perform operations ('cpu', 'cuda')")
args = vars(ap.parse_args())
return args
def main():
"""
Implements the main flow, i.e. load the dataset & model, generate cam visualizations and save the visualizations
"""
args = parse_arguments() # Parse arguments
# Create the output directory if not exists
if not os.path.exists(args["output_directory"]):
os.makedirs(args["output_directory"])
if not os.path.exists(f"{args['output_directory']}/correct_predictions"):
os.mkdir(f"{args['output_directory']}/correct_predictions")
if not os.path.exists(f"{args['output_directory']}/wrong_predictions"):
os.mkdir(f"{args['output_directory']}/wrong_predictions")
config.load_config(args["config_path"]) # Load configuration
config.cfg["dataloader"]["root_directory_path"] = args["root_dataset_path"] # Set the dataset path
_, test_loader = Dataloader(config=config).get_loader() # Create dataloader
# Get the required attributes from the dataset
data = test_loader.dataset.data.values
data = data[np.argsort(data[:, 0])]
image_ids = data[:, 0]
test_image_paths = data[:, 1]
test_image_labels = data[:, 2]
# Create the model
model = Model(config=config).get_model()
model = model.to(args["device"])
# Load pretrained weights
checkpoints_path = args["model_checkpoints"]
checkpoints = torch.load(checkpoints_path)
model.load_state_dict(checkpoints["state_dict"], strict=True)
# Create CAM visualizer object
visualizer = CAMVisualization(model, config.cfg["model"]["name"], cam_method=args["cam_method"])
# Create transforms for performing inference
resize_dim = (config.cfg["dataloader"]["resize_width"], config.cfg["dataloader"]["resize_height"])
infer_dim = args["output_dim"]
test_transforms = config.cfg["dataloader"]["transforms"]["test"]
test_transform = transforms.Compose(
[
get_object_from_path(test_transforms[i]['path'])(**test_transforms[i]['param'])
if 'param' in test_transforms[i].keys()
else get_object_from_path(test_transforms[i]['path'])() for i in test_transforms.keys()
]
)
# Iterate over the dataset
for image_info in zip(image_ids, test_image_paths, test_image_labels):
image_id, image_path, image_label = image_info
full_path = os.path.join(config.cfg["dataloader"]["root_directory_path"],
"CUB_200_2011/images", image_path)
input = Image.open(full_path).convert('RGB')
input = input.resize(resize_dim, Image.ANTIALIAS)
input_trans = test_transform(input) # Transform the image
input_trans = torch.unsqueeze(input_trans, 0)
input_trans = input_trans.to(args["device"])
# Get the cam image
output_image, predicted_label = visualizer.get_cam_image(input_trans,
input.resize((infer_dim, infer_dim), Image.ANTIALIAS))
# Write the cam images to the disc
predicted_label += 1
if predicted_label == image_label:
# Save the PIL image
output_image.save(f"{args['output_directory']}/correct_predictions/"
f"{image_id}_{image_label}_{predicted_label}.jpg")
else:
# Save the PIL image
output_image.save(f"{args['output_directory']}/wrong_predictions/"
f"{image_id}_{image_label}_{predicted_label}.jpg")
if __name__ == "__main__":
main()
| en | 0.650111 | # Add the root folder (ssl_for_fgvc) as the path The class implements the process of getting a cam visualization of an image for a specified model. Constructor, the function initializes the class variables. :param model: Model to be used for the CAM visualization :param model_name: Model name (as per config.yml) :param cam_method: The method to be used for CAM calculation. Available options are "GradCAM, ScoreCAM, GradCAMPlusPlus, AblationCAM, XGradCAM" # Put the model in the evaluation mode # The model name (as per the config.yml) # The cam method # The target layer used to calculate the CAM # The calculated CAM # Set the target layer as per the specified model name # Set cam as per the specified cam method The function selects the target layer as per the specified model name. The function selects the cam visualization method specified by cam_method The function interpolates the class activation maps and return an image of required size :param x: Batch of images (b, c, h, w) # Get the classification label Parse the command line arguments Implements the main flow, i.e. load the dataset & model, generate cam visualizations and save the visualizations # Parse arguments # Create the output directory if not exists # Load configuration # Set the dataset path # Create dataloader # Get the required attributes from the dataset # Create the model # Load pretrained weights # Create CAM visualizer object # Create transforms for performing inference # Iterate over the dataset # Transform the image # Get the cam image # Write the cam images to the disc # Save the PIL image # Save the PIL image | 2.826313 | 3 |
venv/lib/python3.8/site-packages/azureml/_project/project_manager.py | amcclead7336/Enterprise_Data_Science_Final | 0 | 6620140 | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import os
import shutil
import json
from azureml._project.ignore_file import AmlIgnoreFile
import azureml._project.file_utilities as file_utilities
import azureml._project.project_info as project_info
import azureml._project.project_mapper as project_mapper
from azureml._base_sdk_common import __version__ as package_version
_default_git_folder_name = ".git"
_asset_folder_name = "assets"
_base_project_contents_folder_name = "base_project_files"
_conda_dependencies_file_name = "conda_dependencies.yml"
_history_branch_name = "AzureMLHistory"
_link_repo_commit_message = "link"
_create_project_commit_message = "Initial commit"
_run_history_push_commit_message = "Run history"
def _current_index():
requirements_index = None
index_file_path = os.path.join(os.path.dirname(__file__), "index_location.txt")
with open(index_file_path, "r") as file:
prerelease_index = file.read().strip()
if prerelease_index:
requirements_index = prerelease_index
return requirements_index
def _sdk_scope():
scope = []
scope_file_path = os.path.join(os.path.dirname(__file__), "azureml_sdk_scope.txt")
if os.path.exists(scope_file_path):
with open(scope_file_path, "r") as file:
scope = [line.strip() for line in file.readlines()]
return scope
def _base_images_current_tags():
images = {}
current_base_images_file = os.path.join(os.path.dirname(__file__), "azureml_base_images.json")
if os.path.exists(current_base_images_file):
try:
with open(current_base_images_file) as file:
images = json.loads(file.read())
except:
pass
return images
def _get_tagged_image(image_name, default_tag=None):
"""
Return tagged image from azureml_base_images.json, pin to default_tag if missing, else as is
"""
images = _base_images_current_tags()
tag = images.get(image_name, None)
if tag:
return image_name + ":" + tag
else:
return image_name + ((":" + default_tag) if default_tag else "")
def _update_requirements_binding(repo_path, config_dir_to_use):
# These should remain None for the local development scenario.
requirements_version = None
# Set the package version from the __version__ if it's not local development default.
if not package_version.endswith("+dev"):
requirements_version = package_version
requirements_index = _current_index()
default_index = "https://azuremlsdktestpypi.azureedge.net/sdk-release/Preview/E7501C02541B433786111FE8E140CAA1"
conda_dependencies_path = os.path.join(repo_path, config_dir_to_use, _conda_dependencies_file_name)
lines = []
with open(conda_dependencies_path, "r") as infile:
for line in infile:
if requirements_version:
line = line.replace("azureml-defaults", "azureml-defaults==" + requirements_version)
if requirements_index:
line = line.replace(default_index, requirements_index)
lines.append(line)
with open(conda_dependencies_path, 'w') as outfile:
for line in lines:
outfile.write(line)
def attach_project(project_id, project_path, scope, compute_target_dict):
"""
Attaches a local folder specified by project_path as a project.
:type project_id: str
:type project_path: str
:type scope: str
:rtype: None
"""
from azureml._base_sdk_common.common import get_run_config_dir_name
is_existing_dir = os.path.isdir(project_path)
if not is_existing_dir:
# We creating all intermediate dirs too.
os.makedirs(os.path.abspath(project_path))
# check path is a full, rooted path
if not os.path.isabs(project_path):
raise ValueError("Selected directory is invalid")
# For backcompat case, where if path already has aml_config then we just use that, instead of
# creating .azureml
confing_dir_name_to_use = get_run_config_dir_name(project_path)
# check if path is already a project
original_project_info = project_info.get(project_path, no_recursive_check=True)
_create_metadata_folders(project_path, confing_dir_name_to_use)
# Only copying when repo_path is not already a project.
if not original_project_info:
_copy_default_files(os.path.join(project_path, confing_dir_name_to_use),
_base_project_contents_folder_name)
_update_requirements_binding(project_path, confing_dir_name_to_use)
# Creates local and docker runconfigs.
_create_default_run_configs(project_path, compute_target_dict)
# Overwriting if project.json already exists.
project_mapper.add_project(project_id, project_path, scope)
def delete_project(path):
"""
Removes project from mapping. Does not delete entire project from disk.
:type path: str
:rtype: None
"""
project_mapper.remove_project(path)
def _copy_default_files(path, default_fileset):
"""
Copy default files to folder
:type path: str
:rtype: None
"""
this_dir, this_filename = os.path.split(__file__)
default_files_path = os.path.join(this_dir, default_fileset)
if not os.path.exists(path):
os.mkdir(path)
for filename in os.listdir(default_files_path):
orig_path = os.path.join(default_files_path, filename)
new_path = os.path.join(path, filename)
if os.path.isdir(orig_path):
shutil.copytree(orig_path, new_path)
else:
if not os.path.exists(new_path):
shutil.copy(orig_path, new_path)
def _create_metadata_folders(path, confing_dir_name_to_use):
"""
Create metadata files and folders
:type path: str
:rtype: None
"""
file_utilities.create_directory(os.path.join(path, confing_dir_name_to_use))
aml_ignore = AmlIgnoreFile(path)
aml_ignore.create_if_not_exists()
def _ensure_directory_is_valid(path):
"""
Validate the directory
:type path: str
:rtype: None
"""
# check path is a full, rooted path
if not os.path.isabs(path):
raise ValueError("Selected directory is invalid")
# check if path is already a project
if project_info.get(path):
raise ValueError("Directory must not be an existing project")
def empty_function():
return
def _create_default_run_configs(project_directory, compute_target_dict):
"""
Creates a local.runconfig and docker.runconfig for a project.
:return: None
"""
from azureml.core.runconfig import RunConfiguration
# Mocking a project object, as RunConfiguration requires a Project object, but only requires
# project_directory field.
project_object = empty_function
project_object.project_directory = project_directory
# Creating a local runconfig.
local_run_config = RunConfiguration()
local_run_config.save(name="local", path=project_directory)
# Creating a docker runconfig.
docker_run_config = RunConfiguration()
docker_run_config.environment.docker.enabled = True
docker_run_config.save(name="docker", path=project_directory)
for compute_target_name, compute_target in compute_target_dict.items():
# Creating a compute runconfig.
compute_config = RunConfiguration()
if compute_target.type == 'HDInsight':
compute_config.framework = "PySpark"
else:
compute_config.framework = "Python"
compute_config.environment.docker.enabled = True
compute_config.target = compute_target_name
compute_config.save(name=compute_target_name, path=project_directory)
| # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import os
import shutil
import json
from azureml._project.ignore_file import AmlIgnoreFile
import azureml._project.file_utilities as file_utilities
import azureml._project.project_info as project_info
import azureml._project.project_mapper as project_mapper
from azureml._base_sdk_common import __version__ as package_version
_default_git_folder_name = ".git"
_asset_folder_name = "assets"
_base_project_contents_folder_name = "base_project_files"
_conda_dependencies_file_name = "conda_dependencies.yml"
_history_branch_name = "AzureMLHistory"
_link_repo_commit_message = "link"
_create_project_commit_message = "Initial commit"
_run_history_push_commit_message = "Run history"
def _current_index():
requirements_index = None
index_file_path = os.path.join(os.path.dirname(__file__), "index_location.txt")
with open(index_file_path, "r") as file:
prerelease_index = file.read().strip()
if prerelease_index:
requirements_index = prerelease_index
return requirements_index
def _sdk_scope():
scope = []
scope_file_path = os.path.join(os.path.dirname(__file__), "azureml_sdk_scope.txt")
if os.path.exists(scope_file_path):
with open(scope_file_path, "r") as file:
scope = [line.strip() for line in file.readlines()]
return scope
def _base_images_current_tags():
images = {}
current_base_images_file = os.path.join(os.path.dirname(__file__), "azureml_base_images.json")
if os.path.exists(current_base_images_file):
try:
with open(current_base_images_file) as file:
images = json.loads(file.read())
except:
pass
return images
def _get_tagged_image(image_name, default_tag=None):
"""
Return tagged image from azureml_base_images.json, pin to default_tag if missing, else as is
"""
images = _base_images_current_tags()
tag = images.get(image_name, None)
if tag:
return image_name + ":" + tag
else:
return image_name + ((":" + default_tag) if default_tag else "")
def _update_requirements_binding(repo_path, config_dir_to_use):
# These should remain None for the local development scenario.
requirements_version = None
# Set the package version from the __version__ if it's not local development default.
if not package_version.endswith("+dev"):
requirements_version = package_version
requirements_index = _current_index()
default_index = "https://azuremlsdktestpypi.azureedge.net/sdk-release/Preview/E7501C02541B433786111FE8E140CAA1"
conda_dependencies_path = os.path.join(repo_path, config_dir_to_use, _conda_dependencies_file_name)
lines = []
with open(conda_dependencies_path, "r") as infile:
for line in infile:
if requirements_version:
line = line.replace("azureml-defaults", "azureml-defaults==" + requirements_version)
if requirements_index:
line = line.replace(default_index, requirements_index)
lines.append(line)
with open(conda_dependencies_path, 'w') as outfile:
for line in lines:
outfile.write(line)
def attach_project(project_id, project_path, scope, compute_target_dict):
"""
Attaches a local folder specified by project_path as a project.
:type project_id: str
:type project_path: str
:type scope: str
:rtype: None
"""
from azureml._base_sdk_common.common import get_run_config_dir_name
is_existing_dir = os.path.isdir(project_path)
if not is_existing_dir:
# We creating all intermediate dirs too.
os.makedirs(os.path.abspath(project_path))
# check path is a full, rooted path
if not os.path.isabs(project_path):
raise ValueError("Selected directory is invalid")
# For backcompat case, where if path already has aml_config then we just use that, instead of
# creating .azureml
confing_dir_name_to_use = get_run_config_dir_name(project_path)
# check if path is already a project
original_project_info = project_info.get(project_path, no_recursive_check=True)
_create_metadata_folders(project_path, confing_dir_name_to_use)
# Only copying when repo_path is not already a project.
if not original_project_info:
_copy_default_files(os.path.join(project_path, confing_dir_name_to_use),
_base_project_contents_folder_name)
_update_requirements_binding(project_path, confing_dir_name_to_use)
# Creates local and docker runconfigs.
_create_default_run_configs(project_path, compute_target_dict)
# Overwriting if project.json already exists.
project_mapper.add_project(project_id, project_path, scope)
def delete_project(path):
"""
Removes project from mapping. Does not delete entire project from disk.
:type path: str
:rtype: None
"""
project_mapper.remove_project(path)
def _copy_default_files(path, default_fileset):
"""
Copy default files to folder
:type path: str
:rtype: None
"""
this_dir, this_filename = os.path.split(__file__)
default_files_path = os.path.join(this_dir, default_fileset)
if not os.path.exists(path):
os.mkdir(path)
for filename in os.listdir(default_files_path):
orig_path = os.path.join(default_files_path, filename)
new_path = os.path.join(path, filename)
if os.path.isdir(orig_path):
shutil.copytree(orig_path, new_path)
else:
if not os.path.exists(new_path):
shutil.copy(orig_path, new_path)
def _create_metadata_folders(path, confing_dir_name_to_use):
"""
Create metadata files and folders
:type path: str
:rtype: None
"""
file_utilities.create_directory(os.path.join(path, confing_dir_name_to_use))
aml_ignore = AmlIgnoreFile(path)
aml_ignore.create_if_not_exists()
def _ensure_directory_is_valid(path):
"""
Validate the directory
:type path: str
:rtype: None
"""
# check path is a full, rooted path
if not os.path.isabs(path):
raise ValueError("Selected directory is invalid")
# check if path is already a project
if project_info.get(path):
raise ValueError("Directory must not be an existing project")
def empty_function():
return
def _create_default_run_configs(project_directory, compute_target_dict):
"""
Creates a local.runconfig and docker.runconfig for a project.
:return: None
"""
from azureml.core.runconfig import RunConfiguration
# Mocking a project object, as RunConfiguration requires a Project object, but only requires
# project_directory field.
project_object = empty_function
project_object.project_directory = project_directory
# Creating a local runconfig.
local_run_config = RunConfiguration()
local_run_config.save(name="local", path=project_directory)
# Creating a docker runconfig.
docker_run_config = RunConfiguration()
docker_run_config.environment.docker.enabled = True
docker_run_config.save(name="docker", path=project_directory)
for compute_target_name, compute_target in compute_target_dict.items():
# Creating a compute runconfig.
compute_config = RunConfiguration()
if compute_target.type == 'HDInsight':
compute_config.framework = "PySpark"
else:
compute_config.framework = "Python"
compute_config.environment.docker.enabled = True
compute_config.target = compute_target_name
compute_config.save(name=compute_target_name, path=project_directory)
| en | 0.759515 | # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- Return tagged image from azureml_base_images.json, pin to default_tag if missing, else as is # These should remain None for the local development scenario. # Set the package version from the __version__ if it's not local development default. Attaches a local folder specified by project_path as a project.
:type project_id: str
:type project_path: str
:type scope: str
:rtype: None # We creating all intermediate dirs too. # check path is a full, rooted path # For backcompat case, where if path already has aml_config then we just use that, instead of # creating .azureml # check if path is already a project # Only copying when repo_path is not already a project. # Creates local and docker runconfigs. # Overwriting if project.json already exists. Removes project from mapping. Does not delete entire project from disk.
:type path: str
:rtype: None Copy default files to folder
:type path: str
:rtype: None Create metadata files and folders
:type path: str
:rtype: None Validate the directory
:type path: str
:rtype: None # check path is a full, rooted path # check if path is already a project Creates a local.runconfig and docker.runconfig for a project.
:return: None # Mocking a project object, as RunConfiguration requires a Project object, but only requires # project_directory field. # Creating a local runconfig. # Creating a docker runconfig. # Creating a compute runconfig. | 2.063835 | 2 |
mathdeck/display.py | patrickspencer/mathdeck | 1 | 6620141 | <filename>mathdeck/display.py
# -*- coding: utf-8 -*-
"""
mathdeck.display
~~~~~~~~~~~~~~~~
This module displays a problem by running the main
problem file through a given template.
:copyright: (c) 2014-2016 by <NAME>.
:license: Apache 2.0, see ../LICENSE for more details.
"""
import os
from jinja2 import Environment, FileSystemLoader
class Template(object):
"""
usage:
>> from mathdeck import load, settings
>>
>> problem = 'example1'
>> problem_lib = settings.problem_libs['main']
>> problem_path = problem_lib + problem + '/__init__.py'
>> problem_module = load.load_file_as_module(problem_path)
>> print(display_prob_from_template(problem_path,'web'))
"""
def __init__(self,prob_path,template_name):
self.prob_dir = prob_path
self.template_name = template_name
self.prob_dir = os.path.dirname(prob_path)
self.template_path = prob_dir + '/templates'
self.env = Environment(loader=FileSystemLoader(template_path))
self.template_name = '%s.jinja2' % template
self.template = env.get_template(template_name)
def render(self)
context = problem_module.template_variables
return template.render(**context)
| <filename>mathdeck/display.py
# -*- coding: utf-8 -*-
"""
mathdeck.display
~~~~~~~~~~~~~~~~
This module displays a problem by running the main
problem file through a given template.
:copyright: (c) 2014-2016 by <NAME>.
:license: Apache 2.0, see ../LICENSE for more details.
"""
import os
from jinja2 import Environment, FileSystemLoader
class Template(object):
"""
usage:
>> from mathdeck import load, settings
>>
>> problem = 'example1'
>> problem_lib = settings.problem_libs['main']
>> problem_path = problem_lib + problem + '/__init__.py'
>> problem_module = load.load_file_as_module(problem_path)
>> print(display_prob_from_template(problem_path,'web'))
"""
def __init__(self,prob_path,template_name):
self.prob_dir = prob_path
self.template_name = template_name
self.prob_dir = os.path.dirname(prob_path)
self.template_path = prob_dir + '/templates'
self.env = Environment(loader=FileSystemLoader(template_path))
self.template_name = '%s.jinja2' % template
self.template = env.get_template(template_name)
def render(self)
context = problem_module.template_variables
return template.render(**context)
| en | 0.502824 | # -*- coding: utf-8 -*- mathdeck.display ~~~~~~~~~~~~~~~~ This module displays a problem by running the main problem file through a given template. :copyright: (c) 2014-2016 by <NAME>. :license: Apache 2.0, see ../LICENSE for more details. usage: >> from mathdeck import load, settings >> >> problem = 'example1' >> problem_lib = settings.problem_libs['main'] >> problem_path = problem_lib + problem + '/__init__.py' >> problem_module = load.load_file_as_module(problem_path) >> print(display_prob_from_template(problem_path,'web')) | 2.598012 | 3 |
ship/tomcat.py | universitatjaumei/ship | 0 | 6620142 | <filename>ship/tomcat.py
from logger import ShipLogger
from time import sleep, strftime
from commands import *
import base64
class Tomcat:
def __init__(self, config):
self.host = config.get_tomcat_host()
self.home = config.get_tomcat_home()
self.base = config.get_tomcat_base()
self.version = config.get_tomcat_version()
self.user = config.get_tomcat_username()
self.password = config.get_tomcat_password()
self.http_port = config.get_tomcat_http_port()
self.ajp_port = config.get_tomcat_ajp_port()
self.jmx_port = config.get_tomcat_jmx_port()
self.redirect_port = config.get_tomcat_redirect_port()
self.shutdown_port = config.get_tomcat_shutdown_port()
self.deploy_dir = config.get_tomcat_deploy_directory()
self.memory = config.get_tomcat_memory()
self.logger = ShipLogger()
def startup(self):
result = run(self.home + "/bin/startup.sh", pty=False)
if result.return_code != 0:
error_message = "The server could not be started"
self.logger.error(error_message)
abort(error_message)
return
times = 1
while not self._running() and times < 10:
sleep(10)
times += 1
self.logger.info("Trying to start the tomcat server...")
if times == 10:
error_message = "Can not complete the server startup"
self.logger.error(error_message)
abort(error_message)
self.logger.info("Tomcat startup process completed")
def shutdown(self):
try:
result = run(self.home + "/bin/shutdown.sh -force")
except Exception as e:
pass
def deploy(self, module):
appname = module.get_name()
warfile = "%s/target/%s.war" % (module.get_directory(), appname)
run("rm -rf " + self.home + "/work")
run("rm -rf " + self.home + "/webapps/" + appname)
self.logger.info("Copying WAR of module '" + appname + "' to remote host: %s" % self.deploy_dir)
put(local_path=warfile, remote_path=self.deploy_dir)
def install(self):
current_date = strftime("%Y%m%d-%H%M%S")
run("wget -q http://static.uji.es/services/docker/apache-tomcat-%s.tar.gz -O /tmp/tomcat.tar.gz" % self.version)
run("tar xfz /tmp/tomcat.tar.gz -C %s" % self.base)
run("mv %s/apache-tomcat-%s %s" % (self.base, self.version, self.home))
run("rm /tmp/tomcat.tar.gz")
# configure_javahome_startup
for filename in ["startup.sh", "shutdown.sh"]:
remote_file = "%s/bin/%s" % (self.home, filename)
local_file = "/tmp/%s.%s" % (filename, current_date)
get(remote_file, local_file)
file = open(local_file, "r")
content = file.readlines()
file.close()
content.insert(21, "export JAVA_HOME=/mnt/data/aplicacions/sdk/jdk1.8.0_45\nexport PATH=$JAVA_HOME/bin:$PATH\n\n")
file = open(local_file, "w")
file.write("".join(content))
file.close()
put(local_file, remote_file)
# configure_tomcat_env
file = open("/tmp/setenv.sh.%s" % current_date, "w")
file.write("#!/bin/sh\n\n")
file.write("export LC_ALL=\"es_ES.UTF-8\"\n")
file.write("export LANG=\"es_ES.UTF-8\"\n")
file.write("export JAVA_OPTS=\"%s -Dfile.encoding=UTF-8 -XX:+CMSClassUnloadingEnabled\"\n" % self.memory)
file.write("export CATALINA_PID=$CATALINA_BASE/tomcat.pid\n")
file.write("export CATALINA_OPTS=\"-Djava.awt.headless=true -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port=%s -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false\"\n" % self.jmx_port)
file.close()
put("/tmp/setenv.sh.%s" % current_date, "%s/bin/setenv.sh" % self.home)
run("chmod u+x %s/bin/setenv.sh" % self.home)
# configure_tomcat
file = open("/tmp/server.xml.%s" % current_date, "w")
file.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n")
file.write("<Server port=\"%s\" shutdown=\"SHUTDOWN\">\n" % self.shutdown_port)
file.write(" <Listener className=\"org.apache.catalina.startup.VersionLoggerListener\" />\n")
file.write(" <Listener className=\"org.apache.catalina.core.AprLifecycleListener\" SSLEngine=\"on\" />\n")
file.write(" <Listener className=\"org.apache.catalina.core.JreMemoryLeakPreventionListener\" />\n")
file.write(" <Listener className=\"org.apache.catalina.mbeans.GlobalResourcesLifecycleListener\" />\n")
file.write(" <Listener className=\"org.apache.catalina.core.ThreadLocalLeakPreventionListener\" />\n\n")
file.write(" <GlobalNamingResources>\n")
file.write(" <Resource name=\"UserDatabase\" auth=\"Container\"\n")
file.write(" type=\"org.apache.catalina.UserDatabase\"\n")
file.write(" description=\"User database that can be updated and saved\"\n")
file.write(" factory=\"org.apache.catalina.users.MemoryUserDatabaseFactory\"\n")
file.write(" pathname=\"conf/tomcat-users.xml\" />\n")
file.write(" </GlobalNamingResources>\n\n")
file.write(" <Service name=\"Catalina\">\n")
file.write(" <Connector port=\"%s\" protocol=\"HTTP/1.1\" connectionTimeout=\"20000\" redirectPort=\"%s\" URIEncoding=\"UTF-8\" />\n" %
(self.http_port, self.redirect_port))
file.write(" <Connector port=\"%s\" protocol=\"AJP/1.3\" redirectPort=\"%s\" URIEncoding=\"UTF-8\" />\n\n" %
(self.ajp_port, self.redirect_port))
file.write(" <Engine name=\"Catalina\" defaultHost=\"localhost\">\n")
file.write(" <Realm className=\"org.apache.catalina.realm.LockOutRealm\">\n")
file.write(" <Realm className=\"org.apache.catalina.realm.UserDatabaseRealm\" resourceName=\"UserDatabase\"/>\n")
file.write(" </Realm>\n\n")
file.write(" <Host name=\"localhost\" appBase=\"webapps\" unpackWARs=\"true\" autoDeploy=\"false\">\n")
file.write(" <Valve className=\"org.apache.catalina.valves.AccessLogValve\" directory=\"logs\"\n")
file.write(" prefix=\"localhost_access_log\" suffix=\".txt\"\n")
file.write(" pattern=\"%h %l %u %t "%r" %s %b\" />\n")
file.write(" </Host>\n")
file.write(" </Engine>\n")
file.write(" </Service>\n")
file.write("</Server>")
file.close()
put("/tmp/server.xml.%s" % current_date, "%s/conf/server.xml" % self.home)
def uninstall(self):
if not directory_exists(self.home): return
current_date = strftime("%Y%m%d-%H%M%S")
self.shutdown()
run("mv %s /tmp/%s.%s" % (seself.home, self.home.split("/")[-1], current_date))
def _running(self):
try:
url = "http://%s:%s/manager/text/list" % (self.host, self.http_port)
hashed_password = base64.b64encode("%<PASSWORD>" % (self.user, self.password))
data = run("curl -H 'Authorization: Basic %s' %s" % (hashed_password, url))
return data[:4] == "OK -"
except:
import traceback
print traceback.format_exc()
return False
# def activate_redis_sessions(app, config):
# catalina_home = BASE + "/" + app
#
# local(
# "wget -q http://static.uji.es/services/docker/redis-store-1.3.0.BUILD-SNAPSHOT.jar -O %s/lib/redis-store-1.3.0.BUILD-SNAPSHOT.jar" % catalina_home)
#
# file = open("%s/conf/context.xml" % catalina_home, "w")
# file.write("<?xml version=\"1.0\" encoding=\"utf-8\"?>\n")
# file.write("<Context>\n")
# file.write(" <WatchedResource>WEB-INF/web.xml</WatchedResource>\n")
# file.write(" <WatchedResource>${catalina.base}/conf/web.xml</WatchedResource>\n")
# file.write(" <Valve className=\"com.gopivotal.manager.SessionFlushValve\" />\n")
# file.write(" <Manager className=\"org.apache.catalina.session.PersistentManager\">\n")
# file.write(" <Store className=\"com.gopivotal.manager.redis.RedisStore\" host=\"infra01.uji.es\" />\n")
# file.write(" </Manager>\n")
# file.write("</Context>")
# file.close()
#
#
# def configure_tomcat_access_manager(app, config):
# catalina_home = BASE + "/" + app
#
# file = open("%s/conf/tomcat-users.xml" % catalina_home, "w")
# file.write("<?xml version=\"1.0\" encoding=\"utf-8\"?>\n")
# file.write("<tomcat-users>\n")
# file.write(" <role rolename=\"manager-gui\"/>\n")
# file.write(" <role rolename=\"manager-script\"/>\n")
# file.write(" <user username=\"tomcat\" password=\"<PASSWORD>\" roles=\"manager-gui, manager-script\"/>\n")
# file.write("</tomcat-users>\n")
# file.close()
#
# if __name__ == "__main__":
# fabric.api.env.host_string = "<EMAIL>"
# fabric.api.env.password = "<PASSWORD>"
#
# app = "apa"
# config = ujiapps["apexp02.uji.es"][app]
#
# clear_if_exists(app, config)
#
# install_tomcat(app, config)
# configure_javahome_startup(app, config)
# configure_tomcat_env(app, config)
# activate_redis_sessions(app, config)
# configure_tomcat_access_manager(app, config)
# configure_tomcat(app, config)
| <filename>ship/tomcat.py
from logger import ShipLogger
from time import sleep, strftime
from commands import *
import base64
class Tomcat:
    """Manage a remote Apache Tomcat instance (install, configure, deploy,
    start/stop) via the fabric-style helpers ``run``/``put``/``get``/
    ``abort``/``directory_exists`` imported from the project's ``commands``
    module.  Written for Python 2 (fabric-era); kept 2/3-compatible here.
    """

    def __init__(self, config):
        # Cache every remote/deployment setting from the project Config.
        self.host = config.get_tomcat_host()
        self.home = config.get_tomcat_home()
        self.base = config.get_tomcat_base()
        self.version = config.get_tomcat_version()
        self.user = config.get_tomcat_username()
        self.password = config.get_tomcat_password()
        self.http_port = config.get_tomcat_http_port()
        self.ajp_port = config.get_tomcat_ajp_port()
        self.jmx_port = config.get_tomcat_jmx_port()
        self.redirect_port = config.get_tomcat_redirect_port()
        self.shutdown_port = config.get_tomcat_shutdown_port()
        self.deploy_dir = config.get_tomcat_deploy_directory()
        self.memory = config.get_tomcat_memory()
        self.logger = ShipLogger()

    def startup(self):
        """Run startup.sh and poll the manager app until it answers.

        Aborts (via ``abort``) if the script fails or the server has not
        come up after ~90 seconds of polling.
        """
        result = run(self.home + "/bin/startup.sh", pty=False)
        if result.return_code != 0:
            error_message = "The server could not be started"
            self.logger.error(error_message)
            abort(error_message)
            return
        # Poll up to 9 more times, 10 seconds apart.
        times = 1
        while not self._running() and times < 10:
            sleep(10)
            times += 1
            self.logger.info("Trying to start the tomcat server...")
        if times == 10:
            error_message = "Can not complete the server startup"
            self.logger.error(error_message)
            abort(error_message)
        self.logger.info("Tomcat startup process completed")

    def shutdown(self):
        """Force-stop the server; failures (e.g. not running) are ignored."""
        try:
            run(self.home + "/bin/shutdown.sh -force")
        except Exception:
            # Deliberate best-effort: stopping an already-stopped server
            # is not an error worth surfacing.
            pass

    def deploy(self, module):
        """Clean the work dir and old webapp, then upload the module's WAR."""
        appname = module.get_name()
        warfile = "%s/target/%s.war" % (module.get_directory(), appname)
        run("rm -rf " + self.home + "/work")
        run("rm -rf " + self.home + "/webapps/" + appname)
        self.logger.info("Copying WAR of module '" + appname + "' to remote host: %s" % self.deploy_dir)
        put(local_path=warfile, remote_path=self.deploy_dir)

    def install(self):
        """Download and unpack Tomcat, then write every configuration file."""
        current_date = strftime("%Y%m%d-%H%M%S")
        self._install_binaries()
        self._configure_startup_scripts(current_date)
        self._write_setenv(current_date)
        self._write_server_xml(current_date)

    def _install_binaries(self):
        """Fetch the Tomcat tarball and unpack it into CATALINA_HOME."""
        run("wget -q http://static.uji.es/services/docker/apache-tomcat-%s.tar.gz -O /tmp/tomcat.tar.gz" % self.version)
        run("tar xfz /tmp/tomcat.tar.gz -C %s" % self.base)
        run("mv %s/apache-tomcat-%s %s" % (self.base, self.version, self.home))
        run("rm /tmp/tomcat.tar.gz")

    def _configure_startup_scripts(self, current_date):
        """Inject JAVA_HOME/PATH exports into startup.sh and shutdown.sh."""
        for filename in ["startup.sh", "shutdown.sh"]:
            remote_file = "%s/bin/%s" % (self.home, filename)
            local_file = "/tmp/%s.%s" % (filename, current_date)
            get(remote_file, local_file)
            file = open(local_file, "r")
            content = file.readlines()
            file.close()
            # Line 21 lands just after the licence header of the stock
            # scripts -- assumes the stock apache-tomcat layout; confirm
            # when bumping the Tomcat version.
            content.insert(21, "export JAVA_HOME=/mnt/data/aplicacions/sdk/jdk1.8.0_45\nexport PATH=$JAVA_HOME/bin:$PATH\n\n")
            file = open(local_file, "w")
            file.write("".join(content))
            file.close()
            put(local_file, remote_file)

    def _write_setenv(self, current_date):
        """Generate bin/setenv.sh with locale, memory and JMX settings."""
        file = open("/tmp/setenv.sh.%s" % current_date, "w")
        file.write("#!/bin/sh\n\n")
        file.write("export LC_ALL=\"es_ES.UTF-8\"\n")
        file.write("export LANG=\"es_ES.UTF-8\"\n")
        file.write("export JAVA_OPTS=\"%s -Dfile.encoding=UTF-8 -XX:+CMSClassUnloadingEnabled\"\n" % self.memory)
        file.write("export CATALINA_PID=$CATALINA_BASE/tomcat.pid\n")
        file.write("export CATALINA_OPTS=\"-Djava.awt.headless=true -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port=%s -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false\"\n" % self.jmx_port)
        file.close()
        put("/tmp/setenv.sh.%s" % current_date, "%s/bin/setenv.sh" % self.home)
        run("chmod u+x %s/bin/setenv.sh" % self.home)

    def _write_server_xml(self, current_date):
        """Generate conf/server.xml with the configured connector ports."""
        file = open("/tmp/server.xml.%s" % current_date, "w")
        file.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n")
        file.write("<Server port=\"%s\" shutdown=\"SHUTDOWN\">\n" % self.shutdown_port)
        file.write("  <Listener className=\"org.apache.catalina.startup.VersionLoggerListener\" />\n")
        file.write("  <Listener className=\"org.apache.catalina.core.AprLifecycleListener\" SSLEngine=\"on\" />\n")
        file.write("  <Listener className=\"org.apache.catalina.core.JreMemoryLeakPreventionListener\" />\n")
        file.write("  <Listener className=\"org.apache.catalina.mbeans.GlobalResourcesLifecycleListener\" />\n")
        file.write("  <Listener className=\"org.apache.catalina.core.ThreadLocalLeakPreventionListener\" />\n\n")
        file.write("  <GlobalNamingResources>\n")
        file.write("    <Resource name=\"UserDatabase\" auth=\"Container\"\n")
        file.write("              type=\"org.apache.catalina.UserDatabase\"\n")
        file.write("              description=\"User database that can be updated and saved\"\n")
        file.write("              factory=\"org.apache.catalina.users.MemoryUserDatabaseFactory\"\n")
        file.write("              pathname=\"conf/tomcat-users.xml\" />\n")
        file.write("  </GlobalNamingResources>\n\n")
        file.write("  <Service name=\"Catalina\">\n")
        file.write("    <Connector port=\"%s\" protocol=\"HTTP/1.1\" connectionTimeout=\"20000\" redirectPort=\"%s\" URIEncoding=\"UTF-8\" />\n" %
                   (self.http_port, self.redirect_port))
        file.write("    <Connector port=\"%s\" protocol=\"AJP/1.3\" redirectPort=\"%s\" URIEncoding=\"UTF-8\" />\n\n" %
                   (self.ajp_port, self.redirect_port))
        file.write("    <Engine name=\"Catalina\" defaultHost=\"localhost\">\n")
        file.write("      <Realm className=\"org.apache.catalina.realm.LockOutRealm\">\n")
        file.write("        <Realm className=\"org.apache.catalina.realm.UserDatabaseRealm\" resourceName=\"UserDatabase\"/>\n")
        file.write("      </Realm>\n\n")
        file.write("      <Host name=\"localhost\" appBase=\"webapps\" unpackWARs=\"true\" autoDeploy=\"false\">\n")
        file.write("        <Valve className=\"org.apache.catalina.valves.AccessLogValve\" directory=\"logs\"\n")
        file.write("               prefix=\"localhost_access_log\" suffix=\".txt\"\n")
        file.write("               pattern=\"%h %l %u %t &quot;%r&quot; %s %b\" />\n")
        file.write("      </Host>\n")
        file.write("    </Engine>\n")
        file.write("  </Service>\n")
        file.write("</Server>")
        file.close()
        put("/tmp/server.xml.%s" % current_date, "%s/conf/server.xml" % self.home)

    def uninstall(self):
        """Stop the server and move CATALINA_HOME aside into /tmp (timestamped)."""
        if not directory_exists(self.home):
            return
        current_date = strftime("%Y%m%d-%H%M%S")
        self.shutdown()
        # BUG FIX: the original referenced the typo ``seself.home`` here,
        # which raised NameError on every uninstall.
        run("mv %s /tmp/%s.%s" % (self.home, self.home.split("/")[-1], current_date))

    def _running(self):
        """Return True when the Tomcat manager 'list' endpoint answers 'OK -'."""
        try:
            url = "http://%s:%s/manager/text/list" % (self.host, self.http_port)
            # NOTE(review): the credential template was anonymised upstream
            # ("%<PASSWORD>"); "user:password" is the HTTP Basic-auth format
            # the manager expects -- confirm against the original source.
            hashed_password = base64.b64encode("%s:%s" % (self.user, self.password))
            data = run("curl -H 'Authorization: Basic %s' %s" % (hashed_password, url))
            return data[:4] == "OK -"
        except Exception:
            import traceback
            # BUG FIX: ``print traceback.format_exc()`` was Python-2-only
            # syntax; the parenthesised form works on both 2 and 3.
            print(traceback.format_exc())
            return False
# def activate_redis_sessions(app, config):
# catalina_home = BASE + "/" + app
#
# local(
# "wget -q http://static.uji.es/services/docker/redis-store-1.3.0.BUILD-SNAPSHOT.jar -O %s/lib/redis-store-1.3.0.BUILD-SNAPSHOT.jar" % catalina_home)
#
# file = open("%s/conf/context.xml" % catalina_home, "w")
# file.write("<?xml version=\"1.0\" encoding=\"utf-8\"?>\n")
# file.write("<Context>\n")
# file.write(" <WatchedResource>WEB-INF/web.xml</WatchedResource>\n")
# file.write(" <WatchedResource>${catalina.base}/conf/web.xml</WatchedResource>\n")
# file.write(" <Valve className=\"com.gopivotal.manager.SessionFlushValve\" />\n")
# file.write(" <Manager className=\"org.apache.catalina.session.PersistentManager\">\n")
# file.write(" <Store className=\"com.gopivotal.manager.redis.RedisStore\" host=\"infra01.uji.es\" />\n")
# file.write(" </Manager>\n")
# file.write("</Context>")
# file.close()
#
#
# def configure_tomcat_access_manager(app, config):
# catalina_home = BASE + "/" + app
#
# file = open("%s/conf/tomcat-users.xml" % catalina_home, "w")
# file.write("<?xml version=\"1.0\" encoding=\"utf-8\"?>\n")
# file.write("<tomcat-users>\n")
# file.write(" <role rolename=\"manager-gui\"/>\n")
# file.write(" <role rolename=\"manager-script\"/>\n")
# file.write(" <user username=\"tomcat\" password=\"<PASSWORD>\" roles=\"manager-gui, manager-script\"/>\n")
# file.write("</tomcat-users>\n")
# file.close()
#
# if __name__ == "__main__":
# fabric.api.env.host_string = "<EMAIL>"
# fabric.api.env.password = "<PASSWORD>"
#
# app = "apa"
# config = ujiapps["apexp02.uji.es"][app]
#
# clear_if_exists(app, config)
#
# install_tomcat(app, config)
# configure_javahome_startup(app, config)
# configure_tomcat_env(app, config)
# activate_redis_sessions(app, config)
# configure_tomcat_access_manager(app, config)
# configure_tomcat(app, config)
| en | 0.349956 | # configure_javahome_startup # configure_tomcat_env # configure_tomcat # def activate_redis_sessions(app, config): # catalina_home = BASE + "/" + app # # local( # "wget -q http://static.uji.es/services/docker/redis-store-1.3.0.BUILD-SNAPSHOT.jar -O %s/lib/redis-store-1.3.0.BUILD-SNAPSHOT.jar" % catalina_home) # # file = open("%s/conf/context.xml" % catalina_home, "w") # file.write("<?xml version=\"1.0\" encoding=\"utf-8\"?>\n") # file.write("<Context>\n") # file.write(" <WatchedResource>WEB-INF/web.xml</WatchedResource>\n") # file.write(" <WatchedResource>${catalina.base}/conf/web.xml</WatchedResource>\n") # file.write(" <Valve className=\"com.gopivotal.manager.SessionFlushValve\" />\n") # file.write(" <Manager className=\"org.apache.catalina.session.PersistentManager\">\n") # file.write(" <Store className=\"com.gopivotal.manager.redis.RedisStore\" host=\"infra01.uji.es\" />\n") # file.write(" </Manager>\n") # file.write("</Context>") # file.close() # # # def configure_tomcat_access_manager(app, config): # catalina_home = BASE + "/" + app # # file = open("%s/conf/tomcat-users.xml" % catalina_home, "w") # file.write("<?xml version=\"1.0\" encoding=\"utf-8\"?>\n") # file.write("<tomcat-users>\n") # file.write(" <role rolename=\"manager-gui\"/>\n") # file.write(" <role rolename=\"manager-script\"/>\n") # file.write(" <user username=\"tomcat\" password=\"<PASSWORD>\" roles=\"manager-gui, manager-script\"/>\n") # file.write("</tomcat-users>\n") # file.close() # # if __name__ == "__main__": # fabric.api.env.host_string = "<EMAIL>" # fabric.api.env.password = "<PASSWORD>" # # app = "apa" # config = ujiapps["apexp02.uji.es"][app] # # clear_if_exists(app, config) # # install_tomcat(app, config) # configure_javahome_startup(app, config) # configure_tomcat_env(app, config) # activate_redis_sessions(app, config) # configure_tomcat_access_manager(app, config) # configure_tomcat(app, config) | 2.886127 | 3 |
recipes/Python/576620_ChangeDirectory_context_manager/recipe-576620.py | tdiprima/code | 2,023 | 6620143 | <gh_stars>1000+
#!/usr/bin/python
# -*- encoding: utf-8 -*-
from __future__ import with_statement
import os
import os.path
class ChangeDirectory(object):
    """Context manager that temporarily switches the working directory.

    Entering chdirs into the target directory; exiting chdirs back to
    wherever the process was before.  Both directories are exposed as
    properties, along with the relative path leading from the current
    directory back to the previous one.

    >>> import tempfile
    >>> before = os.getcwd()
    >>> target = os.path.realpath(tempfile.mkdtemp())
    >>> with ChangeDirectory(target) as cd:
    ...     assert os.getcwd() == target == cd.current
    ...     assert cd.previous == before
    >>> assert os.getcwd() == before
    >>> os.rmdir(target)
    """

    def __init__(self, directory):
        # The chdir itself is deferred to __enter__.
        self._dir = directory
        self._cwd = os.getcwd()
        self._pwd = self._cwd

    @property
    def current(self):
        """Working directory while inside the context."""
        return self._cwd

    @property
    def previous(self):
        """Working directory that was active before entering."""
        return self._pwd

    @property
    def relative(self):
        """Relative path from :attr:`current` back to :attr:`previous`."""
        cur_parts = self._cwd.split(os.path.sep)
        prev_parts = self._pwd.split(os.path.sep)
        # Length of the shared leading run of path components.
        shared = 0
        for cur_piece, prev_piece in zip(cur_parts, prev_parts):
            if cur_piece != prev_piece:
                break
            shared += 1
        hops = ['..'] * (len(cur_parts) - shared)
        return os.path.normpath(os.path.join(*(['.'] + hops + prev_parts[shared:])))

    def __enter__(self):
        self._pwd = self._cwd
        os.chdir(self._dir)
        # Resolve via getcwd() so relative targets become absolute.
        self._cwd = os.getcwd()
        return self

    def __exit__(self, *args):
        os.chdir(self._pwd)
        self._cwd = self._pwd
# Script entry point: run the doctests embedded in the class docstring.
if __name__ == '__main__':
import doctest
doctest.testmod()
| #!/usr/bin/python
# -*- encoding: utf-8 -*-
from __future__ import with_statement
import os
import os.path
class ChangeDirectory(object):
    """Context manager that temporarily switches the working directory.

    Entering chdirs into the target directory; exiting chdirs back to
    wherever the process was before.  Both directories are exposed as
    properties, along with the relative path leading from the current
    directory back to the previous one.

    >>> import tempfile
    >>> before = os.getcwd()
    >>> target = os.path.realpath(tempfile.mkdtemp())
    >>> with ChangeDirectory(target) as cd:
    ...     assert os.getcwd() == target == cd.current
    ...     assert cd.previous == before
    >>> assert os.getcwd() == before
    >>> os.rmdir(target)
    """

    def __init__(self, directory):
        # The chdir itself is deferred to __enter__.
        self._dir = directory
        self._cwd = os.getcwd()
        self._pwd = self._cwd

    @property
    def current(self):
        """Working directory while inside the context."""
        return self._cwd

    @property
    def previous(self):
        """Working directory that was active before entering."""
        return self._pwd

    @property
    def relative(self):
        """Relative path from :attr:`current` back to :attr:`previous`."""
        cur_parts = self._cwd.split(os.path.sep)
        prev_parts = self._pwd.split(os.path.sep)
        # Length of the shared leading run of path components.
        shared = 0
        for cur_piece, prev_piece in zip(cur_parts, prev_parts):
            if cur_piece != prev_piece:
                break
            shared += 1
        hops = ['..'] * (len(cur_parts) - shared)
        return os.path.normpath(os.path.join(*(['.'] + hops + prev_parts[shared:])))

    def __enter__(self):
        self._pwd = self._cwd
        os.chdir(self._dir)
        # Resolve via getcwd() so relative targets become absolute.
        self._cwd = os.getcwd()
        return self

    def __exit__(self, *args):
        os.chdir(self._pwd)
        self._cwd = self._pwd
# Script entry point: run the embedded doctests (testmod call follows).
if __name__ == '__main__':
import doctest
doctest.testmod() | en | 0.605182 | #!/usr/bin/python # -*- encoding: utf-8 -*- ChangeDirectory is a context manager that allowing you to temporary change the working directory. >>> import tempfile >>> td = os.path.realpath(tempfile.mkdtemp()) >>> currentdirectory = os.getcwd() >>> with ChangeDirectory(td) as cd: ... assert cd.current == td ... assert os.getcwd() == td ... assert cd.previous == currentdirectory ... assert os.path.normpath(os.path.join(cd.current, cd.relative)) == cd.previous ... >>> assert os.getcwd() == currentdirectory >>> with ChangeDirectory(td) as cd: ... os.mkdir('foo') ... with ChangeDirectory('foo') as cd2: ... assert cd2.previous == cd.current ... assert cd2.relative == '..' ... assert os.getcwd() == os.path.join(td, 'foo') ... assert os.getcwd() == td ... assert cd.current == td ... os.rmdir('foo') ... >>> os.rmdir(td) >>> with ChangeDirectory('.') as cd: ... assert cd.current == currentdirectory ... assert cd.current == cd.previous ... assert cd.relative == '.' | 3.554298 | 4 |
def inner(x):
    """Return x! (the factorial of x), computed iteratively; inner(0) == 1."""
    product = 1
    for factor in range(2, x + 1):
        product *= factor
    return product
# Read n from stdin and compute the nth Catalan number:
#   C(n) = (2n)! / (n! * (n+1)!)
n = int(input().strip())
catalan_num = inner(2*n)//(inner(n)*inner(n+1))
print(catalan_num) | def inner(x):
# (duplicate dump copy) factorial accumulator body -- the ``def inner(x):``
# header sits on the previous, metadata-fused line of the dump.
fact=1
for i in range(1,x+1):
fact*=i
return fact
# Read n from stdin; C(n) = (2n)! / (n! * (n+1)!).
n = int(input().strip())
catalan_num = inner(2*n)//(inner(n)*inner(n+1))
print(catalan_num) | none | 1 | 3.717057 | 4 | |
scripts/mpi/halo_av_qty.py | lconaboy/seren3 | 1 | 6620145 | <reponame>lconaboy/seren3
import numpy as np
def _volume_weighted_average(field, halo, npoints=100000):
    """Volume-weighted mean of *field* over the halo via random point sampling."""
    sample_points = halo.sphere.random_points(npoints)
    sampled = halo.g[field].sample_points(sample_points, use_multiprocessing=False)
    return sampled[field].mean()
def _mass_weighted_average(field, halo, mass_units="Msol h**-1"):
    """Cell-mass-weighted mean of *field* over the halo's gas cells."""
    dset = halo.g[[field, "mass"]].flatten()
    weights = dset["mass"].in_units(mass_units)
    return np.sum(dset[field] * weights) / weights.sum()
def main(path, iout, field, pickle_path=None):
    """Compute the mass-weighted average of *field* for every halo in the
    snapshot at *path*/*iout* (MPI-parallel) and pickle the packed results.

    ``pickle_path`` defaults to ``<path>/pickle/`` and is created on demand.
    """
    import seren3
    import pickle, os
    from seren3.analysis.parallel import mpi

    mpi.msg("Loading data")
    snap = seren3.load_snapshot(path, iout)
    snap.set_nproc(8)
    halos = snap.halos()

    # Only the host rank enumerates (and shuffles, for load balance) halos.
    halo_ix = None
    if mpi.host:
        halo_ix = halos.halo_ix(shuffle=True)

    dest = {}
    for i, sto in mpi.piter(halo_ix, storage=dest):
        h = halos[i]
        mpi.msg("Working on halo %i \t %i" % (i, h.hid))
        mw = _mass_weighted_average(field, h)
        # Skip halos with non-finite averages (empty/bad gas data).
        if (np.isinf(mw) or np.isnan(mw)):
            continue
        mpi.msg("%i \t %1.2e" % (h.hid, mw))
        sto.idx = h["id"]
        sto.result = {"mw": mw}

    if mpi.host:
        if pickle_path is None:
            pickle_path = "%s/pickle/" % path
        if os.path.isdir(pickle_path) is False:
            os.mkdir(pickle_path)
        fname = "%s/%s_halo_av_%05i.p" % (pickle_path, field, iout)
        pickle.dump(mpi.unpack(dest), open(fname, "wb"))
    mpi.msg("Done")
if __name__ == "__main__":
    # CLI: <path> <output number> <field> [pickle dir]
    import sys

    path = sys.argv[1]
    iout = int(sys.argv[2])
    field = sys.argv[3]
    pickle_path = sys.argv[4] if len(sys.argv) > 4 else None
    main(path, iout, field, pickle_path)
| import numpy as np
def _volume_weighted_average(field, halo, npoints=100000):
    """Volume-weighted mean of *field* over the halo via random point sampling."""
    sample_points = halo.sphere.random_points(npoints)
    sampled = halo.g[field].sample_points(sample_points, use_multiprocessing=False)
    return sampled[field].mean()
def _mass_weighted_average(field, halo, mass_units="Msol h**-1"):
    """Cell-mass-weighted mean of *field* over the halo's gas cells."""
    dset = halo.g[[field, "mass"]].flatten()
    weights = dset["mass"].in_units(mass_units)
    return np.sum(dset[field] * weights) / weights.sum()
def main(path, iout, field, pickle_path=None):
    """Compute the mass-weighted average of *field* for every halo in the
    snapshot at *path*/*iout* (MPI-parallel) and pickle the packed results.

    ``pickle_path`` defaults to ``<path>/pickle/`` and is created on demand.
    """
    import seren3
    import pickle, os
    from seren3.analysis.parallel import mpi

    mpi.msg("Loading data")
    snap = seren3.load_snapshot(path, iout)
    snap.set_nproc(8)
    halos = snap.halos()

    # Only the host rank enumerates (and shuffles, for load balance) halos.
    halo_ix = None
    if mpi.host:
        halo_ix = halos.halo_ix(shuffle=True)

    dest = {}
    for i, sto in mpi.piter(halo_ix, storage=dest):
        h = halos[i]
        mpi.msg("Working on halo %i \t %i" % (i, h.hid))
        mw = _mass_weighted_average(field, h)
        # Skip halos with non-finite averages (empty/bad gas data).
        if (np.isinf(mw) or np.isnan(mw)):
            continue
        mpi.msg("%i \t %1.2e" % (h.hid, mw))
        sto.idx = h["id"]
        sto.result = {"mw": mw}

    if mpi.host:
        if pickle_path is None:
            pickle_path = "%s/pickle/" % path
        if os.path.isdir(pickle_path) is False:
            os.mkdir(pickle_path)
        fname = "%s/%s_halo_av_%05i.p" % (pickle_path, field, iout)
        pickle.dump(mpi.unpack(dest), open(fname, "wb"))
    mpi.msg("Done")
# CLI entry point: <path> <output number> <field> [pickle dir].
# The final ``main(...)`` call sits on the next, metadata-fused dump line.
if __name__ == "__main__":
import sys
path = sys.argv[1]
iout = int(sys.argv[2])
field = sys.argv[3]
pickle_path = None
if len(sys.argv) > 4:
pickle_path = sys.argv[4]
main(path, iout, field, pickle_path) | en | 0.352203 | # snap.set_nproc(1) # disbale multiprocessing/threading # vw = _volume_weighted_average(field, h) # vw = _volume_weighted_average_cube(snap, field, h) # sto.result = {"vw" : vw, "mw" : mw} | 2.143774 | 2 |
pyPLM/Widgets/MessageBox.py | vtta2008/pipelineTool | 7 | 6620146 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Script Name: PopupMessage.py
Author: <NAME>/Jimmy - 3D artist.
Description:
"""
# -------------------------------------------------------------------------------------------------------------
from PySide2.QtWidgets import QMessageBox
from pyPLM.damg import DAMGDICT
class MessageBox(QMessageBox):
    """Project wrapper around :class:`QMessageBox`.

    Title, icon, severity handler and buttons are all derived from a short
    ``level`` keyword ('about', 'information', 'question', 'warning' or
    'critical').  Buttons may be given as a list of keyword strings from
    :meth:`getBtnSetting`.
    """

    Type = 'DAMGUI'
    key = 'Widget'
    _name = 'DAMG Widget'
    # NOTE: class-level, so the registry is shared by all MessageBox instances.
    buttons = DAMGDICT()

    def __init__(self, parent=None, title="auto", level="auto", message="test message", btns=None, flag=None):
        QMessageBox.__init__(self)

        self._parent = parent
        self._title = title
        self._level = level
        self._message = message
        # BUG FIX: the original used a mutable default ``btns=[]`` (shared
        # across calls); use None as the sentinel instead.
        self.btns = [] if btns is None else btns
        self.flag = flag

        # 'auto' (or missing) title falls back to the severity level string.
        if self._title == 'auto' or self._title is None:
            self.title = self._level
        else:
            self.title = self._title

        if self.flag:
            self.setWindowFlag(self.flag)

        self.icon = self.getIcon()
        self.level = self.getLevel()

        # NOTE(review): a plain-string ``btns`` collapses to the single 'ok'
        # button constant without calling addButton -- kept as in the original.
        if isinstance(self.btns, str):
            self.btns = self.getBtnSetting('ok')
        else:
            for btn in self.btns:
                self.addButton(btn, self.getBtnSetting(btn))

    def addBtn(self, btn):
        """Add a button by keyword, register it in the shared dict, return it."""
        button = self.addButton(btn, self.getBtnSetting(btn))
        self.buttons.add(btn, button)
        return button

    def getLevel(self):
        """Map the level keyword onto the matching QMessageBox class helper."""
        levels = dict(
            about = self.about,
            information = self.information,
            question = self.question,
            warning = self.warning,
            critical = self.critical,
        )
        return levels[self._level]

    def getIcon(self):
        """Return the stock icon for the level, or a project AppIcon otherwise."""
        icons = dict(
            about = self.NoIcon,
            information = self.Information,
            question = self.Question,
            warning = self.Warning,
            critical = self.Critical,
        )
        if self._level in icons.keys():
            return icons[self._level]
        else:
            from pyPLM.Gui import AppIcon
            # BUG FIX: the original constructed AppIcon but never returned
            # it, so getIcon() yielded None for custom levels.
            return AppIcon(self._level)

    def getBtnSetting(self, btn):
        """Map a button keyword onto the QMessageBox button/role constant."""
        buttons = dict(
            ok = self.Ok,
            open = self.Open,
            save = self.Save,
            cancel = self.Cancel,
            close = self.Close,
            yes = self.Yes,
            no = self.No,
            abort = self.Abort,
            retry = self.Retry,
            ignore = self.Ignore,
            discard = self.Discard,
            yes_no = self.Yes|QMessageBox.No,
            retry_close = self.Retry|QMessageBox.Close,
            Overwrite = self.NoRole,
            Rename = self.RejectRole,
            Resume = self.YesRole,
        )
        return buttons[btn]

    @property
    def name(self):
        """Display name of this widget."""
        return self._name

    @name.setter
    def name(self, newName):
        self._name = newName
# -------------------------------------------------------------------------------------------------------------
# Created by panda on 23/10/2019 - 8:57 AM
# © 2017 - 2018 DAMGteam. All rights reserved | # -*- coding: utf-8 -*-
"""
Script Name: PopupMessage.py
Author: <NAME>/Jimmy - 3D artist.
Description:
"""
# -------------------------------------------------------------------------------------------------------------
from PySide2.QtWidgets import QMessageBox
from pyPLM.damg import DAMGDICT
class MessageBox(QMessageBox):
    """Project wrapper around :class:`QMessageBox`.

    Title, icon, severity handler and buttons are all derived from a short
    ``level`` keyword ('about', 'information', 'question', 'warning' or
    'critical').  Buttons may be given as a list of keyword strings from
    :meth:`getBtnSetting`.
    """

    Type = 'DAMGUI'
    key = 'Widget'
    _name = 'DAMG Widget'
    # NOTE: class-level, so the registry is shared by all MessageBox instances.
    buttons = DAMGDICT()

    def __init__(self, parent=None, title="auto", level="auto", message="test message", btns=None, flag=None):
        QMessageBox.__init__(self)

        self._parent = parent
        self._title = title
        self._level = level
        self._message = message
        # BUG FIX: the original used a mutable default ``btns=[]`` (shared
        # across calls); use None as the sentinel instead.
        self.btns = [] if btns is None else btns
        self.flag = flag

        # 'auto' (or missing) title falls back to the severity level string.
        if self._title == 'auto' or self._title is None:
            self.title = self._level
        else:
            self.title = self._title

        if self.flag:
            self.setWindowFlag(self.flag)

        self.icon = self.getIcon()
        self.level = self.getLevel()

        # NOTE(review): a plain-string ``btns`` collapses to the single 'ok'
        # button constant without calling addButton -- kept as in the original.
        if isinstance(self.btns, str):
            self.btns = self.getBtnSetting('ok')
        else:
            for btn in self.btns:
                self.addButton(btn, self.getBtnSetting(btn))

    def addBtn(self, btn):
        """Add a button by keyword, register it in the shared dict, return it."""
        button = self.addButton(btn, self.getBtnSetting(btn))
        self.buttons.add(btn, button)
        return button

    def getLevel(self):
        """Map the level keyword onto the matching QMessageBox class helper."""
        levels = dict(
            about = self.about,
            information = self.information,
            question = self.question,
            warning = self.warning,
            critical = self.critical,
        )
        return levels[self._level]

    def getIcon(self):
        """Return the stock icon for the level, or a project AppIcon otherwise."""
        icons = dict(
            about = self.NoIcon,
            information = self.Information,
            question = self.Question,
            warning = self.Warning,
            critical = self.Critical,
        )
        if self._level in icons.keys():
            return icons[self._level]
        else:
            from pyPLM.Gui import AppIcon
            # BUG FIX: the original constructed AppIcon but never returned
            # it, so getIcon() yielded None for custom levels.
            return AppIcon(self._level)

    def getBtnSetting(self, btn):
        """Map a button keyword onto the QMessageBox button/role constant."""
        buttons = dict(
            ok = self.Ok,
            open = self.Open,
            save = self.Save,
            cancel = self.Cancel,
            close = self.Close,
            yes = self.Yes,
            no = self.No,
            abort = self.Abort,
            retry = self.Retry,
            ignore = self.Ignore,
            discard = self.Discard,
            yes_no = self.Yes|QMessageBox.No,
            retry_close = self.Retry|QMessageBox.Close,
            Overwrite = self.NoRole,
            Rename = self.RejectRole,
            Resume = self.YesRole,
        )
        return buttons[btn]

    @property
    def name(self):
        """Display name of this widget."""
        return self._name

    @name.setter
    def name(self, newName):
        self._name = newName
# -------------------------------------------------------------------------------------------------------------
# Created by panda on 23/10/2019 - 8:57 AM
# © 2017 - 2018 DAMGteam. All rights reserved | en | 0.425425 | # -*- coding: utf-8 -*- Script Name: PopupMessage.py Author: <NAME>/Jimmy - 3D artist. Description: # ------------------------------------------------------------------------------------------------------------- # ------------------------------------------------------------------------------------------------------------- # Created by panda on 23/10/2019 - 8:57 AM # © 2017 - 2018 DAMGteam. All rights reserved | 2.45731 | 2 |
Season 09 - Advanced built-in functions in Python/Episode 05 - map() function in python.py | Pythobit/Python-tutorial | 3 | 6620147 | <gh_stars>1-10
# map() function in python
friends = ['KenDall', 'Kylie', 'Randy', 'Anna', 'Marie']

# filter() lazily keeps only the names the predicate accepts.
# BUG FIX: the original called a non-existent ``str.starts_with_r`` method;
# the built-in spelling is ``str.startswith``.
start_with_r = filter(lambda friend: friend.startswith('R'), friends)

# map() lazily applies the function to every element.
friends_lower = map(lambda x: x.lower(), friends)
print(next(friends_lower))


class User:
    """Minimal user record, built directly or from a dict payload."""

    def __init__(self, username, password):
        self.username = username
        self.password = password

    @classmethod
    def from_dict(cls, data):
        """Alternate constructor: build a User from a mapping."""
        return cls(data['username'], data['password'])


users = [
    {'username': 'kendall', 'password': '<PASSWORD>'},
    # BUG FIX: the original was missing the comma between the two dict
    # literals, which is a SyntaxError.
    {'username': 'iamawesome', 'password': '<PASSWORD>'},
]

# users = [User.from_dict(user) for user in users]
users = map(User.from_dict, users)  # map is more readable
| # map() function in python
friends = ['KenDall', 'Kylie', 'Randy', 'Anna', 'Marie']

# filter() lazily keeps only the names the predicate accepts.
# BUG FIX: the original called a non-existent ``str.starts_with_r`` method;
# the built-in spelling is ``str.startswith``.
start_with_r = filter(lambda friend: friend.startswith('R'), friends)

# map() lazily applies the function to every element.
friends_lower = map(lambda x: x.lower(), friends)
print(next(friends_lower))


class User:
    """Minimal user record, built directly or from a dict payload."""

    def __init__(self, username, password):
        self.username = username
        self.password = password

    @classmethod
    def from_dict(cls, data):
        """Alternate constructor: build a User from a mapping."""
        return cls(data['username'], data['password'])


users = [
    {'username': 'kendall', 'password': '<PASSWORD>'},
    # BUG FIX: the original was missing the comma between the two dict
    # literals, which is a SyntaxError.
    {'username': 'iamawesome', 'password': '<PASSWORD>'},
]

# users = [User.from_dict(user) for user in users]
users = map(User.from_dict, users) # map is more readable | en | 0.641832 | # map() function in python # users = [User.from_dict(user) for user in users] # map is more readable | 3.978269 | 4 |
mkdocs/contrib/source_url/__init__.py | tuenti/mkdocs | 0 | 6620148 | from mkdocs.contrib.source_url.extension import SourceCodeLinkExtension
from mkdocs.contrib.source_url.plugin import SourceUrlPlugin
__all__ = ["SourceCodeLinkExtension", "SourceUrlPlugin"]
| from mkdocs.contrib.source_url.extension import SourceCodeLinkExtension
from mkdocs.contrib.source_url.plugin import SourceUrlPlugin
__all__ = ["SourceCodeLinkExtension", "SourceUrlPlugin"]
| none | 1 | 1.161478 | 1 | |
tests/cloud_functions/test_create_instrument_case_tasks.py | ONSdigital/blaise-totalmobile-client | 0 | 6620149 | from unittest import mock
import blaise_restapi
import flask
import pytest
from google.cloud import tasks_v2
from appconfig import Config
from client.optimise import OptimiseClient
from cloud_functions.create_instrument_case_tasks import (
create_instrument_case_tasks,
create_task_name,
create_tasks,
filter_cases,
map_totalmobile_job_models,
prepare_tasks,
retrieve_case_data,
retrieve_world_id,
validate_request,
)
from models.totalmobile_job_model import TotalmobileJobModel
def test_create_task_name_returns_correct_name_when_called():
    """Task names should begin with '<instrument>-<serial number>-'."""
    job = TotalmobileJobModel("OPN2101A", "world", {"qiD.Serial_Number": "90001"})

    task_name = create_task_name(job)

    assert task_name.startswith("OPN2101A-90001-")
def test_create_task_name_returns_unique_name_each_time_when_passed_the_same_model():
    """Repeated calls with one model must still yield distinct task names."""
    job = TotalmobileJobModel("OPN2101A", "world", {"qiD.Serial_Number": "90001"})

    first = create_task_name(job)
    second = create_task_name(job)

    assert first != second
@mock.patch.object(Config, "from_env")
def test_prepare_tasks_returns_an_expected_number_of_tasks_when_given_a_list_of_job_models(
    _mock_config_from_env,
):
    """prepare_tasks should yield one distinct request per job model."""
    _mock_config_from_env.return_value = Config(
        "", "", "", "", "", "", "", "", "", "", ""
    )
    models = [
        TotalmobileJobModel("OPN2101A", "world", {"qiD.Serial_Number": "90001"}),
        TotalmobileJobModel("OPN2101A", "world", {"qiD.Serial_Number": "90002"}),
    ]

    task_requests = prepare_tasks(models)

    assert len(task_requests) == 2
    assert task_requests[0] != task_requests[1]
@mock.patch.object(Config, "from_env")
def test_prepare_tasks_returns_expected_tasks_when_given_a_list_of_job_models(
    _mock_config_from_env,
):
    """Each job model should map to a fully-populated Cloud Tasks request
    (queue parent, serial-prefixed name, function URL, payload and OIDC
    service account)."""
    _mock_config_from_env.return_value = Config(
        "",
        "",
        "",
        "",
        "totalmobile_jobs_queue_id",
        "cloud_function",
        "project",
        "region",
        "rest_api_url",
        "gusty",
        "cloud_function_sa",
    )
    models = [
        TotalmobileJobModel("OPN2101A", "world", {"qiD.Serial_Number": "90001"}),
        TotalmobileJobModel("OPN2101A", "world", {"qiD.Serial_Number": "90002"}),
    ]

    task_requests = prepare_tasks(models)

    for request, model, serial in zip(task_requests, models, ("90001", "90002")):
        assert request.parent == "totalmobile_jobs_queue_id"
        assert request.task.name.startswith(
            f"totalmobile_jobs_queue_id/tasks/OPN2101A-{serial}-"
        )
        assert (
            request.task.http_request.url
            == "https://region-project.cloudfunctions.net/cloud_function"
        )
        assert request.task.http_request.body == model.json().encode()
        assert (
            request.task.http_request.oidc_token.service_account_email
            == "cloud_function_sa"
        )
@mock.patch.object(blaise_restapi.Client, "get_instrument_data")
def test_retrieve_case_data_calls_the_rest_api_client_with_the_correct_parameters(
    _mock_rest_api_client,
):
    """The REST API must be queried on the configured server park with the
    fixed list of fields needed to build Totalmobile jobs."""
    config = Config("", "", "", "", "", "", "", "", "rest_api_url", "gusty", "")
    _mock_rest_api_client.return_value = {
        "instrumentName": "DST2106Z",
        "instrumentId": "12345-12345-12345-12345-12345",
        "reportingData": "",
    }
    expected_fields = [
        "qDataBag.UPRN_Latitude",
        "qDataBag.UPRN_Longitude",
        "qDataBag.Prem1",
        "qDataBag.Prem2",
        "qDataBag.Prem3",
        "qDataBag.PostTown",
        "qDataBag.PostCode",
        "qDataBag.TelNo",
        "qDataBag.TelNo2",
        "hOut",
        "srvStat",
        "qiD.Serial_Number",
    ]

    retrieve_case_data("OPN2101A", config)

    _mock_rest_api_client.assert_called_with("gusty", "OPN2101A", expected_fields)
@mock.patch.object(blaise_restapi.Client, "get_instrument_data")
def test_retrieve_case_data_returns_the_case_data_supplied_by_the_rest_api_client(
    _mock_rest_api_client,
):
    """Only the 'reportingData' part of the REST response is returned."""
    config = Config("", "", "", "", "", "", "", "", "rest_api_url", "gusty", "")
    reporting_data = [
        {"qiD.Serial_Number": "10010", "qhAdmin.HOut": "110"},
        {"qiD.Serial_Number": "10020", "qhAdmin.HOut": "110"},
        {"qiD.Serial_Number": "10030", "qhAdmin.HOut": "110"},
    ]
    _mock_rest_api_client.return_value = {
        "instrumentName": "DST2106Z",
        "instrumentId": "12345-12345-12345-12345-12345",
        "reportingData": reporting_data,
    }

    result = retrieve_case_data("OPN2101A", config)

    assert result == reporting_data
@mock.patch.object(OptimiseClient, "get_world")
def test_retrieve_world_id_returns_a_world_id(_mock_optimise_client):
    """retrieve_world_id should surface the 'id' field of the world returned
    by the Optimise API."""
    config = Config(
        "totalmobile_url",
        "totalmobile_instance",
        "totalmobile_client_id",
        "totalmobile_client_secret",
        "", "", "", "",
        "rest_api_url",
        "gusty",
        "",
    )
    _mock_optimise_client.return_value = {
        "id": "3fa85f64-5717-4562-b3fc-2c963f66afa6",
        "identity": {"reference": "test"},
        "type": "foo",
    }

    assert retrieve_world_id(config) == "3fa85f64-5717-4562-b3fc-2c963f66afa6"
def test_map_totalmobile_job_models_maps_the_correct_list_of_models():
    """Each case dict should become a TotalmobileJobModel carrying the
    instrument name and world id."""
    case_data = [
        {"qiD.Serial_Number": "10010", "qhAdmin.HOut": "110"},
        {"qiD.Serial_Number": "10020", "qhAdmin.HOut": "120"},
        {"qiD.Serial_Number": "10030", "qhAdmin.HOut": "130"},
    ]

    result = map_totalmobile_job_models(case_data, "Earth", "OPN2101A")

    assert result == [
        TotalmobileJobModel("OPN2101A", "Earth", case) for case in case_data
    ]
@mock.patch.object(tasks_v2.CloudTasksAsyncClient, "create_task")
def test_create_tasks_gets_called_once_for_each_task_given_to_it(mock_create_task):
    """Every request passed to create_tasks must be forwarded to the Cloud
    Tasks client exactly once."""
    task_client = tasks_v2.CloudTasksAsyncClient()
    mock_create_task.return_value = {}
    task_requests = [
        tasks_v2.CreateTaskRequest(parent="qid1", task=tasks_v2.Task()),
        tasks_v2.CreateTaskRequest(parent="qid2", task=tasks_v2.Task()),
    ]

    create_tasks(task_requests, task_client)

    mock_create_task.assert_has_calls([mock.call(r) for r in task_requests])
@mock.patch.object(tasks_v2.CloudTasksAsyncClient, "create_task")
def test_create_tasks_returns_the_correct_number_of_tasks(mock_create_task):
    """create_tasks should return one result per submitted request."""
    task_client = tasks_v2.CloudTasksAsyncClient()
    mock_create_task.return_value = {}
    task_requests = [
        tasks_v2.CreateTaskRequest(parent="qid1", task=tasks_v2.Task()),
        tasks_v2.CreateTaskRequest(parent="qid2", task=tasks_v2.Task()),
    ]

    results = create_tasks(task_requests, task_client)

    assert len(results) == 2
def test_filter_cases_returns_cases_where_srv_stat_is_not_3_or_hOut_is_not_360_or_390():
    """filter_cases should drop completed cases.

    Judging by the expectations below, a case is excluded when srvStat is
    "3" OR hOut is "360"/"390" — TODO confirm against the implementation.
    """
    # arrange
    cases = [
        {
            # should return
            "srvStat": "1",
            "hOut": "210",
        },
        {
            # should return
            "srvStat": "2",
            "hOut": "210",
        },
        {
            # should not return
            "srvStat": "3",
            "hOut": "360",
        },
        {
            # should not return
            "srvStat": "3",
            "hOut": "390",
        },
        {
            # should not return
            "srvStat": "3",
            "hOut": "210",
        },
        {
            # should not return
            "srvStat": "1",
            "hOut": "360",
        },
        {
            # should not return
            "srvStat": "2",
            "hOut": "390",
        },
    ]
    # act
    result = filter_cases(cases)
    # assert
    assert result == [{"hOut": "210", "srvStat": "1"}, {"hOut": "210", "srvStat": "2"}]
def test_validate_request(mock_create_job_task):
    """A valid payload should pass validation without raising.

    ``mock_create_job_task`` is presumably a pytest fixture defined in a
    conftest outside this file — TODO confirm.
    """
    validate_request(mock_create_job_task)
def test_validate_request_missing_fields():
    """A payload without 'instrument' should raise a descriptive error."""
    with pytest.raises(Exception) as err:
        validate_request({"world_id": ""})
    expected_message = "Required fields missing from request payload: ['instrument']"
    assert str(err.value) == expected_message
@mock.patch.object(Config, "from_env")
@mock.patch("cloud_functions.create_instrument_case_tasks.validate_request")
@mock.patch("cloud_functions.create_instrument_case_tasks.retrieve_world_id")
@mock.patch("cloud_functions.create_instrument_case_tasks.retrieve_case_data")
@mock.patch("cloud_functions.create_instrument_case_tasks.filter_cases")
@mock.patch("cloud_functions.create_instrument_case_tasks.map_totalmobile_job_models")
@mock.patch("cloud_functions.create_instrument_case_tasks.prepare_tasks")
def test_create_case_tasks_for_instrument(
    # mock.patch decorators inject bottom-up: the last decorator maps to the
    # first parameter.
    mock_prepare_tasks,
    mock_map_totalmobile_job_models,
    mock_filter_cases,
    mock_retrieve_case_data,
    mock_retrieve_world_id,
    mock_validate_request,
    mock_from_env,
):
    """With every collaborator mocked out, the handler should report 'Done'."""
    # arrange
    mock_request = flask.Request.from_values(json={"instrument": "OPN2101A"})
    # act
    result = create_instrument_case_tasks(mock_request)
    # assert
    assert result == "Done"
@mock.patch.object(Config, "from_env")
def test_create_instrument_case_tasks_error(mock_from_env):
    """A request payload missing 'instrument' should be rejected."""
    # arrange
    mock_request = flask.Request.from_values(json={"questionnaire": ""})
    # act / assert
    with pytest.raises(Exception) as err:
        create_instrument_case_tasks(mock_request)
    expected_message = "Required fields missing from request payload: ['instrument']"
    assert str(err.value) == expected_message
| from unittest import mock
import blaise_restapi
import flask
import pytest
from google.cloud import tasks_v2
from appconfig import Config
from client.optimise import OptimiseClient
from cloud_functions.create_instrument_case_tasks import (
create_instrument_case_tasks,
create_task_name,
create_tasks,
filter_cases,
map_totalmobile_job_models,
prepare_tasks,
retrieve_case_data,
retrieve_world_id,
validate_request,
)
from models.totalmobile_job_model import TotalmobileJobModel
def test_create_task_name_returns_correct_name_when_called():
    """Task names should begin with '<instrument>-<serial>-'."""
    # arrange
    model = TotalmobileJobModel(
        "OPN2101A", "world", {"qiD.Serial_Number": "90001"}
    )
    # act / assert
    assert create_task_name(model).startswith("OPN2101A-90001-")
def test_create_task_name_returns_unique_name_each_time_when_passed_the_same_model():
    """Two names generated from the same model must never collide."""
    # arrange
    model = TotalmobileJobModel(
        "OPN2101A", "world", {"qiD.Serial_Number": "90001"}
    )
    # act
    first = create_task_name(model)
    second = create_task_name(model)
    # assert
    assert first != second
@mock.patch.object(Config, "from_env")
def test_prepare_tasks_returns_an_expected_number_of_tasks_when_given_a_list_of_job_models(
    _mock_config_from_env,
):
    """prepare_tasks should emit one distinct task request per job model."""
    # arrange
    # Config takes 11 positional fields; all blank here since only the count
    # of produced tasks matters.
    _mock_config_from_env.return_value = Config(
        "", "", "", "", "", "", "", "", "", "", ""
    )
    model1 = TotalmobileJobModel("OPN2101A", "world", {"qiD.Serial_Number": "90001"})
    model2 = TotalmobileJobModel("OPN2101A", "world", {"qiD.Serial_Number": "90002"})
    # act
    result = prepare_tasks([model1, model2])
    # assert
    assert len(result) == 2
    assert result[0] != result[1]
@mock.patch.object(Config, "from_env")
def test_prepare_tasks_returns_expected_tasks_when_given_a_list_of_job_models(
    _mock_config_from_env,
):
    """prepare_tasks should populate queue parent, task name, target URL,
    serialized body, and OIDC service account from the Config fields."""
    # arrange
    # Positional Config fields: the 5th is the Cloud Tasks queue id, then
    # cloud function name, GCP project, region, REST API url, server park,
    # and the cloud function service account.
    _mock_config_from_env.return_value = Config(
        "",
        "",
        "",
        "",
        "totalmobile_jobs_queue_id",
        "cloud_function",
        "project",
        "region",
        "rest_api_url",
        "gusty",
        "cloud_function_sa",
    )
    model1 = TotalmobileJobModel("OPN2101A", "world", {"qiD.Serial_Number": "90001"})
    model2 = TotalmobileJobModel("OPN2101A", "world", {"qiD.Serial_Number": "90002"})
    # act
    result = prepare_tasks([model1, model2])
    # assert
    assert result[0].parent == "totalmobile_jobs_queue_id"
    assert result[0].task.name.startswith(
        "totalmobile_jobs_queue_id/tasks/OPN2101A-90001-"
    )
    assert (
        result[0].task.http_request.url
        == "https://region-project.cloudfunctions.net/cloud_function"
    )
    assert result[0].task.http_request.body == model1.json().encode()
    assert (
        result[0].task.http_request.oidc_token.service_account_email
        == "cloud_function_sa"
    )
    assert result[1].parent == "totalmobile_jobs_queue_id"
    assert result[1].task.name.startswith(
        "totalmobile_jobs_queue_id/tasks/OPN2101A-90002-"
    )
    assert (
        result[1].task.http_request.url
        == "https://region-project.cloudfunctions.net/cloud_function"
    )
    assert result[1].task.http_request.body == model2.json().encode()
    assert (
        result[1].task.http_request.oidc_token.service_account_email
        == "cloud_function_sa"
    )
@mock.patch.object(blaise_restapi.Client, "get_instrument_data")
def test_retrieve_case_data_calls_the_rest_api_client_with_the_correct_parameters(
    _mock_rest_api_client,
):
    """retrieve_case_data should query the Blaise REST API with the server
    park from config, the instrument name, and the fixed field list."""
    # arrange
    config = Config("", "", "", "", "", "", "", "", "rest_api_url", "gusty", "")
    _mock_rest_api_client.return_value = {
        "instrumentName": "DST2106Z",
        "instrumentId": "12345-12345-12345-12345-12345",
        "reportingData": "",
    }
    blaise_server_park = "gusty"
    instrument_name = "OPN2101A"
    # The exact set of Blaise fields the implementation is expected to request.
    fields = [
        "qDataBag.UPRN_Latitude",
        "qDataBag.UPRN_Longitude",
        "qDataBag.Prem1",
        "qDataBag.Prem2",
        "qDataBag.Prem3",
        "qDataBag.PostTown",
        "qDataBag.PostCode",
        "qDataBag.TelNo",
        "qDataBag.TelNo2",
        "hOut",
        "srvStat",
        "qiD.Serial_Number",
    ]
    # act
    retrieve_case_data(instrument_name, config)
    # assert
    _mock_rest_api_client.assert_called_with(
        blaise_server_park, instrument_name, fields
    )
@mock.patch.object(blaise_restapi.Client, "get_instrument_data")
def test_retrieve_case_data_returns_the_case_data_supplied_by_the_rest_api_client(
    _mock_rest_api_client,
):
    """retrieve_case_data should unwrap and return the 'reportingData' list
    from the REST API response."""
    # arrange
    config = Config("", "", "", "", "", "", "", "", "rest_api_url", "gusty", "")
    _mock_rest_api_client.return_value = {
        "instrumentName": "DST2106Z",
        "instrumentId": "12345-12345-12345-12345-12345",
        "reportingData": [
            {"qiD.Serial_Number": "10010", "qhAdmin.HOut": "110"},
            {"qiD.Serial_Number": "10020", "qhAdmin.HOut": "110"},
            {"qiD.Serial_Number": "10030", "qhAdmin.HOut": "110"},
        ],
    }
    instrument_name = "OPN2101A"
    # act
    result = retrieve_case_data(instrument_name, config)
    # assert
    assert result == [
        {"qiD.Serial_Number": "10010", "qhAdmin.HOut": "110"},
        {"qiD.Serial_Number": "10020", "qhAdmin.HOut": "110"},
        {"qiD.Serial_Number": "10030", "qhAdmin.HOut": "110"},
    ]
@mock.patch.object(OptimiseClient, "get_world")
def test_retrieve_world_id_returns_a_world_id(_mock_optimise_client):
    """retrieve_world_id should return the 'id' field from the Optimise
    world lookup response."""
    # arrange
    config = Config(
        "totalmobile_url",
        "totalmobile_instance",
        "totalmobile_client_id",
        "totalmobile_client_secret",
        "",
        "",
        "",
        "",
        "rest_api_url",
        "gusty",
        "",
    )
    _mock_optimise_client.return_value = {
        "id": "3fa85f64-5717-4562-b3fc-2c963f66afa6",
        "identity": {"reference": "test"},
        "type": "foo",
    }
    # act
    result = retrieve_world_id(config)
    # assert
    assert result == "3fa85f64-5717-4562-b3fc-2c963f66afa6"
def test_map_totalmobile_job_models_maps_the_correct_list_of_models():
    """Each case dict should be wrapped into a TotalmobileJobModel carrying
    the instrument name and world id, preserving order."""
    # arrange
    instrument_name = "OPN2101A"
    world_id = "Earth"
    case_data = [
        {"qiD.Serial_Number": "10010", "qhAdmin.HOut": "110"},
        {"qiD.Serial_Number": "10020", "qhAdmin.HOut": "120"},
        {"qiD.Serial_Number": "10030", "qhAdmin.HOut": "130"},
    ]
    # act
    result = map_totalmobile_job_models(case_data, world_id, instrument_name)
    # assert
    assert result == [
        TotalmobileJobModel(
            "OPN2101A", "Earth", {"qiD.Serial_Number": "10010", "qhAdmin.HOut": "110"}
        ),
        TotalmobileJobModel(
            "OPN2101A", "Earth", {"qiD.Serial_Number": "10020", "qhAdmin.HOut": "120"}
        ),
        TotalmobileJobModel(
            "OPN2101A", "Earth", {"qiD.Serial_Number": "10030", "qhAdmin.HOut": "130"}
        ),
    ]
@mock.patch.object(tasks_v2.CloudTasksAsyncClient, "create_task")
def test_create_tasks_gets_called_once_for_each_task_given_to_it(mock_create_task):
    """create_tasks should forward every request to the client exactly once."""
    # arrange
    client = tasks_v2.CloudTasksAsyncClient()
    mock_create_task.return_value = {}
    requests = [
        tasks_v2.CreateTaskRequest(parent=parent, task=tasks_v2.Task())
        for parent in ("qid1", "qid2")
    ]
    # act
    create_tasks(requests, client)
    # assert
    expected_calls = [mock.call(request) for request in requests]
    mock_create_task.assert_has_calls(expected_calls)
@mock.patch.object(tasks_v2.CloudTasksAsyncClient, "create_task")
def test_create_tasks_returns_the_correct_number_of_tasks(mock_create_task):
    """create_tasks should return one result per submitted request."""
    # arrange
    client = tasks_v2.CloudTasksAsyncClient()
    mock_create_task.return_value = {}
    requests = [
        tasks_v2.CreateTaskRequest(parent=parent, task=tasks_v2.Task())
        for parent in ("qid1", "qid2")
    ]
    # act
    created = create_tasks(requests, client)
    # assert
    assert len(created) == 2
def test_filter_cases_returns_cases_where_srv_stat_is_not_3_or_hOut_is_not_360_or_390():
    """filter_cases should drop completed cases.

    Judging by the expectations below, a case is excluded when srvStat is
    "3" OR hOut is "360"/"390" — TODO confirm against the implementation.
    """
    # arrange
    cases = [
        {
            # should return
            "srvStat": "1",
            "hOut": "210",
        },
        {
            # should return
            "srvStat": "2",
            "hOut": "210",
        },
        {
            # should not return
            "srvStat": "3",
            "hOut": "360",
        },
        {
            # should not return
            "srvStat": "3",
            "hOut": "390",
        },
        {
            # should not return
            "srvStat": "3",
            "hOut": "210",
        },
        {
            # should not return
            "srvStat": "1",
            "hOut": "360",
        },
        {
            # should not return
            "srvStat": "2",
            "hOut": "390",
        },
    ]
    # act
    result = filter_cases(cases)
    # assert
    assert result == [{"hOut": "210", "srvStat": "1"}, {"hOut": "210", "srvStat": "2"}]
def test_validate_request(mock_create_job_task):
    """A valid payload should pass validation without raising.

    ``mock_create_job_task`` is presumably a pytest fixture defined in a
    conftest outside this file — TODO confirm.
    """
    validate_request(mock_create_job_task)
def test_validate_request_missing_fields():
    """A payload without 'instrument' should raise a descriptive error."""
    with pytest.raises(Exception) as err:
        validate_request({"world_id": ""})
    expected_message = "Required fields missing from request payload: ['instrument']"
    assert str(err.value) == expected_message
@mock.patch.object(Config, "from_env")
@mock.patch("cloud_functions.create_instrument_case_tasks.validate_request")
@mock.patch("cloud_functions.create_instrument_case_tasks.retrieve_world_id")
@mock.patch("cloud_functions.create_instrument_case_tasks.retrieve_case_data")
@mock.patch("cloud_functions.create_instrument_case_tasks.filter_cases")
@mock.patch("cloud_functions.create_instrument_case_tasks.map_totalmobile_job_models")
@mock.patch("cloud_functions.create_instrument_case_tasks.prepare_tasks")
def test_create_case_tasks_for_instrument(
    # mock.patch decorators inject bottom-up: the last decorator maps to the
    # first parameter.
    mock_prepare_tasks,
    mock_map_totalmobile_job_models,
    mock_filter_cases,
    mock_retrieve_case_data,
    mock_retrieve_world_id,
    mock_validate_request,
    mock_from_env,
):
    """With every collaborator mocked out, the handler should report 'Done'."""
    # arrange
    mock_request = flask.Request.from_values(json={"instrument": "OPN2101A"})
    # act
    result = create_instrument_case_tasks(mock_request)
    # assert
    assert result == "Done"
@mock.patch.object(Config, "from_env")
def test_create_instrument_case_tasks_error(mock_from_env):
    """A request payload missing 'instrument' should be rejected."""
    # arrange
    mock_request = flask.Request.from_values(json={"questionnaire": ""})
    # act / assert
    with pytest.raises(Exception) as err:
        create_instrument_case_tasks(mock_request)
    expected_message = "Required fields missing from request payload: ['instrument']"
    assert str(err.value) == expected_message
| en | 0.723372 | # arrange # act # assert # arrange # act # assert # arrange # act # assert # arrange # act # assert # arrange # act # assert # arrange # act # assert # arrange # act # assert # arrange # act # assert # arrange # act # assert # arrange # act # assert # arrange # should return # should return # should not return # should not return # should not return # should not return # should not return # act # assert # arrange # act # assert # arrange # assert | 2.329638 | 2 |
train_cifar10_vs_ti.py | goel96vibhor/semisup-adv | 1 | 6620150 | <reponame>goel96vibhor/semisup-adv<filename>train_cifar10_vs_ti.py
"""
Train data sourcing model. Based on code from
https://github.com/hysts/pytorch_shake_shake
"""
import argparse
from collections import OrderedDict
import importlib
import json
import logging
import pathlib
import random
import time
import numpy as np
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision
from torchvision import transforms
from utils import *
from dataloader import *
from datasets import SemiSupervisedDataset, DATASETS
from diff_distribution_dataload_helper import get_new_distribution_loader
import pdb
import pandas as pd
from dataloader import get_cifar10_vs_ti_loader, get_tinyimages_loader
# cuDNN autotuner: picks the fastest convolution algorithms for the fixed
# input shapes used in training.
torch.backends.cudnn.benchmark = True
# logging.basicConfig(
#     format='[%(asctime)s %(name)s %(levelname)s] - %(message)s',
#     datefmt='%Y/%m/%d %H:%M:%S',
#     level=logging.INFO)
# logger = logging.getLogger(__name__)
# Number of optimizer steps taken so far; incremented inside train().
global_step = 0
# Whether CUDA is available; controls DataParallel wrapping below.
use_cuda = torch.cuda.is_available()
def str2bool(s):
    """Parse a case-insensitive 'true'/'false' string into a bool.

    Used as an argparse ``type``; raises RuntimeError for any other value.
    """
    lowered = s.lower()
    if lowered == 'true':
        return True
    if lowered == 'false':
        return False
    raise RuntimeError('Boolean value expected')
def mean_std_normalize(input, mean, std):
    """Channel-wise (x - mean) / std normalization for NCHW image batches.

    The channel axis is moved last so the per-channel ``mean``/``std``
    vectors broadcast, then the NCHW layout is restored.

    Args:
        input: float tensor shaped (..., C, H, W).
        mean: 1-D tensor of per-channel means, length C.
        std: 1-D tensor of per-channel standard deviations, length C.

    Returns:
        Normalized tensor with the same shape as ``input``. Data is moved to
        CUDA when available (matching the original behavior); the previous
        unconditional ``.cuda()`` calls crashed on CPU-only machines.
    """
    # (..., C, H, W) -> (..., H, W, C): put channels last for broadcasting.
    input = input.transpose(-1, -3).transpose(-2, -3)
    if torch.cuda.is_available():
        input = input.cuda()
    assert input.shape[-1] == mean.shape[-1], f"last input dimension, {input.shape} does not match mean dimension, {mean.shape}"
    assert input.shape[-1] == std.shape[-1], f"last input dimension, {input.shape} does not match std dimension, {std.shape}"
    # Broadcasting replaces the explicit, memory-hungry .repeat() of the
    # original; the results are numerically identical.
    mean = mean.to(input.device)
    std = std.to(input.device)
    output = input.sub(mean).div(std)
    # (..., H, W, C) -> (..., C, H, W): restore the channel-first layout.
    output = output.transpose(-1, -3).transpose(-2, -1)
    return output
def load_base_model(args):
    """Load the base (pseudo-labeling) model from ``args.base_model_path``.

    The checkpoint may be a raw state_dict or a dict wrapping one under
    'state_dict', optionally carrying 'num_classes' and 'normalize_input'.
    Key prefixes left over from ``nn.DataParallel``/``nn.Sequential``
    wrapping ('module.', 'module.0.', 'module.1.') are rewritten to match
    how the model is reconstructed here.

    Args:
        args: parsed CLI namespace; uses base_model_path, base_model and
            base_num_classes.

    Returns:
        The loaded model, wrapped in DataParallel on CUDA machines.
    """
    checkpoint = torch.load(args.base_model_path)
    # Checkpoint may be a bare state_dict or a wrapper dict.
    state_dict = checkpoint.get('state_dict', checkpoint)
    num_classes = checkpoint.get('num_classes', args.base_num_classes)
    normalize_input = checkpoint.get('normalize_input', False)
    print("checking if input normalized")
    print(normalize_input)
    logging.info("using %s model for evaluation from path %s" %(args.base_model, args.base_model_path))
    base_model = get_model(args.base_model, num_classes=num_classes,
                           normalize_input=normalize_input)
    if use_cuda:
        base_model = torch.nn.DataParallel(base_model).cuda()
        cudnn.benchmark = True

        def strip_data_parallel(s):
            # 'module.1.*' -> 'module.*' (drop the Sequential index);
            # 'module.0.*' entries (presumably a normalization sub-module —
            # TODO confirm) are discarded by returning None.
            if s.startswith('module.1'):
                return 'module.' + s[len('module.1.'):]
            elif s.startswith('module.0'):
                return None
            else:
                return s

        if not all([k.startswith('module') for k in state_dict]):
            # Checkpoint was saved without DataParallel: add the prefix the
            # wrapped model expects.
            state_dict = {'module.' + k: v for k, v in state_dict.items()}
        new_state_dict = {}
        for k, v in state_dict.items():
            k_new = strip_data_parallel(k)
            if k_new:
                new_state_dict[k_new] = v
        state_dict = new_state_dict
    else:
        def strip_data_parallel(s):
            # CPU path: remove the DataParallel prefix entirely.
            if s.startswith('module.1'):
                return s[len('module.1.'):]
            elif s.startswith('module.0'):
                return None
            if s.startswith('module'):
                return s[len('module.'):]
            else:
                return s

        # Bug fix: drop entries whose key maps to None ('module.0.*').
        # Previously they were kept under a literal None key, which made
        # load_state_dict fail on CPU-only machines.
        state_dict = {strip_data_parallel(k): v for k, v in state_dict.items()
                      if strip_data_parallel(k)}
    base_model.load_state_dict(state_dict)
    return base_model
def parse_args():
    """Parse CLI arguments and bundle them into nested config dicts.

    Returns:
        (config, args): ``config`` is an OrderedDict of model/optim/data/run
        sub-configs consumed downstream; ``args`` is the raw argparse
        namespace (some fields are only read directly from ``args``).
    """
    parser = argparse.ArgumentParser()
    # model config
    # parser.add_argument('--model', type=str, default='wrn-28-10')
    parser.add_argument('--dataset', type=str, default='custom', help='The dataset',
                        choices=['cifar10', 'svhn', 'custom', 'cinic10', 'benrecht_cifar10', 'tinyimages', 'unlabeled_percy_500k'])
    # detector model config
    parser.add_argument('--detector-model', default='wrn-28-10', type=str, help='Name of the detector model (see utils.get_model)')
    parser.add_argument('--use-old-detector', default=0, type=int, help='Use detector model for evaluation')
    parser.add_argument('--detector_model_path', default = 'selection_model/selection_model.pth', type = str, help='Model for attack evaluation')
    parser.add_argument('--n_classes', type=int, default=11, help='Number of classes for detector model')
    parser.add_argument('--random_split_version', type=int, default=2, help='Version of random split')
    # base model configs
    parser.add_argument('--also-use-base-model', default=0, type=int, help='Use base model for confusion matrix evaluation')
    parser.add_argument('--base_model_path', help='Base Model path')
    parser.add_argument('--base_model', '-bm', default='resnet-20', type=str, help='Name of the base model')
    parser.add_argument('--base_num_classes', type=int, default=10, help='Number of classes for base model')
    parser.add_argument('--base_normalize', type=int, default=0, help='Normalze input for base model')
    # run config
    parser.add_argument('--output_dir', default='selection_model',type=str, required=True)
    parser.add_argument('--test_name', default='', help='Test name to give proper subdirectory to model for saving checkpoint')
    parser.add_argument('--data_dir', type=str, default='data')
    parser.add_argument('--seed', type=int, default=17)
    parser.add_argument('--num_workers', type=int, default=7)
    parser.add_argument('--device', type=str, default='cuda')
    parser.add_argument('--save_freq', type=int, default=10)
    parser.add_argument('--store_to_dataframe', default=0, type=int, help='Store confidences to dataframe')
    # Semi-supervised training configuration
    parser.add_argument('--aux_data_filename', default='ti_500K_pseudo_labeled.pickle', type=str,
                        help='Path to pickle file containing unlabeled data and pseudo-labels used for RST')
    parser.add_argument('--train_take_amount', default=None, type=int, help='Number of random aux examples to retain. None retains all aux data.')
    parser.add_argument('--aux_take_amount', default=None, type=int, help='Number of random aux examples to retain. '
                        'None retains all aux data.')
    parser.add_argument('--remove_pseudo_labels', action='store_true', default=False, help='Performs training without pseudo-labels (rVAT)')
    parser.add_argument('--entropy_weight', type=float, default=0.0, help='Weight on entropy loss')
    # optim config
    parser.add_argument('--epochs', type=int, default=50)
    parser.add_argument('--batch_size', type=int, default=128)
    parser.add_argument('--base_lr', type=float, default=0.2)
    parser.add_argument('--weight_decay', type=float, default=1e-4)
    parser.add_argument('--momentum', type=float, default=0.9)
    parser.add_argument('--nesterov', type=str2bool, default=True)
    parser.add_argument('--lr_min', type=float, default=0)
    #train configs
    parser.add_argument('--num_images', type=int, help='Number of images in dataset')
    parser.add_argument('--even_odd', type=int, default = 0, help='Filter train, test data for even odd indices')
    parser.add_argument('--ti_start_index', type=int, default=0, help='Starting index of image')
    parser.add_argument('--load_ti_head_tail', type=int, default = 0, help='Load ti head tail indices')
    parser.add_argument('--class11_weight', type=float, default=0.1)
    parser.add_argument('--use_ti_data_for_training', default=1, type=int, help='Whether to use ti data for training')
    args = parser.parse_args()
    # 10 CIFAR10 classes and one non-CIFAR10 class
    model_config = OrderedDict([
        # ('name', args.model),
        ('n_classes', args.n_classes),
        ('detector_model_name', args.detector_model),
        ('use_old_detector', args.use_old_detector),
        ('detector_model_path', args.detector_model_path)
    ])
    optim_config = OrderedDict([
        ('epochs', args.epochs),
        ('batch_size', args.batch_size),
        ('base_lr', args.base_lr),
        ('weight_decay', args.weight_decay),
        ('momentum', args.momentum),
        ('nesterov', args.nesterov),
        ('lr_min', args.lr_min),
        # Fraction of each batch drawn from CIFAR10 — hard-coded here, not a
        # CLI flag.
        ('cifar10_fraction', 0.5)
    ])
    data_config = OrderedDict([
        ('dataset', 'CIFAR10VsTinyImages'),
        ('dataset_dir', args.data_dir),
    ])
    run_config = OrderedDict([
        ('seed', args.seed),
        ('outdir', args.output_dir),
        ('num_workers', args.num_workers),
        ('device', args.device),
        ('save_freq', args.save_freq),
    ])
    config = OrderedDict([
        ('model_config', model_config),
        ('optim_config', optim_config),
        ('data_config', data_config),
        ('run_config', run_config),
    ])
    return config, args
class AverageMeter:
    """Running-average tracker: keeps last value, sum, count, and mean."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out every statistic."""
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, num):
        """Fold in ``val`` observed over ``num`` samples."""
        self.val = val
        self.sum = self.sum + val * num
        self.count = self.count + num
        self.avg = self.sum / self.count
def _cosine_annealing(step, total_steps, lr_max, lr_min):
return lr_min + (lr_max - lr_min) * 0.5 * (
1 + np.cos(step / total_steps * np.pi))
def get_cosine_annealing_scheduler(optimizer, optim_config):
    """Build a LambdaLR that cosine-anneals from base_lr down to lr_min
    over ``epochs * steps_per_epoch`` scheduler steps."""
    total_steps = optim_config['epochs'] * optim_config['steps_per_epoch']
    lr_min_factor = optim_config['lr_min'] / optim_config['base_lr']

    def annealing_factor(step):
        # lr_max is 1 because LambdaLR applies a multiplicative factor on
        # top of the optimizer's base learning rate.
        return _cosine_annealing(step, total_steps, 1, lr_min_factor)

    return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=annealing_factor)
def train(epoch, model, optimizer, scheduler, criterion, train_loader,
          run_config):
    """Run one training epoch of the 11-way detector.

    Tracks overall accuracy, accuracy restricted to the 10 CIFAR classes
    (labels != 10), and binary CIFAR-vs-TinyImages accuracy (class 10 is
    the "not CIFAR" bucket).

    Returns:
        OrderedDict with epoch-averaged loss/accuracy metrics and timing.
    """
    global global_step
    logging.info('Train {}'.format(epoch))
    model.train()
    device = torch.device(run_config['device'])
    loss_meter = AverageMeter()
    accuracy_meter = AverageMeter()
    accuracy_c10_meter = AverageMeter()
    accuracy_c10_v_ti_meter = AverageMeter()
    start = time.time()
    # Per-class counts of targets seen this epoch (10 CIFAR classes + TI).
    class_counts = np.zeros(11)
    # NOTE: the loader also yields per-example indices; `index` is unused.
    for step, (data, targets, index) in enumerate(train_loader):
        global_step += 1
        # LR scheduler is stepped once per batch (cosine anneal over all steps).
        scheduler.step()
        data = data.to(device)
        targets = targets.to(device)
        optimizer.zero_grad()
        outputs = model(data)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()
        _, preds = torch.max(outputs, dim=1)
        unique_targets = np.array(targets.unique(return_counts=True)[0].cpu())
        unique_counts = np.array(targets.unique(return_counts=True)[1].cpu())
        class_counts[unique_targets] = class_counts[unique_targets] + unique_counts
        if step == 0:
            # Debug prints for the first batch only.
            print(data[1,:])
            print(outputs[1,:])
            print(preds)
            # print(indexes)
            print(targets)
        loss_ = loss.item()
        correct_ = preds.eq(targets).sum().item()
        num = data.size(0)
        accuracy = correct_ / num
        loss_meter.update(loss_, num)
        accuracy_meter.update(accuracy, num)
        # Mask of examples whose label is one of the 10 CIFAR classes.
        is_c10 = targets != 10
        num_c10 = is_c10.float().sum().item()
        # Computing cifar10 accuracy
        if num_c10 > 0:
            # Argmax restricted to the 10 CIFAR logits.
            _, preds_c10 = torch.max(outputs[is_c10, :10], dim=1)
            correct_c10_ = preds_c10.eq(targets[is_c10]).sum().item()
            accuracy_c10_meter.update(correct_c10_ / num_c10, num_c10)
        # Computing cifar10 vs. ti accuracy
        correct_c10_v_ti_ = (preds != 10).float().eq(
            is_c10.float()).sum().item()
        accuracy_c10_v_ti_meter.update(correct_c10_v_ti_ / num, num)
        if step % 100 == 0:
            logging.info('Epoch {} Step {}/{} '
                         'Loss {:.4f} ({:.4f}) '
                         'Accuracy {:.4f} ({:.4f}) '
                         'C10 Acc {:.4f} ({:.4f}) '
                         'Vs Acc {:.4f} ({:.4f})'.format(
                             epoch,
                             step,
                             len(train_loader),
                             loss_meter.val,
                             loss_meter.avg,
                             accuracy_meter.val,
                             accuracy_meter.avg,
                             accuracy_c10_meter.val,
                             accuracy_c10_meter.avg,
                             accuracy_c10_v_ti_meter.val,
                             accuracy_c10_v_ti_meter.avg
                         ))
    elapsed = time.time() - start
    logging.info('Target class count: '+str(class_counts))
    logging.info('Elapsed {:.2f}'.format(elapsed))
    train_log = OrderedDict({
        'epoch':
        epoch,
        'train':
        OrderedDict({
            'loss': loss_meter.avg,
            'accuracy': accuracy_meter.avg,
            'accuracy_c10': accuracy_c10_meter.avg,
            'accuracy_vs': accuracy_c10_v_ti_meter.avg,
            'time': elapsed,
        }),
    })
    return train_log
def test(args, epoch, model, criterion, test_loader, run_config, mean, std, base_model=None, dataframe_file=None):
    """Evaluate the detector (and optionally a base classifier) on a loader.

    Computes: loss; accuracy among the 10 CIFAR classes on CIFAR-labeled
    examples; binary CIFAR-vs-TI accuracy; accuracy restricted to examples
    the detector predicts as CIFAR / as TI; and the same breakdowns for the
    optional ``base_model``. When ``args.store_to_dataframe`` is set,
    per-example outputs are dumped to ``dataframe_file`` as CSV.

    Returns:
        OrderedDict with epoch metrics (loss, accuracy_c10, accuracy_vs, time).
    """
    logging.info('Test {}'.format(epoch))
    dataset = args.dataset
    model.eval()
    if base_model != None:
        base_model.eval()
    device = torch.device(run_config['device'])
    loss_meter = AverageMeter()
    correct_c10_meter = AverageMeter()
    correct_c10_v_ti_meter = AverageMeter()
    correct_on_predc10_meter = AverageMeter()
    pseudocorrect_on_predti_meter = AverageMeter()
    start = time.time()
    # Running totals; several (count_total, ti_*) are never updated below.
    count_total = 0
    c10_correct_total = 0
    c10_count_total = 0
    ti_count_total = 0
    ti_correct_total = 0
    total = 0
    vs_correct_total = 0
    predc10_correct_total = 0
    predc10_count_total = 0
    predti_pseudocorrect_total = 0
    predti_count_total = 0
    base_c10_correct_total = 0
    base_predc10_correct_total = 0
    base_predti_correct_total = 0
    base_c10_count_total = 0
    with torch.no_grad():
        softmax = torch.nn.Softmax(dim=1)
        # Max softmax confidences, split by predicted CIFAR / non-CIFAR.
        cifar_conf = []
        noncifar_conf = []
        noncifar_all_confs = []
        id_list = []
        df = pd.DataFrame()
        for step, (data, targets, indexes) in enumerate(test_loader):
            data = data.to(device)
            targets = targets.to(device)
            id_list = np.array(indexes)
            target_list = targets.cpu().detach().numpy()
            # TODO: This is hacky rn. See the right way to load TinyImages
            if dataset == 'tinyimages':
                # logger.info(f'Tiny images data shape: {data.shape}')
                # NHWC uint8 -> NCHW float; note .type(torch.FloatTensor)
                # yields a CPU tensor here.
                data = data.transpose(1, 3).type(torch.FloatTensor)
                # logger.info(f'Tiny images data shape: {data.shape}')
                targets = targets.type(torch.long)
            # print(data.shape)
            # print(tuple(data.shape))
            # print(torch.transpose(data,1,3).view(-1,*tuple(data_shape[2:])).shape)
            # outputs = model(normalize_func(tensor=data.squeeze(1)).reshape(data_shape))
            outputs = model(mean_std_normalize(data, mean, std))
            loss = criterion(outputs, targets)
            # Convert logits to probabilities for confidence bookkeeping.
            outputs = softmax(outputs)
            conf, preds = torch.max(outputs, dim=1)
            if base_model != None:
                if args.base_normalize:
                    base_outputs = base_model(mean_std_normalize(data, mean, std))
                else:
                    base_outputs = base_model(data)
                base_outputs = softmax(base_outputs)
                _, base_preds = torch.max(base_outputs, dim=1)
            if step == 0:
                # Debug prints for the first batch only.
                print(data[1,:])
                print(outputs[1,:])
                print(preds)
                # print(indexes)
                print(targets)
            if step%100 == 0:
                print(step)
            # is_pred_c10 = preds != 10
            is_predc10 = preds != 10
            is_pred_nonc10 = preds == 10
            cifar_conf.extend(conf[is_predc10].tolist())
            noncifar_conf.extend(conf[is_pred_nonc10].tolist())
            if len(noncifar_all_confs) < 30:
                noncifar_all_confs.extend(outputs[is_pred_nonc10].tolist())
            loss_ = loss.item()
            num = data.size(0)
            loss_meter.update(loss_, num)
            is_c10 = targets != 10
            # cifar10 accuracy
            if is_c10.float().sum() > 0:
                _, preds_c10 = torch.max(outputs[is_c10, :10], dim=1)
                correct_c10_ = preds_c10.eq(targets[is_c10]).sum().item()
                if base_model != None:
                    _, base_preds_c10 = torch.max(base_outputs[is_c10, :10], dim=1)
                    base_c10_correct_total += base_preds_c10.eq(targets[is_c10]).sum().item()
                    base_c10_count_total += is_c10.sum()
                    if step == 0:
                        print("-----------------------------------------------------")
                        print(base_preds_c10)
                        print(preds_c10)
                        print(targets)
                c10_correct_total += correct_c10_
                c10_count_total += is_c10.sum()
                correct_c10_meter.update(correct_c10_, 1)
            # cifar10 vs. TI accuracy
            correct_c10_v_ti_ = (is_predc10).eq(is_c10).sum().item()
            correct_c10_v_ti_meter.update(correct_c10_v_ti_, 1)
            total += len(targets)
            vs_correct_total += correct_c10_v_ti_
            # print("Step %d, batch size %d, correct_c10_vs_ti_count %d" %(step, len(targets), correct_c10_v_ti_))
            # Accuracy restricted to examples the detector predicts as CIFAR.
            if is_predc10.float().sum() > 0:
                _, preds_on_predc10 = torch.max(outputs[is_predc10, :10], dim=1)
                correct_on_predc10_ = preds_on_predc10.eq(targets[is_predc10]).sum().item()
                if base_model != None:
                    _, base_preds_on_predc10 = torch.max(base_outputs[is_predc10, :10], dim=1)
                    base_predc10_correct_total += base_preds_on_predc10.eq(targets[is_predc10]).sum().item()
                predc10_correct_total += correct_on_predc10_
                predc10_count_total += is_predc10.sum()
                correct_on_predc10_meter.update(correct_on_predc10_, 1)
            # "Pseudo-accuracy" on examples the detector predicts as TI,
            # using only the first 10 logits.
            is_predti = preds == 10
            if is_predti.float().sum() > 0:
                _, preds_on_predti = torch.max(outputs[is_predti, :10], dim=1)
                pseudocorrect_on_predti_ = preds_on_predti.eq(targets[is_predti]).sum().item()
                if base_model != None:
                    _, base_preds_on_predti = torch.max(base_outputs[is_predti, :10], dim=1)
                    base_predti_correct_total += base_preds_on_predti.eq(targets[is_predti]).sum().item()
                predti_pseudocorrect_total += pseudocorrect_on_predti_
                predti_count_total += is_predti.sum()
                pseudocorrect_on_predti_meter.update(pseudocorrect_on_predti_, 1)
            if args.store_to_dataframe:
                # NOTE(review): base_outputs/base_preds only exist when
                # base_model is not None — this branch appears to assume
                # store_to_dataframe implies a base model; confirm.
                batch_df = pd.DataFrame(np.column_stack([id_list, target_list, outputs.cpu().detach().numpy(), base_outputs.cpu().detach().numpy(),
                                        preds.cpu().detach().numpy(), base_preds.cpu().detach().numpy(),
                                        is_c10.cpu().detach().numpy(),is_predc10.cpu().detach().numpy(),
                                        is_predti.cpu().detach().numpy()]))
                # print("Batch %d, batch df shape %s" %(step, str(batch_df.shape)))
                df = df.append(batch_df)
    # Currently unused.
    test_targets = np.array(test_loader.dataset.targets)
    # NOTE(review): assumes at least one CIFAR-labeled example in the loader
    # (otherwise this divides by zero).
    accuracy_c10 = ((c10_correct_total * 1.0) /
                    (c10_count_total*1.0))
    accuracy_vs = ((correct_c10_v_ti_meter.sum*1.0) / total)
    logging.info('Epoch {} Loss {:.4f} Accuracy inside C10 {:.4f}'
                 ' C10-vs-TI {:.4f}'.format(
                     epoch, loss_meter.avg, accuracy_c10, accuracy_vs))
    logging.info('Cifar10 correct {} Cifar10 sum {} c10-vs-ti correct {},'
                 ' C10-vs-TI-sum {}'.format(
                     c10_correct_total, c10_count_total, correct_c10_v_ti_meter.sum, total))
    logging.info('Cifar10 correct %d, cifar 10 count %d, predicted c10 correct %d, predicted c10 count %d, predicted ti pseudo correct %d ' \
                 'predicted ti count %d' %(c10_correct_total, c10_count_total, predc10_correct_total,
                                           predc10_count_total, predti_pseudocorrect_total, predti_count_total))
    if base_model != None:
        logging.info('base cifar10 correct %d, base predicted c10 correct %d, base predicted TI correct %d'
                     %(base_c10_correct_total, base_predc10_correct_total, base_predti_correct_total))
    logging.info('CIFAR count: {}, Non-CIFAR count: {}'.format(len(cifar_conf), len(noncifar_conf)))
    elapsed = time.time() - start
    if args.store_to_dataframe:
        df.to_csv(dataframe_file, index = False)
    # plot_histogram(cifar_conf, noncifar_conf, dataset)
    # print('Non cifar probabilities:')
    # print(noncifar_all_confs)
    test_log = OrderedDict({
        'epoch':
        epoch,
        'test':
        OrderedDict({
            'loss': loss_meter.avg,
            'accuracy_c10': accuracy_c10,
            'accuracy_vs': accuracy_vs,
            'time': elapsed,
        }),
    })
    return test_log
def main():
    """Entry point.

    Parses CLI config, seeds all RNGs, and builds the detector model. Then
    either evaluates a previously trained detector (``use_old_detector``)
    on the requested dataset, or trains a new detector on the
    CIFAR10-vs-TinyImages task, checkpointing every ``save_freq`` epochs.
    """
    # parse command line arguments
    config, args = parse_args()
    output_dir = args.output_dir
    if args.test_name != '':
        output_dir = output_dir + '/' + args.test_name
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # Evaluation runs log per dataset; training runs share one log file.
    if config['model_config']['use_old_detector']:
        output_file = args.dataset + '.log'
    else:
        output_file = 'training.log'
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s | %(message)s",
        handlers=[
            logging.FileHandler(os.path.join(output_dir, output_file)),
            logging.StreamHandler()
        ])
    logger = logging.getLogger()
    dataframe_file = output_dir + '/' + args.dataset + '.csv'
    logger.info(json.dumps(config, indent=2))
    run_config = config['run_config']
    optim_config = config['optim_config']
    data_config = config['data_config']
    # set random seed (torch / numpy / python) for reproducibility
    seed = run_config['seed']
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    # create output directory
    # outdir = pathlib.Path(run_config['outdir'])
    # outdir.mkdir(exist_ok=True, parents=True)
    save_freq = run_config['save_freq']
    # save config as json file in output directory
    outpath = os.path.join(output_dir, 'config.json')
    with open(outpath, 'w') as fout:
        json.dump(config, fout, indent=2)
    custom_testset = None
    # if args.dataset == 'custom':
    #     custom_dataset = get_new_distribution_loader()
    #     print("custom dataset loaded ....")
    #     transform_test = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), ])
    #     mean = torch.tensor([0.4914, 0.4822, 0.4465])
    #     std = torch.tensor([
    #         0.2470, 0.2435, 0.2616])
    #     custom_testset = SemiSupervisedDataset(base_dataset=args.dataset,
    #                                            train=False, root='data',
    #                                            download=True,
    #                                            custom_dataset = custom_dataset,
    #                                            transform=transform_test)
    # mean, std =
    # data loaders
    # model
    # Detector model: typically 11-way (10 CIFAR classes + one "non-CIFAR").
    model = get_model(config['model_config']['detector_model_name'],
                      num_classes=config['model_config']['n_classes'],
                      normalize_input=True)
    model = torch.nn.DataParallel(model.cuda())
    n_params = sum([param.view(-1).size()[0] for param in model.parameters()])
    logger.info('n_params: {}'.format(n_params))
    # Down-weight class 11 so abundant non-CIFAR examples don't dominate.
    if args.n_classes == 11:
        weight = torch.Tensor([1] * 10 + [args.class11_weight])
    else:
        weight = torch.Tensor([1]* args.n_classes)
    criterion = nn.CrossEntropyLoss(reduction='mean',
                                    weight=weight).cuda()
    # CIFAR-10 channel statistics passed to mean_std_normalize in test().
    mean = torch.tensor([0.4914, 0.4822, 0.4465])
    std = torch.tensor([0.2470, 0.2435, 0.2616])
    if args.also_use_base_model:
        base_model = load_base_model(args)
    else:
        base_model = None
    if config['model_config']['use_old_detector']:
        # ---- evaluation-only path ----
        logging.info("Using old detector model for evaluation")
        model = load_detector_model(args)
        dl_kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
        if args.dataset == 'benrecht_cifar10' or args.dataset == 'cifar10' or args.dataset == 'cinic10':
            # custom_dataset = get_new_distribution_loader()
            # print("custom dataset loaded ....")
            transform_test = transforms.Compose([transforms.ToTensor(), ])
            testset = SemiSupervisedDataset(base_dataset=args.dataset,
                                            train=False, root='data',
                                            download=True,
                                            transform=transform_test)
            trainset = SemiSupervisedDataset(base_dataset=args.dataset,
                                             train=True, root='data',
                                             download=True,
                                             transform=transform_test)
            test_loader = torch.utils.data.DataLoader(testset,
                                                      batch_size=args.batch_size,
                                                      shuffle=False, **dl_kwargs)
            train_loader = torch.utils.data.DataLoader(trainset,
                                                       batch_size=args.batch_size,
                                                       shuffle=True, **dl_kwargs)
        elif args.dataset == 'unlabeled_percy_500k':
            print('Loading unlabeled dataset:', args.dataset, '...')
            transform_train = transforms.Compose([transforms.ToTensor(), ])
            trainset = SemiSupervisedDataset(base_dataset=args.dataset,
                                             root=args.data_dir, train=True,
                                             download=True, transform=transform_train,
                                             aux_data_filename=args.aux_data_filename,
                                             add_aux_labels=not args.remove_pseudo_labels,
                                             aux_take_amount=args.aux_take_amount)
            kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
            train_loader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size,
                                                       shuffle=True, **kwargs)
            # Evaluate directly on the (pseudo-labeled) training data.
            test_loader = train_loader
        elif args.dataset == 'cifar10_vs_tinyimages':
            test_loader, _ = get_cifar10_vs_ti_loader(
                optim_config['batch_size'],
                run_config['num_workers'],
                run_config['device'] != 'cpu',
                args.num_images,
                optim_config['cifar10_fraction'],
                dataset_dir=data_config['dataset_dir'],
                even_odd=args.even_odd,
                load_ti_head_tail=args.load_ti_head_tail,
                random_split_version=args.random_split_version,
                ti_start_index=args.ti_start_index,
                logger=logger)
        elif args.dataset == 'tinyimages':
            test_loader = get_tinyimages_loader(
                optim_config['batch_size'],
                dataset_dir='data/unlabeled_datasets/80M_Tiny_Images/tiny_images_outside_U.bin',
                logger=logger,
                num_images=249999
            )
        # normalize_func = transforms.Normalize(mean.unsqueeze(0),std.unsqueeze(0))
        logger.info('Instantiated data loaders')
        test(args, 0, model, criterion, test_loader, run_config, mean, std, base_model=base_model, dataframe_file=dataframe_file)
    else:
        # ---- training path ----
        train_loader, test_loader = get_cifar10_vs_ti_loader(
            optim_config['batch_size'],
            run_config['num_workers'],
            run_config['device'] != 'cpu',
            args.num_images,
            optim_config['cifar10_fraction'],
            dataset_dir=data_config['dataset_dir'],
            even_odd = args.even_odd,
            load_ti_head_tail = args.load_ti_head_tail,
            use_ti_data_for_training = args.use_ti_data_for_training,
            random_split_version = args.random_split_version,
            ti_start_index = args.ti_start_index,
            logger=logger)
        # optimizer
        # optim_config['steps_per_epoch'] = len(train_loader)
        # optimizer = torch.optim.SGD(
        #     model.parameters(),
        #     lr=optim_config['base_lr'],
        #     momentum=optim_config['momentum'],
        #     weight_decay=optim_config['weight_decay'],
        #     nesterov=optim_config['nesterov'])
        # scheduler = get_cosine_annealing_scheduler(optimizer, optim_config)
        # NOTE(review): `optimizer` and `scheduler` are used in the training
        # loop below, but their construction (the commented block above) is
        # disabled -- as written the training path raises NameError on the
        # first epoch. Re-enable the block above to train.
        # run test before start training
        test(args, 0, model, criterion, test_loader, run_config, mean, std, base_model = base_model, dataframe_file = dataframe_file)
        epoch_logs = []
        # Checkpoint-name suffix records which data half was used.
        if args.even_odd >= 0:
            if args.even_odd:
                suffix = 'head'
            else:
                suffix = 'tail'
        else:
            suffix = ''
        for epoch in range(1, optim_config['epochs'] + 1):
            train_log = train(epoch, model, optimizer, scheduler, criterion,
                              train_loader, run_config)
            test_log = test(args, epoch, model, criterion, test_loader, run_config, mean, std, base_model = base_model, dataframe_file = dataframe_file)
            epoch_log = train_log.copy()
            epoch_log.update(test_log)
            epoch_logs.append(epoch_log)
            # with open(os.path.join(output_dir, 'log.json'), 'w') as fout:
            #     json.dump(epoch_logs, fout, indent=2)
            if epoch % save_freq == 0 or epoch == optim_config['epochs']:
                state = OrderedDict([
                    ('config', config),
                    ('state_dict', model.state_dict()),
                    ('optimizer', optimizer.state_dict()),
                    ('epoch', epoch),
                    ('accuracy_vs', test_log['test']['accuracy_vs']),
                ])
                model_path = os.path.join(output_dir,('model_state_epoch_%s_%d.pth' % (suffix, epoch)))
                torch.save(state, model_path)
                print("Saved model for path %s" %(model_path))
        # Final evaluation after training completes.
        test(args, 0, model, criterion, test_loader, run_config, mean, std, base_model = base_model, dataframe_file = dataframe_file)
if __name__ == '__main__':
main()
| """
Train data sourcing model. Based on code from
https://github.com/hysts/pytorch_shake_shake
"""
import argparse
from collections import OrderedDict
import importlib
import json
import logging
import pathlib
import random
import time
import numpy as np
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision
from torchvision import transforms
from utils import *
from dataloader import *
from datasets import SemiSupervisedDataset, DATASETS
from diff_distribution_dataload_helper import get_new_distribution_loader
import pdb
import pandas as pd
from dataloader import get_cifar10_vs_ti_loader, get_tinyimages_loader
# Let cuDNN autotune convolution algorithms for the fixed input sizes used here.
torch.backends.cudnn.benchmark = True
# logging.basicConfig(
#     format='[%(asctime)s %(name)s %(levelname)s] - %(message)s',
#     datefmt='%Y/%m/%d %H:%M:%S',
#     level=logging.INFO)
# logger = logging.getLogger(__name__)
# Total optimizer steps taken across all epochs (incremented in train()).
global_step = 0
use_cuda = torch.cuda.is_available()
def str2bool(s):
    """Parse a case-insensitive 'true'/'false' string into a bool.

    Raises:
        RuntimeError: if the string is neither 'true' nor 'false'.
    """
    normalized = s.lower()
    if normalized not in ('true', 'false'):
        raise RuntimeError('Boolean value expected')
    return normalized == 'true'
def mean_std_normalize(input, mean, std):
    """Channel-wise (x - mean) / std normalization, performed on the GPU.

    Assumes `input` is channels-first, e.g. (N, C, H, W), with
    C == len(mean) == len(std) -- the leading transposes move the channel
    axis last so the 1-D mean/std can broadcast per channel, and the
    trailing transposes restore the original layout (verified for 4-D
    NCHW inputs; TODO confirm for other ranks).
    """
    # logger.info(f'Mean standard normalize input shape: {input.shape}')
    input = input.transpose(-1,-3).transpose(-2,-3).cuda()
    assert input.shape[-1] == mean.shape[-1], f"last input dimension, {input.shape} does not match mean dimension, {mean.shape}"
    assert input.shape[-1] == std.shape[-1], f"last input dimension, {input.shape} does not match std dimension, {std.shape}"
    # Tile mean/std up to the full input shape before the elementwise ops.
    mean = mean.repeat(*list(input.shape[:-1]), 1).cuda()
    std = std.repeat(*list(input.shape[:-1]), 1).cuda()
    output = input.sub(mean).div(std)
    output = output.transpose(-1,-3).transpose(-2,-1)
    return output
def load_base_model(args):
    """Load the base classifier checkpoint used for side-by-side evaluation.

    Handles checkpoints saved either as a raw state_dict or as a dict with
    'state_dict' / 'num_classes' / 'normalize_input' entries, and remaps
    DataParallel-style key prefixes ('module.', 'module.0.', 'module.1.')
    to match the module layout built here.
    """
    checkpoint = torch.load(args.base_model_path)
    state_dict = checkpoint.get('state_dict', checkpoint)
    num_classes = checkpoint.get('num_classes', args.base_num_classes)
    normalize_input = checkpoint.get('normalize_input', False)
    print("checking if input normalized")
    print(normalize_input)
    logging.info("using %s model for evaluation from path %s" %(args.base_model, args.base_model_path))
    base_model = get_model(args.base_model, num_classes=num_classes, normalize_input=normalize_input)
    if use_cuda:
        base_model = torch.nn.DataParallel(base_model).cuda()
        cudnn.benchmark = True

        def strip_data_parallel(s):
            # 'module.1.*' presumably came from Sequential(normalizer, model)
            # inside DataParallel; 'module.0.*' (the normalizer) is dropped.
            if s.startswith('module.1'):
                return 'module.' + s[len('module.1.'):]
            elif s.startswith('module.0'):
                return None
            else:
                return s
        if not all([k.startswith('module') for k in state_dict]):
            # Checkpoint was saved without DataParallel: add the 'module.'
            # prefix, then drop/rename nested wrapper keys.
            state_dict = {'module.' + k: v for k, v in state_dict.items()}
            new_state_dict = {}
            for k,v in state_dict.items():
                k_new = strip_data_parallel(k)
                if k_new:
                    new_state_dict[k_new] = v
            state_dict = new_state_dict
            # state_dict = {strip_data_parallel(k): v for k, v in state_dict.items()}
    else:
        def strip_data_parallel(s):
            if s.startswith('module.1'):
                return s[len('module.1.'):]
            elif s.startswith('module.0'):
                return None
            if s.startswith('module'):
                return s[len('module.'):]
            else:
                return s
        # NOTE(review): on this CPU path strip_data_parallel may return None
        # for 'module.0.*' keys, which would insert a None key into the dict;
        # confirm checkpoints loaded on CPU never contain such keys.
        state_dict = {strip_data_parallel(k): v for k, v in state_dict.items()}
    base_model.load_state_dict(state_dict)
    return base_model
def parse_args():
    """Build the CLI parser and return ``(config, args)``.

    ``config`` groups options into model / optim / data / run OrderedDicts
    consumed by main(); ``args`` is the raw argparse namespace (several
    options, e.g. the base-model settings, are read from it directly).
    """
    parser = argparse.ArgumentParser()
    # model config
    # parser.add_argument('--model', type=str, default='wrn-28-10')
    parser.add_argument('--dataset', type=str, default='custom', help='The dataset',
                        choices=['cifar10', 'svhn', 'custom', 'cinic10', 'benrecht_cifar10', 'tinyimages', 'unlabeled_percy_500k'])
    # detector model config
    parser.add_argument('--detector-model', default='wrn-28-10', type=str, help='Name of the detector model (see utils.get_model)')
    parser.add_argument('--use-old-detector', default=0, type=int, help='Use detector model for evaluation')
    parser.add_argument('--detector_model_path', default = 'selection_model/selection_model.pth', type = str, help='Model for attack evaluation')
    parser.add_argument('--n_classes', type=int, default=11, help='Number of classes for detector model')
    parser.add_argument('--random_split_version', type=int, default=2, help='Version of random split')
    # base model configs (only used when --also-use-base-model is set)
    parser.add_argument('--also-use-base-model', default=0, type=int, help='Use base model for confusion matrix evaluation')
    parser.add_argument('--base_model_path', help='Base Model path')
    parser.add_argument('--base_model', '-bm', default='resnet-20', type=str, help='Name of the base model')
    parser.add_argument('--base_num_classes', type=int, default=10, help='Number of classes for base model')
    parser.add_argument('--base_normalize', type=int, default=0, help='Normalze input for base model')
    # run config
    parser.add_argument('--output_dir', default='selection_model',type=str, required=True)
    parser.add_argument('--test_name', default='', help='Test name to give proper subdirectory to model for saving checkpoint')
    parser.add_argument('--data_dir', type=str, default='data')
    parser.add_argument('--seed', type=int, default=17)
    parser.add_argument('--num_workers', type=int, default=7)
    parser.add_argument('--device', type=str, default='cuda')
    parser.add_argument('--save_freq', type=int, default=10)
    parser.add_argument('--store_to_dataframe', default=0, type=int, help='Store confidences to dataframe')
    # Semi-supervised training configuration
    parser.add_argument('--aux_data_filename', default='ti_500K_pseudo_labeled.pickle', type=str,
                        help='Path to pickle file containing unlabeled data and pseudo-labels used for RST')
    parser.add_argument('--train_take_amount', default=None, type=int, help='Number of random aux examples to retain. None retains all aux data.')
    parser.add_argument('--aux_take_amount', default=None, type=int, help='Number of random aux examples to retain. '
                        'None retains all aux data.')
    parser.add_argument('--remove_pseudo_labels', action='store_true', default=False, help='Performs training without pseudo-labels (rVAT)')
    parser.add_argument('--entropy_weight', type=float, default=0.0, help='Weight on entropy loss')
    # optim config
    parser.add_argument('--epochs', type=int, default=50)
    parser.add_argument('--batch_size', type=int, default=128)
    parser.add_argument('--base_lr', type=float, default=0.2)
    parser.add_argument('--weight_decay', type=float, default=1e-4)
    parser.add_argument('--momentum', type=float, default=0.9)
    parser.add_argument('--nesterov', type=str2bool, default=True)
    parser.add_argument('--lr_min', type=float, default=0)
    # train configs
    parser.add_argument('--num_images', type=int, help='Number of images in dataset')
    parser.add_argument('--even_odd', type=int, default = 0, help='Filter train, test data for even odd indices')
    parser.add_argument('--ti_start_index', type=int, default=0, help='Starting index of image')
    parser.add_argument('--load_ti_head_tail', type=int, default = 0, help='Load ti head tail indices')
    parser.add_argument('--class11_weight', type=float, default=0.1)
    parser.add_argument('--use_ti_data_for_training', default=1, type=int, help='Whether to use ti data for training')
    args = parser.parse_args()
    # 10 CIFAR10 classes and one non-CIFAR10 class
    model_config = OrderedDict([
        # ('name', args.model),
        ('n_classes', args.n_classes),
        ('detector_model_name', args.detector_model),
        ('use_old_detector', args.use_old_detector),
        ('detector_model_path', args.detector_model_path)
    ])
    optim_config = OrderedDict([
        ('epochs', args.epochs),
        ('batch_size', args.batch_size),
        ('base_lr', args.base_lr),
        ('weight_decay', args.weight_decay),
        ('momentum', args.momentum),
        ('nesterov', args.nesterov),
        ('lr_min', args.lr_min),
        ('cifar10_fraction', 0.5)
    ])
    data_config = OrderedDict([
        ('dataset', 'CIFAR10VsTinyImages'),
        ('dataset_dir', args.data_dir),
    ])
    run_config = OrderedDict([
        ('seed', args.seed),
        ('outdir', args.output_dir),
        ('num_workers', args.num_workers),
        ('device', args.device),
        ('save_freq', args.save_freq),
    ])
    config = OrderedDict([
        ('model_config', model_config),
        ('optim_config', optim_config),
        ('data_config', data_config),
        ('run_config', run_config),
    ])
    return config, args
class AverageMeter:
    """Running average of a scalar metric, weighted by sample count.

    Exposes the latest value (`val`), the weighted sum (`sum`), the total
    sample count (`count`), and the running weighted average (`avg`).
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out all tracked statistics."""
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, num):
        """Fold in `val`, observed over `num` samples."""
        self.val = val
        self.sum += val * num
        self.count += num
        self.avg = self.sum / self.count
def _cosine_annealing(step, total_steps, lr_max, lr_min):
return lr_min + (lr_max - lr_min) * 0.5 * (
1 + np.cos(step / total_steps * np.pi))
def get_cosine_annealing_scheduler(optimizer, optim_config):
    """Build a LambdaLR that cosine-anneals the LR over the whole run.

    The schedule spans epochs * steps_per_epoch optimizer steps. LambdaLR
    multiplies base_lr by the lambda's return value, so the multiplier
    anneals from 1.0 down to lr_min / base_lr.
    """
    total_steps = optim_config['epochs'] * optim_config['steps_per_epoch']
    floor = optim_config['lr_min'] / optim_config['base_lr']

    def multiplier(step):
        return _cosine_annealing(step, total_steps, 1, floor)

    return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=multiplier)
def train(epoch, model, optimizer, scheduler, criterion, train_loader,
          run_config):
    """Run one training epoch of the detector.

    Tracks the overall accuracy, accuracy restricted to true-CIFAR examples
    (argmax over the first 10 logits), and the binary CIFAR-vs-TinyImages
    decision accuracy. Returns an OrderedDict log entry for this epoch.
    """
    global global_step
    logging.info('Train {}'.format(epoch))
    model.train()
    device = torch.device(run_config['device'])
    loss_meter = AverageMeter()
    accuracy_meter = AverageMeter()
    accuracy_c10_meter = AverageMeter()
    accuracy_c10_v_ti_meter = AverageMeter()
    start = time.time()
    class_counts = np.zeros(11)
    for step, (data, targets, index) in enumerate(train_loader):
        global_step += 1
        # Scheduler is stepped per batch (cosine annealing over all steps).
        scheduler.step()
        data = data.to(device)
        targets = targets.to(device)
        optimizer.zero_grad()
        outputs = model(data)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()
        _, preds = torch.max(outputs, dim=1)
        # Accumulate per-class target counts to monitor label balance.
        unique_targets = np.array(targets.unique(return_counts=True)[0].cpu())
        unique_counts = np.array(targets.unique(return_counts=True)[1].cpu())
        class_counts[unique_targets] = class_counts[unique_targets] + unique_counts
        if step == 0:
            # Debug dump of the first batch.
            print(data[1,:])
            print(outputs[1,:])
            print(preds)
            # print(indexes)
            print(targets)
        loss_ = loss.item()
        correct_ = preds.eq(targets).sum().item()
        num = data.size(0)
        accuracy = correct_ / num
        loss_meter.update(loss_, num)
        accuracy_meter.update(accuracy, num)
        # Class index 10 is the "not CIFAR10" class.
        is_c10 = targets != 10
        num_c10 = is_c10.float().sum().item()
        # Computing cifar10 accuracy
        if num_c10 > 0:
            _, preds_c10 = torch.max(outputs[is_c10, :10], dim=1)
            correct_c10_ = preds_c10.eq(targets[is_c10]).sum().item()
            accuracy_c10_meter.update(correct_c10_ / num_c10, num_c10)
        # Computing cifar10 vs. ti accuracy
        correct_c10_v_ti_ = (preds != 10).float().eq(
            is_c10.float()).sum().item()
        accuracy_c10_v_ti_meter.update(correct_c10_v_ti_ / num, num)
        if step % 100 == 0:
            logging.info('Epoch {} Step {}/{} '
                         'Loss {:.4f} ({:.4f}) '
                         'Accuracy {:.4f} ({:.4f}) '
                         'C10 Acc {:.4f} ({:.4f}) '
                         'Vs Acc {:.4f} ({:.4f})'.format(
                             epoch,
                             step,
                             len(train_loader),
                             loss_meter.val,
                             loss_meter.avg,
                             accuracy_meter.val,
                             accuracy_meter.avg,
                             accuracy_c10_meter.val,
                             accuracy_c10_meter.avg,
                             accuracy_c10_v_ti_meter.val,
                             accuracy_c10_v_ti_meter.avg
                         ))
    elapsed = time.time() - start
    logging.info('Target class count: '+str(class_counts))
    logging.info('Elapsed {:.2f}'.format(elapsed))
    train_log = OrderedDict({
        'epoch':
        epoch,
        'train':
        OrderedDict({
            'loss': loss_meter.avg,
            'accuracy': accuracy_meter.avg,
            'accuracy_c10': accuracy_c10_meter.avg,
            'accuracy_vs': accuracy_c10_v_ti_meter.avg,
            'time': elapsed,
        }),
    })
    return train_log
def test(args, epoch, model, criterion, test_loader, run_config, mean, std, base_model=None, dataframe_file=None):
    """Evaluate the detector on `test_loader`.

    Reports loss, accuracy among the 10 CIFAR classes, and the binary
    CIFAR-vs-TinyImages decision accuracy. If `base_model` is given, its
    predictions are scored on the same splits for comparison. When
    `args.store_to_dataframe` is set, per-example outputs are appended to
    a CSV at `dataframe_file`. Returns an OrderedDict log entry.
    """
    logging.info('Test {}'.format(epoch))
    dataset = args.dataset
    model.eval()
    if base_model != None:
        base_model.eval()
    device = torch.device(run_config['device'])
    loss_meter = AverageMeter()
    correct_c10_meter = AverageMeter()
    correct_c10_v_ti_meter = AverageMeter()
    correct_on_predc10_meter = AverageMeter()
    pseudocorrect_on_predti_meter = AverageMeter()
    start = time.time()
    # Running totals split by ground truth (c10 = true CIFAR class) and by
    # prediction (predc10 / predti = predicted CIFAR / predicted "other").
    count_total = 0
    c10_correct_total = 0
    c10_count_total = 0
    ti_count_total = 0
    ti_correct_total = 0
    total = 0
    vs_correct_total = 0
    predc10_correct_total = 0
    predc10_count_total = 0
    predti_pseudocorrect_total = 0
    predti_count_total = 0
    base_c10_correct_total = 0
    base_predc10_correct_total = 0
    base_predti_correct_total = 0
    base_c10_count_total = 0
    with torch.no_grad():
        softmax = torch.nn.Softmax(dim=1)
        cifar_conf = []
        noncifar_conf = []
        noncifar_all_confs = []
        id_list = []
        df = pd.DataFrame()
        for step, (data, targets, indexes) in enumerate(test_loader):
            data = data.to(device)
            targets = targets.to(device)
            id_list = np.array(indexes)
            target_list = targets.cpu().detach().numpy()
            # TODO: This is hacky rn. See the right way to load TinyImages
            if dataset == 'tinyimages':
                # TinyImages batches need the channel axis moved and a cast
                # to float / long before they match the model's input.
                # logger.info(f'Tiny images data shape: {data.shape}')
                data = data.transpose(1, 3).type(torch.FloatTensor)
                # logger.info(f'Tiny images data shape: {data.shape}')
                targets = targets.type(torch.long)
            # print(data.shape)
            # print(tuple(data.shape))
            # print(torch.transpose(data,1,3).view(-1,*tuple(data_shape[2:])).shape)
            # outputs = model(normalize_func(tensor=data.squeeze(1)).reshape(data_shape))
            outputs = model(mean_std_normalize(data, mean, std))
            loss = criterion(outputs, targets)
            outputs = softmax(outputs)
            conf, preds = torch.max(outputs, dim=1)
            if base_model != None:
                if args.base_normalize:
                    base_outputs = base_model(mean_std_normalize(data, mean, std))
                else:
                    base_outputs = base_model(data)
                base_outputs = softmax(base_outputs)
                _, base_preds = torch.max(base_outputs, dim=1)
            if step == 0:
                # Debug dump of the first batch.
                print(data[1,:])
                print(outputs[1,:])
                print(preds)
                # print(indexes)
                print(targets)
            if step%100 == 0:
                print(step)
            # Class index 10 is the "not CIFAR10" class.
            # is_pred_c10 = preds != 10
            is_predc10 = preds != 10
            is_pred_nonc10 = preds == 10
            cifar_conf.extend(conf[is_predc10].tolist())
            noncifar_conf.extend(conf[is_pred_nonc10].tolist())
            if len(noncifar_all_confs) < 30:
                noncifar_all_confs.extend(outputs[is_pred_nonc10].tolist())
            loss_ = loss.item()
            num = data.size(0)
            loss_meter.update(loss_, num)
            is_c10 = targets != 10
            # cifar10 accuracy
            if is_c10.float().sum() > 0:
                _, preds_c10 = torch.max(outputs[is_c10, :10], dim=1)
                correct_c10_ = preds_c10.eq(targets[is_c10]).sum().item()
                if base_model != None:
                    _, base_preds_c10 = torch.max(base_outputs[is_c10, :10], dim=1)
                    base_c10_correct_total += base_preds_c10.eq(targets[is_c10]).sum().item()
                    base_c10_count_total += is_c10.sum()
                    if step == 0:
                        print("-----------------------------------------------------")
                        print(base_preds_c10)
                        print(preds_c10)
                        print(targets)
                c10_correct_total += correct_c10_
                c10_count_total += is_c10.sum()
                correct_c10_meter.update(correct_c10_, 1)
            # cifar10 vs. TI accuracy
            correct_c10_v_ti_ = (is_predc10).eq(is_c10).sum().item()
            correct_c10_v_ti_meter.update(correct_c10_v_ti_, 1)
            total += len(targets)
            vs_correct_total += correct_c10_v_ti_
            # print("Step %d, batch size %d, correct_c10_vs_ti_count %d" %(step, len(targets), correct_c10_v_ti_))
            # Accuracy restricted to examples the detector predicted as CIFAR.
            if is_predc10.float().sum() > 0:
                _, preds_on_predc10 = torch.max(outputs[is_predc10, :10], dim=1)
                correct_on_predc10_ = preds_on_predc10.eq(targets[is_predc10]).sum().item()
                if base_model != None:
                    _, base_preds_on_predc10 = torch.max(base_outputs[is_predc10, :10], dim=1)
                    base_predc10_correct_total += base_preds_on_predc10.eq(targets[is_predc10]).sum().item()
                predc10_correct_total += correct_on_predc10_
                predc10_count_total += is_predc10.sum()
                correct_on_predc10_meter.update(correct_on_predc10_, 1)
            # "Pseudo" accuracy on examples predicted as non-CIFAR.
            is_predti = preds == 10
            if is_predti.float().sum() > 0:
                _, preds_on_predti = torch.max(outputs[is_predti, :10], dim=1)
                pseudocorrect_on_predti_ = preds_on_predti.eq(targets[is_predti]).sum().item()
                if base_model != None:
                    _, base_preds_on_predti = torch.max(base_outputs[is_predti, :10], dim=1)
                    base_predti_correct_total += base_preds_on_predti.eq(targets[is_predti]).sum().item()
                predti_pseudocorrect_total += pseudocorrect_on_predti_
                predti_count_total += is_predti.sum()
                pseudocorrect_on_predti_meter.update(pseudocorrect_on_predti_, 1)
            if args.store_to_dataframe:
                # NOTE(review): this references base_outputs/base_preds, which
                # only exist when base_model is provided -- storing without a
                # base model would raise NameError; confirm intended usage.
                # NOTE(review): DataFrame.append is deprecated/removed in
                # modern pandas (use pd.concat).
                batch_df = pd.DataFrame(np.column_stack([id_list, target_list, outputs.cpu().detach().numpy(), base_outputs.cpu().detach().numpy(),
                                                         preds.cpu().detach().numpy(), base_preds.cpu().detach().numpy(),
                                                         is_c10.cpu().detach().numpy(),is_predc10.cpu().detach().numpy(),
                                                         is_predti.cpu().detach().numpy()]))
                # print("Batch %d, batch df shape %s" %(step, str(batch_df.shape)))
                df = df.append(batch_df)
    # NOTE(review): test_targets is computed but never used below.
    test_targets = np.array(test_loader.dataset.targets)
    accuracy_c10 = ((c10_correct_total * 1.0) /
                    (c10_count_total*1.0))
    accuracy_vs = ((correct_c10_v_ti_meter.sum*1.0) / total)
    logging.info('Epoch {} Loss {:.4f} Accuracy inside C10 {:.4f}'
                 ' C10-vs-TI {:.4f}'.format(
                     epoch, loss_meter.avg, accuracy_c10, accuracy_vs))
    logging.info('Cifar10 correct {} Cifar10 sum {} c10-vs-ti correct {},'
                 ' C10-vs-TI-sum {}'.format(
                     c10_correct_total, c10_count_total, correct_c10_v_ti_meter.sum, total))
    logging.info('Cifar10 correct %d, cifar 10 count %d, predicted c10 correct %d, predicted c10 count %d, predicted ti pseudo correct %d ' \
                 'predicted ti count %d' %(c10_correct_total, c10_count_total, predc10_correct_total,
                                           predc10_count_total, predti_pseudocorrect_total, predti_count_total))
    if base_model != None:
        logging.info('base cifar10 correct %d, base predicted c10 correct %d, base predicted TI correct %d'
                     %(base_c10_correct_total, base_predc10_correct_total, base_predti_correct_total))
    logging.info('CIFAR count: {}, Non-CIFAR count: {}'.format(len(cifar_conf), len(noncifar_conf)))
    elapsed = time.time() - start
    if args.store_to_dataframe:
        df.to_csv(dataframe_file, index = False)
    # plot_histogram(cifar_conf, noncifar_conf, dataset)
    # print('Non cifar probabilities:')
    # print(noncifar_all_confs)
    test_log = OrderedDict({
        'epoch':
        epoch,
        'test':
        OrderedDict({
            'loss': loss_meter.avg,
            'accuracy_c10': accuracy_c10,
            'accuracy_vs': accuracy_vs,
            'time': elapsed,
        }),
    })
    return test_log
def main():
    """Entry point.

    Parses CLI config, seeds all RNGs, and builds the detector model. Then
    either evaluates a previously trained detector (``use_old_detector``)
    on the requested dataset, or trains a new detector on the
    CIFAR10-vs-TinyImages task, checkpointing every ``save_freq`` epochs.

    Bug fix: the SGD optimizer and cosine-annealing scheduler required by
    the training loop are now actually constructed (they were previously
    commented out, so the training path raised NameError).
    """
    config, args = parse_args()
    output_dir = args.output_dir
    if args.test_name != '':
        output_dir = output_dir + '/' + args.test_name
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # Evaluation runs log per dataset; training runs share one log file.
    if config['model_config']['use_old_detector']:
        output_file = args.dataset + '.log'
    else:
        output_file = 'training.log'
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s | %(message)s",
        handlers=[
            logging.FileHandler(os.path.join(output_dir, output_file)),
            logging.StreamHandler()
        ])
    logger = logging.getLogger()
    dataframe_file = output_dir + '/' + args.dataset + '.csv'
    logger.info(json.dumps(config, indent=2))
    run_config = config['run_config']
    optim_config = config['optim_config']
    data_config = config['data_config']
    # Seed every RNG in play for reproducibility.
    seed = run_config['seed']
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    save_freq = run_config['save_freq']
    # Persist the resolved config next to the logs.
    outpath = os.path.join(output_dir, 'config.json')
    with open(outpath, 'w') as fout:
        json.dump(config, fout, indent=2)
    # Detector model: typically 11-way (10 CIFAR classes + one "non-CIFAR").
    model = get_model(config['model_config']['detector_model_name'],
                      num_classes=config['model_config']['n_classes'],
                      normalize_input=True)
    model = torch.nn.DataParallel(model.cuda())
    n_params = sum(param.view(-1).size()[0] for param in model.parameters())
    logger.info('n_params: {}'.format(n_params))
    # Down-weight class 11 so abundant non-CIFAR examples don't dominate.
    if args.n_classes == 11:
        weight = torch.Tensor([1] * 10 + [args.class11_weight])
    else:
        weight = torch.Tensor([1] * args.n_classes)
    criterion = nn.CrossEntropyLoss(reduction='mean',
                                    weight=weight).cuda()
    # CIFAR-10 channel statistics passed to mean_std_normalize in test().
    mean = torch.tensor([0.4914, 0.4822, 0.4465])
    std = torch.tensor([0.2470, 0.2435, 0.2616])
    if args.also_use_base_model:
        base_model = load_base_model(args)
    else:
        base_model = None
    if config['model_config']['use_old_detector']:
        # ---- evaluation-only path ----
        logging.info("Using old detector model for evaluation")
        model = load_detector_model(args)
        dl_kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
        if args.dataset == 'benrecht_cifar10' or args.dataset == 'cifar10' or args.dataset == 'cinic10':
            transform_test = transforms.Compose([transforms.ToTensor(), ])
            testset = SemiSupervisedDataset(base_dataset=args.dataset,
                                            train=False, root='data',
                                            download=True,
                                            transform=transform_test)
            trainset = SemiSupervisedDataset(base_dataset=args.dataset,
                                             train=True, root='data',
                                             download=True,
                                             transform=transform_test)
            test_loader = torch.utils.data.DataLoader(testset,
                                                      batch_size=args.batch_size,
                                                      shuffle=False, **dl_kwargs)
            train_loader = torch.utils.data.DataLoader(trainset,
                                                       batch_size=args.batch_size,
                                                       shuffle=True, **dl_kwargs)
        elif args.dataset == 'unlabeled_percy_500k':
            print('Loading unlabeled dataset:', args.dataset, '...')
            transform_train = transforms.Compose([transforms.ToTensor(), ])
            trainset = SemiSupervisedDataset(base_dataset=args.dataset,
                                             root=args.data_dir, train=True,
                                             download=True, transform=transform_train,
                                             aux_data_filename=args.aux_data_filename,
                                             add_aux_labels=not args.remove_pseudo_labels,
                                             aux_take_amount=args.aux_take_amount)
            kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
            train_loader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size,
                                                       shuffle=True, **kwargs)
            # Evaluate directly on the (pseudo-labeled) training data.
            test_loader = train_loader
        elif args.dataset == 'cifar10_vs_tinyimages':
            test_loader, _ = get_cifar10_vs_ti_loader(
                optim_config['batch_size'],
                run_config['num_workers'],
                run_config['device'] != 'cpu',
                args.num_images,
                optim_config['cifar10_fraction'],
                dataset_dir=data_config['dataset_dir'],
                even_odd=args.even_odd,
                load_ti_head_tail=args.load_ti_head_tail,
                random_split_version=args.random_split_version,
                ti_start_index=args.ti_start_index,
                logger=logger)
        elif args.dataset == 'tinyimages':
            test_loader = get_tinyimages_loader(
                optim_config['batch_size'],
                dataset_dir='data/unlabeled_datasets/80M_Tiny_Images/tiny_images_outside_U.bin',
                logger=logger,
                num_images=249999
            )
        logger.info('Instantiated data loaders')
        test(args, 0, model, criterion, test_loader, run_config, mean, std,
             base_model=base_model, dataframe_file=dataframe_file)
    else:
        # ---- training path ----
        train_loader, test_loader = get_cifar10_vs_ti_loader(
            optim_config['batch_size'],
            run_config['num_workers'],
            run_config['device'] != 'cpu',
            args.num_images,
            optim_config['cifar10_fraction'],
            dataset_dir=data_config['dataset_dir'],
            even_odd=args.even_odd,
            load_ti_head_tail=args.load_ti_head_tail,
            use_ti_data_for_training=args.use_ti_data_for_training,
            random_split_version=args.random_split_version,
            ti_start_index=args.ti_start_index,
            logger=logger)
        # Optimizer + per-step cosine-annealing scheduler (restored: the
        # training loop below requires both).
        optim_config['steps_per_epoch'] = len(train_loader)
        optimizer = torch.optim.SGD(
            model.parameters(),
            lr=optim_config['base_lr'],
            momentum=optim_config['momentum'],
            weight_decay=optim_config['weight_decay'],
            nesterov=optim_config['nesterov'])
        scheduler = get_cosine_annealing_scheduler(optimizer, optim_config)
        # Baseline evaluation before any training.
        test(args, 0, model, criterion, test_loader, run_config, mean, std,
             base_model=base_model, dataframe_file=dataframe_file)
        epoch_logs = []
        # Checkpoint-name suffix records which half of the data was used.
        if args.even_odd >= 0:
            if args.even_odd:
                suffix = 'head'
            else:
                suffix = 'tail'
        else:
            suffix = ''
        for epoch in range(1, optim_config['epochs'] + 1):
            train_log = train(epoch, model, optimizer, scheduler, criterion,
                              train_loader, run_config)
            test_log = test(args, epoch, model, criterion, test_loader,
                            run_config, mean, std, base_model=base_model,
                            dataframe_file=dataframe_file)
            epoch_log = train_log.copy()
            epoch_log.update(test_log)
            epoch_logs.append(epoch_log)
            if epoch % save_freq == 0 or epoch == optim_config['epochs']:
                state = OrderedDict([
                    ('config', config),
                    ('state_dict', model.state_dict()),
                    ('optimizer', optimizer.state_dict()),
                    ('epoch', epoch),
                    ('accuracy_vs', test_log['test']['accuracy_vs']),
                ])
                model_path = os.path.join(
                    output_dir, ('model_state_epoch_%s_%d.pth' % (suffix, epoch)))
                torch.save(state, model_path)
                print("Saved model for path %s" % (model_path))
        # Final evaluation after training completes.
        test(args, 0, model, criterion, test_loader, run_config, mean, std,
             base_model=base_model, dataframe_file=dataframe_file)
if __name__ == '__main__':
main() | en | 0.377779 | Train data sourcing model. Based on code from https://github.com/hysts/pytorch_shake_shake # logging.basicConfig( # format='[%(asctime)s %(name)s %(levelname)s] - %(message)s', # datefmt='%Y/%m/%d %H:%M:%S', # level=logging.INFO) # logger = logging.getLogger(__name__) # logger.info(f'Mean standard normalize input shape: {input.shape}') # state_dict = {strip_data_parallel(k): v for k, v in state_dict.items()} # model config # parser.add_argument('--model', type=str, default='wrn-28-10') # detector model config # base model configs # run config # Semi-supervised training configuration # optim config #train configs # 10 CIFAR10 classes and one non-CIFAR10 class # ('name', args.model), # since lr_lambda computes multiplicative factor # print(indexes) # Computing cifar10 accuracy # Computing cifar10 vs. ti accuracy # TODO: This is hacky rn. See the right way to load TinyImages # logger.info(f'Tiny images data shape: {data.shape}') # logger.info(f'Tiny images data shape: {data.shape}') # print(data.shape) # print(tuple(data.shape)) # print(torch.transpose(data,1,3).view(-1,*tuple(data_shape[2:])).shape) # outputs = model(normalize_func(tensor=data.squeeze(1)).reshape(data_shape)) # print(indexes) # is_pred_c10 = preds != 10 # cifar10 accuracy # cifar10 vs. 
TI accuracy # print("Step %d, batch size %d, correct_c10_vs_ti_count %d" %(step, len(targets), correct_c10_v_ti_)) # print("Batch %d, batch df shape %s" %(step, str(batch_df.shape))) # plot_histogram(cifar_conf, noncifar_conf, dataset) # print('Non cifar probabilities:') # print(noncifar_all_confs) # parse command line arguments # set random seed # create output directory # outdir = pathlib.Path(run_config['outdir']) # outdir.mkdir(exist_ok=True, parents=True) # save config as json file in output directory # if args.dataset == 'custom': # custom_dataset = get_new_distribution_loader() # print("custom dataset loaded ....") # transform_test = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), ]) # mean = torch.tensor([0.4914, 0.4822, 0.4465]) # std = torch.tensor([ # 0.2470, 0.2435, 0.2616]) # custom_testset = SemiSupervisedDataset(base_dataset=args.dataset, # train=False, root='data', # download=True, # custom_dataset = custom_dataset, # transform=transform_test) # mean, std = # data loaders # model # custom_dataset = get_new_distribution_loader() # print("custom dataset loaded ....") # normalize_func = transforms.Normalize(mean.unsqueeze(0),std.unsqueeze(0)) # optimizer # optim_config['steps_per_epoch'] = len(train_loader) # optimizer = torch.optim.SGD( # model.parameters(), # lr=optim_config['base_lr'], # momentum=optim_config['momentum'], # weight_decay=optim_config['weight_decay'], # nesterov=optim_config['nesterov']) # scheduler = get_cosine_annealing_scheduler(optimizer, optim_config) # run test before start training # with open(os.path.join(output_dir, 'log.json'), 'w') as fout: # json.dump(epoch_logs, fout, indent=2) | 2.372117 | 2 |