id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
1785508 | #!/usr/bin/env python
import os
from textwrap import dedent
from blessings import Terminal
import click
from cookiecutter.main import cookiecutter
import adama
from adama import __version__
from adama.tools import location_of
# Terminal handle used to colourise all prompts and banners below.
t = Terminal()
# Directory holding the cookiecutter templates bundled with the adama package.
HERE = os.path.join(location_of(adama.__file__), 'generator')
@click.group()
def generator():
    """Top-level CLI group; prints the adapter-generator banner on invocation."""
    banner = """
    {t.bold_yellow}Adama v{version}{t.normal}
    {t.cyan}Adapter generator{t.normal}
    """.format(t=t, version=__version__)
    # dedent strips the common leading indentation of the literal above.
    click.echo(dedent(banner))
@generator.command()
@click.option('--name',
              prompt="{t.green}Your name?{t.normal}".format(t=t))
@click.option('--email',
              prompt="{t.green}Your email?{t.normal}".format(t=t))
@click.option('--type',
              type=click.Choice(['query', 'map_filter', 'passthrough']),
              prompt=("{t.green}Type of adapter?{t.normal} "
                      "[{t.cyan}query{t.normal}, "
                      "{t.cyan}map_filter{t.normal}, "
                      "{t.cyan}passthrough{t.normal}]")
              .format(t=t))
@click.option('--language',
              type=click.Choice(['python', 'javascript']),
              prompt=("{t.green}Language of choice?{t.normal} "
                      "[{t.cyan}python{t.normal}, "
                      "{t.cyan}javascript{t.normal}]")
              .format(t=t))
@click.option('--adapter_name',
              prompt="{t.green}Name for the adapter?{t.normal}".format(t=t))
def create(**kwargs):
    """Scaffold a new adapter from the bundled cookiecutter templates.

    All answers are collected via click prompts and handed to cookiecutter
    as extra context, so generation itself runs without further input.
    """
    # The template is selected by adapter type and implementation language.
    directory = os.path.join(
        HERE, 'templates', kwargs['type'], kwargs['language'])
    cookiecutter(directory, no_input=True, extra_context=kwargs)
    click.echo('{t.bold_yellow}Done!{t.normal}'.format(t=t))
@generator.command()
def publish():
    """Placeholder command: adapter publishing is not implemented yet."""
    click.echo('will publish')
# Allow running this module directly as a script.
if __name__ == '__main__':
    generator()
| StarcoderdataPython |
22413 | <filename>generate_hamming_command.py
import numpy as np
import os
# Directory that holds the per-split hash prediction dumps.
path = 'preds'
files = os.listdir(path)

# Collect the "*_0_HASH_*show.npy" files belonging to a CW low/high/mix run;
# each such file is the first of four chunks (_0/_20/_40/_60).
lst = []
for f in files:
    if f.find('_0_HASH') == -1:
        continue
    if f.find('CW') == -1:
        continue
    if f.find('low')==-1 and f.find('high')==-1 and f.find('mix')==-1:
        continue
    if f.endswith('show.npy'):
        lst.append(f)

for f in lst:
    # Split into the common prefix/suffix around the chunk marker.
    strs = f.split('_0_HASH_')
    print(strs)
    # Load the four sibling chunks of the same run.
    a = np.load(os.path.join(path, strs[0]+'_0_HASH_'+strs[1]))
    b = np.load(os.path.join(path, strs[0]+'_20_HASH_'+strs[1]))
    c = np.load(os.path.join(path, strs[0]+'_40_HASH_'+strs[1]))
    d = np.load(os.path.join(path, strs[0]+'_60_HASH_'+strs[1]))
    # Concatenate column-wise and save the combined array under the "_80" name.
    np.save(os.path.join(path, strs[0]+'_80_HASH_'+strs[1]), np.hstack((a,b,c,d)))
| StarcoderdataPython |
1772696 | from collections import abc, OrderedDict
from .line import Unknown, Dialogue, Movie, Command, Sound, Picture, Comment, Style
from .data import _Field
__all__ = (
'LineSection',
'FieldSection',
'EventsSection',
'StylesSection',
'ScriptInfoSection',
)
class LineSection(abc.MutableSequence):
    """A script section backed by an ordered list of parsed lines.

    Subclasses may set ``line_parsers`` (mapping lower-cased type names to
    parser classes) and a default ``field_order``; a "Format" line in the
    input overrides the field order on the instance.
    """

    FORMAT_TYPE = "Format"
    line_parsers = None
    field_order = None

    def __init__(self, name, lines=None):
        self.name = name
        self._lines = lines if lines is not None else []

    def dump(self):
        """Yield the serialized section: header, optional Format line, lines."""
        yield "[{}]".format(self.name)
        if self.field_order is not None:
            yield "{}: {}".format(LineSection.FORMAT_TYPE, ", ".join(self.field_order))
        yield from (entry.dump_with_type(self.field_order) for entry in self._lines)

    def add_line(self, type_name, raw_line):
        """Parse one raw "Type: payload" line and append it to the section."""
        lowered = type_name.lower()
        # field order is optional
        if lowered == LineSection.FORMAT_TYPE.lower():
            self.field_order = [field.strip() for field in raw_line.split(",")]
            return
        if self.line_parsers is not None and lowered not in self.line_parsers:
            raise ValueError("unexpected {} line in {}".format(type_name, self.name))
        parser = Unknown if self.line_parsers is None else self.line_parsers[lowered]
        self._lines.append(parser.parse(type_name, raw_line, self.field_order))

    def set_data(self, lines):
        """Replace the backing list wholesale (must be a mutable sequence)."""
        if not isinstance(lines, abc.MutableSequence):
            raise ValueError("Lines must be a mutable list")
        self._lines = lines

    def __getitem__(self, index):
        return self._lines[index]

    def __setitem__(self, index, val):
        self._lines[index] = val

    def __delitem__(self, index):
        del self._lines[index]

    def __len__(self):
        return len(self._lines)

    def insert(self, index, val):
        self._lines.insert(index, val)

    def __repr__(self):
        return f"{type(self).__name__}({self.name!r}, {self._lines!r})"
class FieldSection(abc.MutableMapping):
    """A script section backed by an ordered "Key: Value" mapping."""

    # avoid metaclass conflict by keeping track of fields in a dict instead
    FIELDS = {}

    def __init__(self, name, fields=None):
        self.name = name
        self._fields = fields if fields is not None else OrderedDict()

    def add_line(self, field_name, field):
        """Store one raw field, parsing it when the key is a declared field."""
        if field_name in self.FIELDS:
            field = self.FIELDS[field_name].parse(field)
        self._fields[field_name] = field

    def dump(self):
        """Yield the section header followed by each serialized field line."""
        yield "[{}]".format(self.name)
        for key, value in self._fields.items():
            yield "{}: {}".format(key, _Field.dump(value))

    def set_data(self, fields):
        """Replace the backing mapping wholesale (must be a mutable mapping)."""
        if not isinstance(fields, abc.MutableMapping):
            raise ValueError("Fields must be a mutable mapping")
        self._fields = fields

    def __contains__(self, key):
        return key in self._fields

    def __getitem__(self, key):
        return self._fields[key]

    def __setitem__(self, key, value):
        self._fields[key] = value

    def __delitem__(self, key):
        del self._fields[key]

    def __iter__(self):
        return iter(self._fields)

    def __len__(self):
        return len(self._fields)

    def __repr__(self):
        return f"{type(self).__name__}({self.name!r}, {self._fields!r})"

    def clear(self):  # Optional, but should be faster this way
        return self._fields.clear()

    def copy(self):
        """Shallow copy: shares field values but not the mapping itself."""
        return self.__class__(self.name, self._fields.copy())
class EventsSection(LineSection):
    """[Events] section: dialogue, comment, picture, sound, movie, command lines."""

    field_order = Dialogue.DEFAULT_FIELD_ORDER
    # Lower-cased line type name -> parser class for that line type.
    line_parsers = {
        "dialogue": Dialogue,  # noqa: E241
        "comment": Comment,  # noqa: E241
        "picture": Picture,  # noqa: E241
        "sound": Sound,  # noqa: E241
        "movie": Movie,  # noqa: E241
        "command": Command  # noqa: E241
    }
class StylesSection(LineSection):
    """[V4+ Styles] section: only "Style" lines are accepted."""

    field_order = Style.DEFAULT_FIELD_ORDER
    line_parsers = {
        "style": Style
    }
class ScriptInfoSection(FieldSection):
    """[Script Info] section with typed, defaulted well-known header fields."""

    VERSION_ASS = "v4.00+"  # Advanced SubStation Alpha (ASS)
    VERSION_SSA = "v4.00"   # classic SubStation Alpha (SSA)
    # Declared fields are parsed through _Field; unknown keys stay raw strings.
    FIELDS = {
        "ScriptType": _Field("ScriptType", str, default=VERSION_ASS),
        "PlayResX": _Field("PlayResX", int, default=640),
        "PlayResY": _Field("PlayResY", int, default=480),
        "WrapStyle": _Field("WrapStyle", int, default=0),
        "ScaledBorderAndShadow": _Field("ScaledBorderAndShadow", str, default="yes")
    }
| StarcoderdataPython |
1728196 | <reponame>adilshiekh00/clash-wars
import asyncio
from pytgcalls import idle
from driver.veez import call_py, bot
async def mulai_bot():
    """Start the bot and PyTgCalls clients, block until shutdown, then stop.

    Runs forever inside ``idle()`` until the process receives a termination
    signal, after which the bot client is stopped cleanly.
    """
    print("[VEEZ]: STARTING BOT CLIENT")
    await bot.start()
    print("[VEEZ]: STARTING PYTGCALLS CLIENT")
    await call_py.start()
    # Block here until the clients are terminated (Ctrl+C / SIGTERM).
    await idle()
    # Fix: removed `await pidle()` — `pidle` is never imported or defined, so
    # this line raised NameError as soon as idle() returned, skipping cleanup.
    print("[VEEZ]: STOPPING BOT & USERBOT")
    await bot.stop()
# Drive the startup coroutine to completion on the default event loop.
# NOTE(review): asyncio.get_event_loop() is deprecated for this use since
# Python 3.10 — asyncio.run(mulai_bot()) is the modern equivalent.
loop = asyncio.get_event_loop()
loop.run_until_complete(mulai_bot())
| StarcoderdataPython |
41142 | #!/usr/bin/env pythonw
import numpy as np
import matplotlib.pyplot as plt
def flip_coins(flips = 1000000, bins=100, p_heads=0.75):
    """Estimate a coin's heads-probability by sequential Bayesian updating.

    Simulates `flips` tosses of a biased coin (P(heads) = `p_heads`) and
    updates a discretised posterior over `bins` candidate bias values in
    [0, 1). Each hypothesis i asserts P(heads) = i / bins.

    Args:
        flips: Number of simulated coin tosses.
        bins: Number of discrete bias hypotheses.
        p_heads: True heads probability used to simulate the tosses
            (new optional parameter; default preserves the previous
            hard-coded 0.75 behaviour).

    Returns:
        numpy array of length `bins` with the normalised posterior.
        With flips=0, the uniform prior is returned (previously this case
        raised NameError because `posterior` was never assigned).
    """
    # Uninformative (uniform) prior; becomes the running posterior.
    posterior = np.ones(bins, dtype='float') / bins
    # Likelihood grids for the two outcomes under each bias hypothesis.
    likelihood_heads = np.arange(bins) / float(bins)
    likelihood_tails = 1 - likelihood_heads
    # Renamed from `flips` to avoid shadowing the count parameter.
    outcomes = np.random.choice(a=[True, False], size=flips,
                                p=[p_heads, 1 - p_heads])
    for is_heads in outcomes:
        posterior = posterior * (likelihood_heads if is_heads else likelihood_tails)
        # Normalize; the posterior is now the prior for the next toss.
        posterior /= np.sum(posterior)
    return posterior
# Plot the posterior after increasing numbers of tosses; the curve should
# sharpen around the true bias (0.75) as evidence accumulates.
plt.plot(np.arange(100)/float(100), flip_coins(10))
plt.plot(np.arange(100)/float(100), flip_coins(100))
plt.plot(np.arange(100)/float(100), flip_coins(1000))
plt.plot(np.arange(100)/float(100), flip_coins(10000))
plt.plot(np.arange(100)/float(100), flip_coins(100000))
plt.legend([10, 100, 1000, 10000, 100000])
plt.show()
3390671 | <filename>todo/signals.py
from django import dispatch
# Fired whenever a task's completion state is toggled; receivers get `task`.
# NOTE(review): `providing_args` is deprecated since Django 3.0 and removed in
# Django 4.0 — on modern Django this should be a bare dispatch.Signal().
task_completion_toggled = dispatch.Signal(providing_args=["task"])
| StarcoderdataPython |
1622154 | <filename>tests/playbook/test_playbook_tc_entity_types.py<gh_stars>0
"""Test the TcEx Batch Module."""
# standard library
from typing import TYPE_CHECKING, Any, Dict, List, Union
# third-party
import pytest
# first-party
from tcex.input.field_types import KeyValue
if TYPE_CHECKING:
# first-party
from tcex.playbook.playbook import Playbook
# pylint: disable=no-self-use
class TestUtils:
    """Test TCEntity / TCEntityArray playbook variable round-trips."""

    @pytest.mark.parametrize(
        'variable,value',
        [
            ('#App:0002:te1!TCEntity', {'id': '001', 'value': '1.1.1.1', 'type': 'Address'}),
            ('#App:0002:te2!TCEntity', {'id': '002', 'value': '2.2.2.2', 'type': 'Address'}),
            ('#App:0002:te3!TCEntity', {'id': '003', 'value': '3.3.3.3', 'type': 'Address'}),
            ('#App:0002:te4!TCEntity', {'id': '004', 'value': '3.3.3.3', 'type': 'Address'}),
        ],
    )
    def test_playbook_tc_entity_pass(
        self, variable: str, value: Union[dict, KeyValue], playbook: 'Playbook'
    ):
        """Create, read back, and delete a valid TCEntity variable."""
        playbook.create.tc_entity(variable, value, when_requested=False)
        result = playbook.read.tc_entity(variable)
        assert result == value, f'result of ({result}) does not match ({value})'

        playbook.delete.variable(variable)
        assert playbook.read.variable(variable) is None

    @pytest.mark.parametrize(
        'variable,value',
        [
            ('#App:0002:b1!TCEntity', {'one': '1', 'two': 'two'}),
            ('#App:0002:b2!TCEntity', []),
            ('#App:0002:b3!TCEntity', {}),
            ('#App:0002:b4!WrongType', 'wrong type'),
        ],
    )
    def test_playbook_tc_entity_fail(self, variable: str, value: Any, playbook: 'Playbook'):
        """Invalid TCEntity values must raise RuntimeError on create.

        Consistency fix: uses pytest.raises like
        test_playbook_tc_entity_array_fail below, replacing the
        try/except/assert-False idiom.
        """
        with pytest.raises(RuntimeError):
            playbook.create.tc_entity(variable, value, when_requested=False)

    @pytest.mark.parametrize(
        'variable,value',
        [
            (
                '#App:0002:tea1!TCEntityArray',
                [
                    {'id': '001', 'value': '1.1.1.1', 'type': 'Address'},
                    {'id': '011', 'value': '11.11.11.11', 'type': 'Address'},
                ],
            ),
            (
                '#App:0002:tea2!TCEntityArray',
                [
                    {'id': '002', 'value': '2.2.2.2', 'type': 'Address'},
                    {'id': '022', 'value': '22.22.22.22', 'type': 'Address'},
                ],
            ),
            (
                '#App:0002:tea3!TCEntityArray',
                [
                    {'id': '003', 'value': '3.3.3.3', 'type': 'Address'},
                    {'id': '033', 'value': '33.33.33.33', 'type': 'Address'},
                ],
            ),
            (
                '#App:0002:tea4!TCEntityArray',
                [
                    {'id': '004', 'value': '4.4.4.4', 'type': 'Address'},
                    # NOTE(review): '4172.16.17.32' is not a valid IPv4 address
                    # and looks like data-scrub damage — confirm intended value.
                    {'id': '044', 'value': '4172.16.17.32', 'type': 'Address'},
                ],
            ),
        ],
    )
    def test_playbook_tc_entity_array_pass(
        self, variable: str, value: List[Dict[str, str]], playbook: 'Playbook'
    ):
        """Create, read back, and delete a valid TCEntityArray variable."""
        playbook.create.tc_entity_array(variable, value, when_requested=False)
        result = playbook.read.tc_entity_array(variable)
        assert result == value, f'result of ({result}) does not match ({value})'

        playbook.delete.variable(variable)
        assert playbook.read.variable(variable) is None

    @pytest.mark.parametrize(
        'variable,value',
        [
            (
                '#App:0003:tea1!TCEntityArray',
                [
                    {'id': '001', 'value': '1.1.1.1', 'type': 'Address'},
                    {'id': '011', 'ip': '11.11.11.11', 'type': 'Address'},
                ],
            ),
            ('#App:0003:tea2!TCEntityArray', 'not a TCEntityArray'),
            ('#App:0003:tea3!WrongType', 'wrong type'),
        ],
    )
    def test_playbook_tc_entity_array_fail(self, variable: str, value: Any, playbook: 'Playbook'):
        """Invalid TCEntityArray values must raise RuntimeError on create."""
        with pytest.raises(RuntimeError) as ex:
            playbook.create.tc_entity_array(variable, value, when_requested=False)
        assert 'Invalid ' in str(ex.value)
| StarcoderdataPython |
1692 | <filename>CTFd/api/v1/users.py
from flask import session, request, abort
from flask_restplus import Namespace, Resource
from CTFd.models import (
db,
Users,
Solves,
Awards,
Tracking,
Unlocks,
Submissions,
Notifications,
)
from CTFd.utils.decorators import authed_only, admins_only, ratelimit
from CTFd.cache import clear_standings
from CTFd.utils.user import get_current_user, is_admin
from CTFd.utils.decorators.visibility import (
check_account_visibility,
check_score_visibility,
)
from CTFd.schemas.submissions import SubmissionSchema
from CTFd.schemas.awards import AwardSchema
from CTFd.schemas.users import UserSchema
# REST namespace grouping all /users endpoints registered below.
users_namespace = Namespace("users", description="Endpoint to retrieve Users")
@users_namespace.route("")
class UserList(Resource):
    """Collection endpoint: list visible users; admins may create users."""

    @check_account_visibility
    def get(self):
        """Return all users that are neither banned nor hidden."""
        users = Users.query.filter_by(banned=False, hidden=False)
        response = UserSchema(view="user", many=True).dump(users)

        if response.errors:
            return {"success": False, "errors": response.errors}, 400

        return {"success": True, "data": response.data}

    @admins_only
    def post(self):
        """Create a new user from the JSON request body (admin only)."""
        req = request.get_json()
        schema = UserSchema("admin")
        response = schema.load(req)

        if response.errors:
            return {"success": False, "errors": response.errors}, 400

        db.session.add(response.data)
        db.session.commit()

        if request.args.get("notify"):
            name = response.data.name
            # Fix: this line was corrupted to `password = <PASSWORD>")` and did
            # not parse; restore reading the plaintext password from the payload.
            password = req.get("password")
            # NOTE(review): the actual notification dispatch (upstream CTFd
            # emails name/password here) appears to have been stripped from
            # this copy — confirm against the original source.

        # New accounts change the scoreboard; invalidate the cache.
        clear_standings()

        response = schema.dump(response.data)

        return {"success": True, "data": response.data}
@users_namespace.route("/<int:user_id>")
@users_namespace.param("user_id", "User ID")
class UserPublic(Resource):
    """Single-user endpoint: public view, admin edit, admin delete."""

    @check_account_visibility
    def get(self, user_id):
        """Return one user's profile plus their place and score."""
        user = Users.query.filter_by(id=user_id).first_or_404()

        # Banned/hidden users are invisible to non-admins.
        if (user.banned or user.hidden) and is_admin() is False:
            abort(404)

        # View depends on the session type so admins see extra fields.
        response = UserSchema(view=session.get("type", "user")).dump(user)

        if response.errors:
            return {"success": False, "errors": response.errors}, 400

        response.data["place"] = user.place
        response.data["score"] = user.score

        return {"success": True, "data": response.data}

    @admins_only
    def patch(self, user_id):
        """Partially update a user from the JSON body (admin only)."""
        user = Users.query.filter_by(id=user_id).first_or_404()
        data = request.get_json()
        data["id"] = user_id
        schema = UserSchema(view="admin", instance=user, partial=True)
        response = schema.load(data)
        if response.errors:
            return {"success": False, "errors": response.errors}, 400

        db.session.commit()
        response = schema.dump(response.data)
        db.session.close()

        # Edits may affect scores; invalidate the cached scoreboard.
        clear_standings()

        return {"success": True, "data": response}

    @admins_only
    def delete(self, user_id):
        """Delete a user and every row referencing them (admin only)."""
        Notifications.query.filter_by(user_id=user_id).delete()
        Awards.query.filter_by(user_id=user_id).delete()
        Unlocks.query.filter_by(user_id=user_id).delete()
        Submissions.query.filter_by(user_id=user_id).delete()
        Solves.query.filter_by(user_id=user_id).delete()
        Tracking.query.filter_by(user_id=user_id).delete()
        Users.query.filter_by(id=user_id).delete()
        db.session.commit()
        db.session.close()

        clear_standings()

        return {"success": True}
@users_namespace.route("/me")
class UserPrivate(Resource):
    """Profile endpoint for the currently authenticated user."""

    @authed_only
    def get(self):
        """Return the logged-in user's own profile, place and score."""
        user = get_current_user()
        response = UserSchema("self").dump(user).data
        response["place"] = user.place
        response["score"] = user.score
        return {"success": True, "data": response}

    @authed_only
    def patch(self):
        """Let the logged-in user partially update their own profile."""
        user = get_current_user()
        data = request.get_json()
        schema = UserSchema(view="self", instance=user, partial=True)
        response = schema.load(data)
        if response.errors:
            return {"success": False, "errors": response.errors}, 400

        db.session.commit()
        response = schema.dump(response.data)
        db.session.close()

        # Profile changes can affect the cached scoreboard.
        clear_standings()

        return {"success": True, "data": response.data}
@users_namespace.route("/me/solves")
class UserPrivateSolves(Resource):
    """Solves of the currently logged-in user."""

    @authed_only
    def get(self):
        """Return the caller's own solves (including hidden ones)."""
        user = get_current_user()
        # admin=True: users may always see all of their own solves.
        solves = user.get_solves(admin=True)

        view = "user" if not is_admin() else "admin"
        response = SubmissionSchema(view=view, many=True).dump(solves)
        if response.errors:
            return {"success": False, "errors": response.errors}, 400

        return {"success": True, "data": response.data}
@users_namespace.route("/me/fails")
class UserPrivateFails(Resource):
    """Failed submissions of the currently logged-in user."""

    @authed_only
    def get(self):
        """Return the caller's fail count; full records only for admins."""
        user = get_current_user()
        fails = user.get_fails(admin=True)

        view = "user" if not is_admin() else "admin"
        response = SubmissionSchema(view=view, many=True).dump(fails)
        if response.errors:
            return {"success": False, "errors": response.errors}, 400

        # Non-admins only learn how many fails they have, not the details.
        if is_admin():
            data = response.data
        else:
            data = []
        count = len(response.data)

        return {"success": True, "data": data, "meta": {"count": count}}
@users_namespace.route("/me/awards")
# NOTE(review): this /me route takes no user_id — the param decoration below
# looks copy-pasted from the public routes; confirm it is intentional.
@users_namespace.param("user_id", "User ID")
class UserPrivateAwards(Resource):
    """Awards of the currently logged-in user."""

    @authed_only
    def get(self):
        """Return the caller's own awards."""
        user = get_current_user()
        awards = user.get_awards(admin=True)

        view = "user" if not is_admin() else "admin"
        response = AwardSchema(view=view, many=True).dump(awards)
        if response.errors:
            return {"success": False, "errors": response.errors}, 400

        return {"success": True, "data": response.data}
@users_namespace.route("/<user_id>/solves")
@users_namespace.param("user_id", "User ID")
class UserPublicSolves(Resource):
    """Solves of an arbitrary user (public view)."""

    @check_account_visibility
    @check_score_visibility
    def get(self, user_id):
        """Validate visibility of the user's solves, returning no data.

        NOTE(review): upstream CTFd returns response.data here; this copy
        deliberately(?) nulls the payload (see the commented return below) —
        confirm whether that redaction is intended.
        """
        user = Users.query.filter_by(id=user_id).first_or_404()

        if (user.banned or user.hidden) and is_admin() is False:
            abort(404)
        solves = user.get_solves(admin=is_admin())

        view = "user" if not is_admin() else "admin"
        response = SubmissionSchema(view=view, many=True).dump(solves)
        if response.errors:
            return {"success": False, "errors": response.errors}, 400

        # return {"success": True, "data": response.data}
        return {"success": True, "data": None}
@users_namespace.route("/<user_id>/fails")
@users_namespace.param("user_id", "User ID")
class UserPublicFails(Resource):
    """Failed submissions of an arbitrary user (public view)."""

    @check_account_visibility
    @check_score_visibility
    def get(self, user_id):
        """Validate visibility of the user's fails, returning no data/count.

        NOTE(review): upstream CTFd returns `data`/`count` here; this copy
        deliberately(?) nulls both (see the commented return below) —
        confirm whether that redaction is intended.
        """
        user = Users.query.filter_by(id=user_id).first_or_404()

        if (user.banned or user.hidden) and is_admin() is False:
            abort(404)
        fails = user.get_fails(admin=is_admin())

        view = "user" if not is_admin() else "admin"
        response = SubmissionSchema(view=view, many=True).dump(fails)
        if response.errors:
            return {"success": False, "errors": response.errors}, 400

        if is_admin():
            data = response.data
        else:
            data = []
        count = len(response.data)

        # return {"success": True, "data": data, "meta": {"count": count}}
        return {"success": True, "data": None, "meta": {"count": None}}
@users_namespace.route("/<user_id>/awards")
@users_namespace.param("user_id", "User ID or 'me'")
class UserPublicAwards(Resource):
    """Awards of an arbitrary user (public view)."""

    @check_account_visibility
    @check_score_visibility
    def get(self, user_id):
        """Validate visibility of the user's awards, returning no data.

        NOTE(review): upstream CTFd returns response.data here; this copy
        deliberately(?) nulls the payload (see the commented return below) —
        confirm whether that redaction is intended.
        """
        user = Users.query.filter_by(id=user_id).first_or_404()

        if (user.banned or user.hidden) and is_admin() is False:
            abort(404)
        awards = user.get_awards(admin=is_admin())

        view = "user" if not is_admin() else "admin"
        response = AwardSchema(view=view, many=True).dump(awards)
        if response.errors:
            return {"success": False, "errors": response.errors}, 400

        # return {"success": True, "data": response.data}
        return {"success": True, "data": None}
| StarcoderdataPython |
117439 | <filename>muas_sid/muas_sid/cli.py
#!/usr/bin/env python3
import argparse
import logging
import os
import sys
from pathlib import Path
from muas_sid import __version__
# Name the logger after the entry-point script file so log lines identify
# which command produced them.
module = sys.modules["__main__"].__file__
logger = logging.getLogger(module)
def existing_file(value: str, extensions: tuple = None) -> Path:
    """Check object is an existing file.

    Arguments:
        value {str} -- File path (``~`` is expanded)

    Keyword Arguments:
        extensions {tuple} -- Allowed file extensions, e.g. (".bin", ".log")
            (default: {None}, meaning any extension is accepted)

    Raises:
        IOError: Not an existing file
        argparse.ArgumentTypeError: Exists, but is not the correct file type

    Returns:
        Path -- Path object to file
    """
    file_path = Path(value).expanduser()
    # pathlib idiom in place of os.path.isfile; same truth value.
    if not file_path.is_file():
        raise IOError("{} does not exist".format(file_path))
    if extensions is not None:
        # Compare case-insensitively so ".BIN" matches ".bin".
        suffixes = [ext.lower() for ext in extensions]
        if file_path.suffix.lower() not in suffixes:
            raise argparse.ArgumentTypeError(
                "{} is not a file of type {}".format(value, extensions)
            )
    return file_path
def existing_directory(value: str) -> Path:
    """Check object is an existing directory.

    Arguments:
        value {str} -- Directory path (``~`` is expanded)

    Raises:
        IOError: Not an existing path
        argparse.ArgumentTypeError: Exists, but is not a directory

    Returns:
        Path -- Path object to directory
    """
    directory_path = Path(value).expanduser()
    if not directory_path.exists():
        # Fix: error message previously read "does not exists".
        raise IOError("{} does not exist".format(value))
    if not directory_path.is_dir():
        raise argparse.ArgumentTypeError("{} is not a directory".format(value))
    return directory_path
def parse_command_line(argv: list = None) -> argparse.Namespace:
    """Handle command line arguments.

    Keyword Arguments:
        argv {list} -- Full argument vector including the program name, as in
            sys.argv (default: {None}, meaning use sys.argv)

    Returns:
        argparse.Namespace -- Parsed arguments
    """
    if argv is None:
        argv = sys.argv
    formatter_class = argparse.RawTextHelpFormatter
    parser = argparse.ArgumentParser(
        description=__package__, formatter_class=formatter_class
    )
    parser.add_argument(
        "input_path",
        action="store",
        type=lambda f: existing_file(f, (".bin", ".log", ".tlog")),
        help="Input path to file ending with (.bin, .log, .tlog)",
    )
    parser.add_argument(
        "-v",
        "--verbose",
        action="count",
        default=0,
        required=False,
        help="Increase log verbosity (max -vvv)",
        dest="verbose_count",
    )
    parser.add_argument(
        "-d",
        "--debug",
        action="store_true",
        required=False,
        help="Show debugging messages (eqv. to -vv, overrides verbosity flag)",
    )
    parser.add_argument(
        "-s",
        "--silent",
        action="store_true",
        required=False,
        help="Suppress log warning and lower messages (overrides other verbosity flags)",
    )
    parser.add_argument(
        "-V",
        "--version",
        action="version",
        version="%(prog)s {}".format(__version__),
        help="show the version and exit",
    )
    # TODO: Add interactive mode via iPython
    # Fix: honour the supplied vector — previously parse_args() was called
    # with no arguments, so an explicitly passed `argv` was silently ignored
    # and sys.argv was always used. argv[0] is the program name.
    args = parser.parse_args(argv[1:])
    # Precedence: --silent beats --debug beats -v counting.
    if args.silent:
        logger.setLevel(logging.ERROR)
    elif args.debug:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(max(3 - args.verbose_count, 1) * 10)
    return args
| StarcoderdataPython |
3237407 | <reponame>ID56/Multimodal-Fusion-CRNN<gh_stars>0
import albumentations as A
import numpy as np
import cv2
def joint_shift_scale_rotate(joint_points: np.ndarray, shift_limit: float, scale_limit: float, rotate_limit: int, p: float = 0.5) -> np.ndarray:
    """Shift, scale, and rotate joint points within a specified range.

    Args:
        joint_points (np.ndarray): Joint points, of shape (num_frames, 44),
            i.e. 22 (x, y) joints flattened per frame.
        shift_limit (float): How much the points can be shifted.
        scale_limit (float): Scale factor range.
        rotate_limit (int): Rotation range (degrees).
        p (float, optional): Probability of applying transform. Defaults to 0.5.

    Returns:
        np.ndarray: The transformed joint points, shape (num_frames, 44).

    NOTE(review): when shift_limit is falsy but scale applies, the `*=` below
    mutates the caller's array in place; with a shift, a copy is made first.
    Also, only frame 0's palm point is re-zeroed after shifting — confirm
    whether all frames should be normalised.
    """
    if np.random.random() >= p:
        # Augmentation not triggered: return the input unchanged.
        return joint_points

    palm_idx = 1
    num_frames = joint_points.shape[0]

    if shift_limit:
        shift = np.random.uniform(-shift_limit, shift_limit, 2)  # shift = (shift_x, shift_y)
        joint_points = joint_points.reshape(num_frames, 22, -1)  # (num_frames, 44) -> (num_frames, 22, 2)
        joint_points = joint_points + shift  # shift is broadcasted and added
        joint_points[0, palm_idx] = 0  # joint points are supposed to be normalized relative to the palm point
        joint_points = joint_points.reshape(num_frames, -1)  # if palm point is non-zero, cannot apply scaling
    if scale_limit:
        scale_factor = 1 + np.random.uniform(-scale_limit, scale_limit)
        joint_points *= scale_factor
    if rotate_limit:
        rot_angle = np.random.randint(-rotate_limit, rotate_limit)
        joint_points = joint_points.reshape(num_frames, 22, -1)
        # Rotate each frame's joints about that frame's palm point.
        for i in range(num_frames):
            center = joint_points[i, palm_idx, :2]
            rot_mat = cv2.getRotationMatrix2D(center, rot_angle, 1)
            # Homogeneous coordinates: append a ones column, then apply the
            # 2x3 affine rotation matrix.
            joint_points[i] = np.hstack([joint_points[i], np.ones((22, 1))]) @ rot_mat.T
        joint_points = joint_points.reshape(num_frames, -1)
    return joint_points
def image_shift_scale_rotate(image_sequence: np.ndarray, shift_limit: float, scale_limit:float, rotate_limit: int, p : float = 0.5):
    """Apply one random shift/scale/rotate coherently to every frame.

    The transform parameters are sampled once on the first frame and then
    replayed on the remaining frames so the whole clip moves together.
    Frames are overwritten in place; the (mutated) sequence is returned.
    """
    pipeline = A.ReplayCompose([
        A.ShiftScaleRotate(shift_limit=shift_limit, scale_limit=scale_limit, rotate_limit=rotate_limit, p=p)
    ])
    first = pipeline(image=image_sequence[0])
    image_sequence[0] = first["image"]
    # Replay the recorded parameters on all subsequent frames.
    for idx in range(1, image_sequence.shape[0]):
        replayed = A.ReplayCompose.replay(first['replay'], image=image_sequence[idx])
        image_sequence[idx] = replayed["image"]
    return image_sequence
def shift_scale_rotate(image_sequence: np.ndarray, joint_points: np.ndarray, shift_limit: float, scale_limit:float, rotate_limit: int, p : float = 0.5):
    """Apply ONE sampled shift/scale/rotate to both images and joint points.

    The albumentations transform is sampled on the first frame, replayed on
    the remaining frames, and the recorded parameters (dx/dy, scale, angle)
    are then re-applied analytically to the joint coordinates so both
    modalities stay aligned. Returns (image_sequence, joint_points).
    """
    transform = A.ReplayCompose([
        A.ShiftScaleRotate(shift_limit=shift_limit, scale_limit=scale_limit, rotate_limit=rotate_limit, p=p)
    ])
    data = transform(image=image_sequence[0])
    # If the (probabilistic) transform did not fire, leave both inputs as-is.
    if not data['replay']['transforms'][0]['applied']:
        return image_sequence, joint_points

    image_sequence[0] = data["image"]
    # Replay identical parameters on the remaining frames.
    for i in range(1, image_sequence.shape[0]):
        image_sequence[i] = A.ReplayCompose.replay(data['replay'], image=image_sequence[i])["image"]

    # Extract the sampled parameters to mirror them on the joints.
    params = data['replay']['transforms'][0]['params']
    rot_angle = params['angle']
    scale_factor = params['scale']
    shift = np.array([params['dx'], params['dy']])

    palm_idx = 1
    num_frames = joint_points.shape[0]

    if np.any(shift):
        joint_points = joint_points.reshape(num_frames, 22, -1)  # (num_frames, 44) -> (num_frames, 22, 2)
        joint_points = joint_points + shift  # shift is broadcasted and added
        joint_points[0, palm_idx] = 0  # joint points are supposed to be normalized relative to the palm point
        joint_points = joint_points.reshape(num_frames, -1)  # if palm point is non-zero, cannot apply scaling
    if scale_factor:
        # NOTE(review): without a shift this `*=` mutates the caller's array
        # in place — confirm callers don't rely on the input being untouched.
        joint_points *= scale_factor
    if rot_angle:
        joint_points = joint_points.reshape(num_frames, 22, -1)
        # Rotate each frame's joints about that frame's palm point using a
        # 2x3 affine matrix in homogeneous coordinates.
        for i in range(num_frames):
            center = joint_points[i, palm_idx, :2]
            rot_mat = cv2.getRotationMatrix2D(center, rot_angle, 1)
            joint_points[i] = np.hstack([joint_points[i], np.ones((22, 1))]) @ rot_mat.T
        joint_points = joint_points.reshape(num_frames, -1)
    return image_sequence, joint_points
def time_shift(image_sequence, joint_points, frame_limit, p):
    """Randomly trim up to `frame_limit` frames from one end of the clip.

    With probability (roughly) `p`, a signed offset is drawn: positive trims
    trailing frames, negative trims leading frames, zero leaves the clip
    unchanged. Images and joints are always trimmed in lockstep.
    """
    if np.random.random() >= p:
        # Augmentation not triggered: hand back the inputs untouched.
        return image_sequence, joint_points

    offset = np.random.randint(-frame_limit, frame_limit)
    if offset > 0:
        # Drop frames from the end.
        return image_sequence[:-offset], joint_points[:-offset]
    if offset < 0:
        # Drop frames from the start.
        return image_sequence[-offset:], joint_points[-offset:]
    return image_sequence, joint_points
def apply_augs(joint_points, image_sequence, augs):
    """Apply the configured augmentations to a (joints, images) sample.

    A joint "shift_scale_rotate" entry transforms images and joint points
    with the same sampled parameters; otherwise the per-modality variants
    run independently. "time_shift" is always applied last.
    Returns the (possibly transformed) (joint_points, image_sequence) pair.
    """
    if "shift_scale_rotate" in augs:
        image_sequence, joint_points = shift_scale_rotate(
            image_sequence, joint_points, **augs["shift_scale_rotate"])
    else:
        if "joint_shift_scale_rotate" in augs:
            joint_points = joint_shift_scale_rotate(
                joint_points, **augs["joint_shift_scale_rotate"])
        if "image_shift_scale_rotate" in augs:
            image_sequence = image_shift_scale_rotate(
                image_sequence, **augs["image_shift_scale_rotate"])

    if "time_shift" in augs:
        image_sequence, joint_points = time_shift(
            image_sequence, joint_points, **augs["time_shift"])

    return joint_points, image_sequence
class Solution:
    def trap(self, height: List[int]) -> int:
        """Return the total units of rain water trapped between the bars.

        Water above bar i equals min(highest bar to its left, highest bar to
        its right) - height[i]. Uses prefix/suffix running-maximum arrays:
        O(n) time, O(n) extra space.
        """
        n = len(height)
        if n < 3:
            # Fewer than three bars can never trap water; this also guards
            # the height[0] access below (the original crashed on []).
            return 0

        # left_max[i]: tallest bar in height[0..i]
        left_max = [0] * n
        left_max[0] = height[0]
        for i in range(1, n):
            left_max[i] = max(left_max[i - 1], height[i])

        # right_max[i]: tallest bar in height[i..n-1]
        right_max = [0] * n
        right_max[-1] = height[-1]
        for i in range(n - 2, -1, -1):
            right_max[i] = max(right_max[i + 1], height[i])

        # Interior bars only; the bounding maxima guarantee each term >= 0.
        res = 0
        for i in range(1, n - 1):
            res += min(left_max[i], right_max[i]) - height[i]
        return res
13154 | import tensorflow as tf
import pandas as pd
import numpy as np
import sys
import time
from cflow import ConditionalFlow
from MoINN.modules.subnetworks import DenseSubNet
from utils import train_density_estimation, plot_loss, plot_tau_ratio
# import data: generator-level and detector-level (simulated) tau observables,
# each reshaped to a column vector.
tau1_gen = np.reshape(np.load("../data/tau1s_Pythia_gen.npy"), (-1,1))
tau2_gen = np.reshape(np.load("../data/tau2s_Pythia_gen.npy"), (-1,1))
tau1_sim = np.reshape(np.load("../data/tau1s_Pythia_sim.npy"), (-1,1))
tau2_sim = np.reshape(np.load("../data/tau2s_Pythia_sim.npy"), (-1,1))

# Pair (tau1, tau2) into 2-column tensors for each level.
data_gen = tf.convert_to_tensor(np.concatenate([tau1_gen,tau2_gen], axis=-1), dtype=tf.float32)
data_sim = tf.convert_to_tensor(np.concatenate([tau1_sim,tau2_sim], axis=-1), dtype=tf.float32)

# 50/50 train/test split, kept aligned between gen and sim.
train_gen, test_gen = np.split(data_gen, 2)
train_sim, test_sim = np.split(data_sim, 2)

# Get the flow: subnet hyperparameters for each coupling block.
meta = {
    "units": 16,
    "layers": 4,
    "initializer": "glorot_uniform",
    "activation": "leakyrelu",
}

# Conditional flow mapping 2D gen-level data conditioned on 2D sim-level data.
cflow = ConditionalFlow(dims_in=[2], dims_c=[[2]], n_blocks=12, subnet_meta=meta, subnet_constructor=DenseSubNet)

# train the network
EPOCHS = 50
BATCH_SIZE = 1000
LR = 5e-3
DECAY_RATE=0.1
ITERS = len(train_gen)//BATCH_SIZE
DECAY_STEP=ITERS

#Prepare the tf.dataset
train_dataset = tf.data.Dataset.from_tensor_slices((train_gen, train_sim))
train_dataset = train_dataset.shuffle(buffer_size=500000).batch(BATCH_SIZE).prefetch(tf.data.AUTOTUNE)

# Inverse-time learning-rate decay, one decay step per epoch's worth of iters.
lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(LR, DECAY_STEP, DECAY_RATE)
opt = tf.keras.optimizers.Adam(lr_schedule)

train_losses = []
#train_all = np.concatenate([train_gen, train_sim], axis=-1)
start_time = time.time()
for e in range(EPOCHS):
    batch_train_losses = []
    # Iterate over the batches of the dataset.
    for step, (batch_gen, batch_sim) in enumerate(train_dataset):
        batch_loss = train_density_estimation(cflow, opt, batch_gen, [batch_sim])
        batch_train_losses.append(batch_loss)

    train_loss = tf.reduce_mean(batch_train_losses)
    train_losses.append(train_loss)

    if (e + 1) % 1 == 0:
        # Print metrics
        # NOTE(review): opt._decayed_lr is a private Keras API — may break on
        # newer TensorFlow versions.
        print(
            "Epoch #{}: Loss: {}, Learning_Rate: {}".format(
                e + 1, train_losses[-1], opt._decayed_lr(tf.float32)
            )
        )
end_time = time.time()
print("--- Run time: %s hour ---" % ((end_time - start_time)/60/60))
print("--- Run time: %s mins ---" % ((end_time - start_time)/60))
print("--- Run time: %s secs ---" % ((end_time - start_time)))

# Make plots and sample
plot_loss(train_losses, name="Log-likelihood", log_axis=False)

# Unfold: sample gen-level events conditioned on the held-out detector data.
detector = tf.constant(test_sim, dtype=tf.float32)
unfold_gen = cflow.sample(int(5e5),[detector])
plot_tau_ratio(test_gen, unfold_gen, detector, name="tau_ratio")

# Draw 10 independent unfoldings to estimate the sampling spread, then save.
unfold_gen = {}
for i in range(10):
    unfold_gen[i] = cflow.sample(int(5e5),[detector])

unfold_pythia = np.stack([unfold_gen[i] for i in range(10)])
np.save("inn_pythia",unfold_pythia)
132306 |
# imports shared throughout the project
import sys
import importlib
import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# CONSTANTS
PJ_TO_GWH = 277.7778  # [GWh / PJ]
GWH_TO_PJ = 1/PJ_TO_GWH  # [PJ/GWH]

# HELPER
import FLUCCOplus.config as config

# Energy-model column name -> German Excel column name.
EM_TO_EXCEL_colnames = {
    "power_production_wind_avg": "Windkraft",
    "power_production_solar_avg": "Photovoltaik",
    "power_production_hydro_avg": "Laufkraft",
    "total_consumption_avg": "Strombedarf",
    "total_production_avg": "Erzeugung",
    "power_consumption_hydro_discharge_avg": "Pumpspeicher"}

# Inverse mapping, extended with aggregate columns that only exist on the
# Excel side (note: "Pumpspeicher" is re-pointed to the same key as above).
EXCEL_TO_EM_colnames = {v: k for k, v in EM_TO_EXCEL_colnames.items()}
EXCEL_TO_EM_colnames["Volatile EE"] = "power_production_volatile_avg"
EXCEL_TO_EM_colnames["Nicht-Volatile"] = "power_production_non-volatile_avg"
EXCEL_TO_EM_colnames["Pumpspeicher"] = "power_consumption_hydro_discharge_avg"
EXCEL_TO_EM_colnames["Wasserkraft"] = "power_production_hydro_and_discharge_avg"
def log(f):
    """Decorator: info-log the wrapped call's wall-clock duration in ms."""
    module_logger = config.logging.getLogger(f.__module__)

    def wrapper(*args, **kwargs):
        started = time.time()*1000
        result = f(*args, **kwargs)
        elapsed = time.time()*1000 - started
        module_logger.info(f"{f.__name__} - {round(elapsed,2)}ms")
        return result

    return wrapper
def logg(f):
    """Decorator: like @log, but also debug-logs the returned frame's shape.

    Assumes the wrapped function takes a DataFrame first and returns one.
    """
    module_logger = config.logging.getLogger(f.__module__)

    def wrapper(dataframe, *args, **kwargs):
        # Delegate timing/info logging to the @log wrapper.
        result = log(f)(dataframe, *args, **kwargs)
        n_rows, n_cols = result.shape
        module_logger.debug(f"{f.__name__} df.shape = ({n_rows}, {n_cols})")
        return result

    return wrapper
def plot_signal_bars(df, columns, ytick_average_max=False, cut_ylim=False, figsize=True):
    """Summarise ON/OFF signal columns (+1/-1 encoded) as two bar charts.

    Left axis: stacked hours with/without signal per column (annotated as %
    of the year). Right axis: average duration of signal and non-signal
    periods in hours. Returns the (fig, ax) pair.

    NOTE(review): `figsize=True` is passed straight to plt.subplots(figsize=...);
    callers presumably pass a (width, height) tuple — confirm the default.
    """
    desc_wind = pd.DataFrame()
    df_step_wind = pd.DataFrame()
    df_not_wind = pd.DataFrame()
    # fig, ax = plt.subplots()
    for c in columns:
        # shift/ne marks edges; cumsum over the masked edges numbers each
        # contiguous ON (== 1) / OFF (== -1) period.
        df_step_wind[c] = df[c].shift(1).ne(df[c]).where(df[c] == 1).cumsum()
        df_not_wind[c] = df[c].shift(1).ne(df[c]).where(df[c] == -1).cumsum()
    df_step_wind.iloc[0, :] = 0
    desc_wind["Zeitraum mit Signal [h]"] = df.where(df > 0).sum()
    desc_wind["Nicht-Signal-Zeitraum [h]"] = len(df) - desc_wind["Zeitraum mit Signal [h]"]
    # Max period number == count of ON periods in the column.
    desc_wind["Anzahl Signal-Perioden"] = df_step_wind.max()
    desc_wind["Durchschnittliche Dauer Signal [h]"] = (
        desc_wind["Zeitraum mit Signal [h]"] / desc_wind["Anzahl Signal-Perioden"])
    desc_wind["Durchschnittliche Dauer Nicht-Signal [h]"] = desc_wind["Nicht-Signal-Zeitraum [h]"] / desc_wind[
        "Anzahl Signal-Perioden"]
    fig, ax = plt.subplots(1, 2, figsize=figsize)
    desc_wind.loc[columns][["Zeitraum mit Signal [h]", "Nicht-Signal-Zeitraum [h]"]] \
        .plot(kind="bar", color=["cyan", "black"], stacked=True, ax=ax[0]).set(ylabel="Stunden")
    desc_wind.loc[columns][["Durchschnittliche Dauer Signal [h]", "Durchschnittliche Dauer Nicht-Signal [h]"]] \
        .plot(kind="bar", color=["orange", "grey"], stacked=False, ax=ax[1]).set(ylabel="Stunden")
    # Annotate the stacked bars with their share of total hours.
    for p in ax[0].patches:
        ax[0].annotate("{:.1f}%".format(p.get_height() * 100 / len(df)),
                       (p.get_x() + p.get_width() / 2., p.get_height() + p.get_y() - 5), ha='center', va='center',
                       fontsize=7, color='black', xytext=(0, -8), textcoords='offset points')
    # Annotate the average-duration bars with their absolute value.
    for p in ax[1].patches:
        ax[1].annotate("{:.0f}".format(p.get_height()), (p.get_x() + p.get_width() / 2., p.get_height()), ha='center',
                       va='center', fontsize=7, color='black', xytext=(0, -8), textcoords='offset points')
    if ytick_average_max:
        # One tick per day on the duration axis.
        ax[1].yaxis.set_ticks(np.arange(0, ytick_average_max, 24))  # TODO: as function parameters
    if cut_ylim:
        plt.ylim(top=cut_ylim)
    plt.grid(axis="x")
    return fig, ax
def Ueberschuesse_PVfirst(df):
    """Hourly split of renewable generation, giving PV priority over wind.

    For every hour of one year (8760 h) the wind and PV generation is split
    into direct consumption (``*DV``) and surplus (``*UeSch``), with PV used
    first to cover the residual demand.  Mutates ``df`` in place and returns
    it.

    NOTE(review): per-element writes use chained attribute indexing
    (``df.Col[t] = ...``); ``df.loc[t, 'Col'] = ...`` would be the
    recommended pandas form -- confirm before changing, behaviour depends on
    the current pandas version.
    """
    df["Non_volatiles"] = df.Pumpspeicher + df.Laufkraft
    df["RESohneWind"] = df.Laufkraft + df.Photovoltaik + df.Pumpspeicher
    df["Residual_ohne_Wind"] = df.Strombedarf - df.Photovoltaik - df.Non_volatiles
    df["Zero"] = 0
    df["Wind_useful"] = (df[["Windkraft", "Residual_ohne_Wind"]]).min(axis=1).clip(0, None)
    df["WindkraftUeSch"] = 0  # Surplus (Ueberschuss)
    df["WindkraftDV"] = 0  # Direct consumption (Direktverbrauch)
    df["WindkraftLast"] = 0
    df["PVUeSch"] = 0  # Surplus (Ueberschuss)
    df["PVDV"] = 0  # Direct consumption (Direktverbrauch)
    for t in range(8760):
        if (df.Photovoltaik[t] + df.Non_volatiles[t]) >= df.Strombedarf[t]:
            # PV plus non-volatiles alone cover demand: all wind is surplus.
            df.WindkraftUeSch[t] = df.Windkraft[t]
            df.PVDV[t] = df.Strombedarf[t] - df.Non_volatiles[t]
            df.PVUeSch[t] = df.Photovoltaik[t] - df.PVDV[t]
        else:
            if df.RES[t] <= df.Strombedarf[t]:
                # Total renewables below demand: everything is consumed.
                df.WindkraftUeSch[t] = 0
                df.PVUeSch[t] = 0
                df.WindkraftDV[t] = df.Windkraft[t]
                df.PVDV[t] = df.Photovoltaik[t]
            else:
                # PV fully consumed; wind covers the rest, remainder surplus.
                df.PVDV[t] = df.Photovoltaik[t]
                df.WindkraftDV[t] = df.Strombedarf[t] - (df.PVDV[t] + df.Non_volatiles[t])
                df.WindkraftUeSch[t] = df.Windkraft[t] - df.WindkraftDV[t]
        if df.RES[t] > df.Strombedarf[t]:
            df.WindkraftLast[t] = df.Strombedarf[t] - df.RES[t] + df.Windkraft[t]
    return df
def Ueberschuesse_WINDfirst(df2):
    """Hourly split of renewable generation, giving wind priority over PV.

    Counterpart to :func:`Ueberschuesse_PVfirst`: wind is used first to
    cover the residual demand, PV fills the remainder.  Mutates ``df2`` in
    place and returns it.

    NOTE(review): per-element writes use chained attribute indexing
    (``df2.Col[t] = ...``); ``df2.loc[t, 'Col'] = ...`` would be the
    recommended pandas form.
    """
    df2["Non_volatiles"] = df2.Pumpspeicher + df2.Laufkraft
    df2["Zero"] = 0
    df2["Residual_ohne_Wind"] = df2.Strombedarf - df2.Non_volatiles
    df2["Wind_useful"] = (df2[["Windkraft", "Residual_ohne_Wind"]]).min(axis=1).clip(0, None)
    df2["WindkraftUeSch"] = 0  # Surplus (Ueberschuss)
    df2["WindkraftDV"] = 0
    df2["WindkraftLast"] = 0  # Direct consumption (Direktverbrauch)
    df2["PVUeSch"] = 0  # Surplus (Ueberschuss)
    df2["PVLast"] = 0  # Direct consumption (Direktverbrauch)
    df2["PVDV"] = 0
    for t in range(8760):
        if (df2.Windkraft[t] + df2.Non_volatiles[t]) <= df2.Strombedarf[t]:
            # Wind fully absorbed by demand.
            df2.WindkraftDV[t] = df2.Windkraft[t]
        if (df2.Windkraft[t] + df2.Non_volatiles[t]) > df2.Strombedarf[t]:
            if df2.Non_volatiles[t] > df2.Strombedarf[t]:
                # Non-volatiles alone exceed demand: all wind is surplus.
                df2.WindkraftUeSch[t] = df2.Windkraft[t]
            else:
                df2.WindkraftUeSch[t] = df2.Windkraft[t] + df2.Non_volatiles[t] - df2.Strombedarf[t]
                df2.WindkraftDV[t] = df2.Strombedarf[t] - df2.Non_volatiles[t]
        if (df2.Windkraft[t] + df2.Non_volatiles[t]) >= df2.Strombedarf[t]:
            # Demand already covered before PV: all PV is surplus.
            df2.PVUeSch[t] = df2.Photovoltaik[t]
        elif (df2.Windkraft[t] + df2.Non_volatiles[t]) < df2.Strombedarf[t]:
            if df2.RES[t] < df2.Strombedarf[t]:
                df2.PVUeSch[t] = 0
                df2.PVDV[t] = df2.Photovoltaik[t]
            else:
                df2.PVDV[t] = df2.Strombedarf[t] - df2.Non_volatiles[t] - df2.WindkraftDV[t]
                df2.PVUeSch[t] = df2.Photovoltaik[t] - df2.PVDV[t]
    # if df.RES[t] > df.Strombedarf[t]:
    #     df.PVLast[t] = df.Strombedarf[t] - df.RES[t] + df.Photovoltaik[t]
    # if df.RES[t] > df.Strombedarf[t]:
    #     df.WindkraftLast[t] = df.Strombedarf[t] - df.RES[t] + df.Windkraft[t]
    df2["RESohnePV"] = df2.Laufkraft + df2.Windkraft + df2.Pumpspeicher
    df2["Residual_ohne_PV"] = df2.Strombedarf - df2.Photovoltaik - df2.Non_volatiles
    return df2
def maxnutz():
    """Collect the six MANutz switching signals into one normalized table.

    Reads the per-scenario ``Schaltsignal_*.csv`` files from the processed
    data directory, inverts the 0/1 coding (0 -> 1, 1 -> 0) and writes the
    result to ``../data/processed/MANutz/maxnutz_normalized.csv``.

    Returns ``None`` (the return value of ``DataFrame.to_csv`` with a path).
    """
    import FLUCCOplus.config as config
    from pathlib import Path
    df_nutz = pd.DataFrame()
    # Column 1 of each CSV holds the signal series (column 0 is the index).
    df_nutz["Schaltsignal_REF"] = pd.read_csv(config.DATA_PROCESSED / Path("MANutz/Schaltsignal_REF.csv")).iloc[:, 1]
    df_nutz["Schaltsignal_REG"] = pd.read_csv(config.DATA_PROCESSED / Path("MANutz/Schaltsignal_REG.csv")).iloc[:, 1]
    df_nutz["Schaltsignal_UBA30"] = pd.read_csv(config.DATA_PROCESSED / Path("MANutz/Schaltsignal_uba30.csv")).iloc[:, 1]
    df_nutz["Schaltsignal_UBA50"] = pd.read_csv(config.DATA_PROCESSED / Path("MANutz/Schaltsignal_uba50.csv")).iloc[:, 1]
    df_nutz["Schaltsignal_VEIGL30"] = pd.read_csv(config.DATA_PROCESSED / Path("MANutz/Schaltsignal_veigl30.csv")).iloc[:, 1]
    df_nutz["Schaltsignal_VEIGL50"] = pd.read_csv(config.DATA_PROCESSED / Path("MANutz/Schaltsignal_veigl50.csv")).iloc[:, 1]
    # Swap the 0/1 coding via a -1 sentinel: 1 -> -1, then 0 -> 1, -1 -> 0.
    df_nutz = df_nutz.replace(1, -1)
    df_nutz = df_nutz.replace(0, 1).replace(-1, 0)
    return df_nutz.to_csv("../data/processed/MANutz/maxnutz_normalized.csv", sep=";", decimal=",")
if __name__ == "__main__":
    # Smoke test: verify the ``log`` decorator wraps a no-op cleanly.
    @log
    def test():
        pass

    test()
| StarcoderdataPython |
1722762 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from toscaparser.entity_template import EntityTemplate
from toscaparser.properties import Property
log = logging.getLogger('tosca')
class RelationshipTemplate(EntityTemplate):
    '''Relationship template.

    Wraps a TOSCA relationship definition and resolves which capabilities
    of a target node template this relationship can connect to.
    '''

    SECTIONS = (DERIVED_FROM, PROPERTIES, REQUIREMENTS,
                INTERFACES, TYPE, DEFAULT_FOR) = \
               ('derived_from', 'properties', 'requirements', 'interfaces',
                'type', 'default_for')

    ANY = 'ANY'

    def __init__(self, relationship_template, name, custom_def=None,
                 target=None, source=None):
        """Build from a parsed relationship template.

        ``target``/``source`` are the node templates at either end of the
        relationship (may be supplied later by the caller).
        """
        super(RelationshipTemplate, self).__init__(name,
                                                   relationship_template,
                                                   'relationship_type',
                                                   custom_def)
        self.name = name
        self.target = target
        self.source = source
        # Resolved capability on the target; filled in externally.
        self.capability = None
        self.default_for = self.entity_tpl.get(self.DEFAULT_FOR)

    def get_matching_capabilities(self, targetNodeTemplate, capability_name=None):
        """Return the capabilities on ``targetNodeTemplate`` that match
        this relationship (optionally restricted by ``capability_name``,
        interpreted first as a symbolic name, then as a capability type).
        """
        capabilitiesDict = targetNodeTemplate.get_capabilities()
        # If capability_name is set, make sure the target node has a
        # capability matching it either as a symbolic name or as a type.
        if capability_name:
            capability = capabilitiesDict.get(capability_name)
            if capability:
                # Just test the capability that matches the symbolic name.
                capabilities = [capability]
            else:
                # Name doesn't match a symbolic name; see if it is a valid
                # capability type name instead.
                capabilities = [cap for cap in capabilitiesDict.values() if cap.is_derived_from(capability_name)]
        else:
            capabilities = list(capabilitiesDict.values())
        # If valid_target_types is set on the relationship type, keep only
        # the compatible capabilities.
        capabilityTypes = self.type_definition.valid_target_types
        if capabilityTypes:
            capabilities = [cap for cap in capabilities
                            if any(cap.is_derived_from(capType) for capType in capabilityTypes)]
        elif not capability_name and len(capabilities) > 1:
            # No capability was specified and there is more than one to
            # choose from: fall back to the generic "feature" capability.
            featureCap = capabilitiesDict.get("feature")
            if featureCap:
                return [featureCap]
        return capabilities
| StarcoderdataPython |
179533 | import torch
import torch.nn as nn
#from torch.autograd import Function
def lovasz_grad(gt_sorted):
    """
    Computes gradient of the Lovasz extension w.r.t sorted errors
    See Alg. 1 in paper
    """
    num_errors = len(gt_sorted)
    total_positives = gt_sorted.sum()
    cum_positives = gt_sorted.float().cumsum(0)
    cum_negatives = (1 - gt_sorted).float().cumsum(0)
    # Jaccard index after including each prefix of the sorted errors.
    jaccard = 1. - (total_positives - cum_positives) / (total_positives + cum_negatives)
    # Difference the running values to obtain per-position gradients
    # (covers the 1-pixel case, where no differencing is needed).
    if num_errors > 1:
        diffs = jaccard[1:num_errors] - jaccard[0:-1]
        jaccard[1:num_errors] = diffs
    return jaccard
class LovaszSoftmax(nn.Module):
    """Multi-class Lovasz-Softmax segmentation loss.

    Accepts 4-D (batch, classes, H, W) or 5-D (batch, classes, D, H, W)
    probability maps (see the dim() checks in ``prob_flatten``) and an
    integer label map of matching spatial shape.
    """

    def __init__(self, reduction='mean'):
        # reduction: 'none' | 'sum' | anything else -> mean (default).
        super(LovaszSoftmax, self).__init__()
        self.reduction = reduction

    def prob_flatten(self, input, target):
        """Flatten predictions to (N, C) and targets to (N,)."""
        assert input.dim() in [4, 5]
        num_class = input.size(1)
        if input.dim() == 4:
            # (B, C, H, W) -> (B, H, W, C) -> (B*H*W, C)
            input = input.permute(0, 2, 3, 1).contiguous()
            input_flatten = input.view(-1, num_class)
        elif input.dim() == 5:
            # (B, C, D, H, W) -> (B, D, H, W, C) -> (B*D*H*W, C)
            input = input.permute(0, 2, 3, 4, 1).contiguous()
            input_flatten = input.view(-1, num_class)
        target_flatten = target.view(-1)
        return input_flatten, target_flatten

    def lovasz_softmax_flat(self, inputs, targets):
        """Per-class Lovasz hinge on flattened (N, C) predictions."""
        num_classes = inputs.size(1)
        losses = []
        for c in range(num_classes):
            # Binary ground truth for class c.
            target_c = (targets == c).float()
            if num_classes == 1:
                input_c = inputs[:, 0]
            else:
                input_c = inputs[:, c]
            # Absolute errors, sorted descending; gradient weights come
            # from the ground truth permuted into the same order.
            loss_c = (torch.autograd.Variable(target_c) - input_c).abs()
            loss_c_sorted, loss_index = torch.sort(loss_c, 0, descending=True)
            target_c_sorted = target_c[loss_index]
            losses.append(torch.dot(loss_c_sorted, torch.autograd.Variable(lovasz_grad(target_c_sorted))))
        losses = torch.stack(losses)
        if self.reduction == 'none':
            loss = losses
        elif self.reduction == 'sum':
            loss = losses.sum()
        else:
            loss = losses.mean()
        return loss

    def forward(self, inputs, targets):
        # print(inputs.shape, targets.shape) # (batch size, class_num, x,y,z), (batch size, 1, x,y,z)
        inputs, targets = self.prob_flatten(inputs, targets)
        # print(inputs.shape, targets.shape)
        losses = self.lovasz_softmax_flat(inputs, targets)
        return losses
# class net(nn.Module):
# def __init__(self, in_channels, num_classes):
# super(net, self).__init__()
# self.conv = nn.Conv3d(in_channels, num_classes, (1, 3, 3), padding=(0, 1, 1))
# def forward(self, input):
# out = self.conv(input)
# return out
# from torch.optim import Adam
# BS = 2
# num_classes = 8
# dim, hei, wid = 8, 64, 64
# data = torch.rand(BS, num_classes, dim, hei, wid)
# model = net(num_classes, num_classes)
# target = torch.zeros(BS, dim, hei, wid).random_(num_classes)
# Loss = LovaszSoftmax()
# optim = Adam(model.parameters(), lr=0.01,betas=(0.99,0.999))
# for step in range(2):
# out = model(data)
# loss = Loss(out, target)
# optim.zero_grad()
# loss.backward()
# optim.step()
# print(loss)
| StarcoderdataPython |
1678342 | <reponame>AnneGilles/bok-choy
"""
Test basic HTML form input interactions.
"""
from __future__ import absolute_import
from bok_choy.web_app_test import WebAppTest
from .pages import ButtonPage, TextFieldPage, SelectPage, CheckboxPage
class InputTest(WebAppTest):
    """
    Test basic HTML form input interactions.

    Each test visits a fixture page object and checks that interacting
    with the widget updates the page's ``output`` element.
    """

    def test_button(self):
        # Clicking the button writes a confirmation string to the output.
        button = ButtonPage(self.browser)
        button.visit()
        button.click_button()
        assert button.output == 'button was clicked'

    def test_textfield(self):
        # Entered text is echoed back verbatim.
        text_field = TextFieldPage(self.browser)
        text_field.visit()
        text_field.enter_text('Lorem ipsum')
        assert text_field.output == 'Lorem ipsum'

    def test_select(self):
        # Selecting an option updates the output with the display label
        # and marks exactly that option as selected.
        select = SelectPage(self.browser)
        select.visit()
        select.select_car('fiat')
        assert select.output == 'Fiat'
        self.assertTrue(select.is_car_selected('fiat'))
        self.assertFalse(select.is_car_selected('saab'))
        self.assertFalse(select.is_car_selected('sedan'))

    def test_checkbox(self):
        # Toggling a checkbox reports the toggled value.
        checkbox = CheckboxPage(self.browser)
        checkbox.visit()
        checkbox.toggle_pill('red')
        assert checkbox.output == 'red'
| StarcoderdataPython |
3208398 | <gh_stars>0
from django.db import models
from django.db.models import Sum
from django.utils.translation import gettext_lazy as _
from mptt.models import MPTTModel, TreeForeignKey
from sorl.thumbnail import ImageField
class Department(MPTTModel):
    """Hierarchical company department (MPTT tree) with an optional director."""

    # Unique department title.
    name = models.CharField(max_length=250, unique=True, verbose_name=_("Name"))
    # Parent department in the tree; NULL for root departments.
    parent = TreeForeignKey(
        "self", on_delete=models.CASCADE, null=True, blank=True, related_name="children"
    )
    # Staff member heading this department; cleared if the staff row is deleted.
    director = models.ForeignKey(
        "hr.staff",
        on_delete=models.SET_NULL,
        blank=True,
        null=True,
        related_name="director",
        verbose_name=_("Director"),
    )

    def salary_sum(self):
        """Total salary of staff directly assigned to this department.

        Returns ``None`` when the department has no staff (SQL ``SUM``
        over an empty set).
        """
        return self.staff_set.aggregate(sum=Sum("salary")).get("sum")

    def __str__(self):
        return self.name

    class Meta:
        verbose_name = _("Department")
        verbose_name_plural = _("Departments")
class Staff(models.Model):
    """Employee record, unique per full name within a department."""

    photo = ImageField(upload_to="photos/staff/", verbose_name=_("Photo"))
    first_name = models.CharField(max_length=100, verbose_name=_("First name"))
    middle_name = models.CharField(max_length=100, verbose_name=_("Middle name"))
    # Indexed: staff are commonly looked up by last name.
    last_name = models.CharField(
        max_length=100, db_index=True, verbose_name=_("Last name")
    )
    position = models.CharField(max_length=100, verbose_name=_("Position"))
    salary = models.DecimalField(
        max_digits=10, decimal_places=2, verbose_name=_("Salary")
    )
    date_birth = models.DateField(verbose_name=_("Birth day"))
    # PROTECT: a department with staff cannot be deleted.
    department = models.ForeignKey(
        Department, on_delete=models.PROTECT, verbose_name=_("Department")
    )

    def __str__(self):
        return f"{self.last_name} {self.first_name} {self.middle_name}"

    class Meta:
        verbose_name = _("Staff")
        verbose_name_plural = _("Staff")
        unique_together = [["first_name", "middle_name", "last_name", "department"]]
| StarcoderdataPython |
55899 | #!/usr/bin/env python
from __future__ import print_function
import numpy as np
import cv2 as cv
from tests_common import NewOpenCVTests
class Bindings(NewOpenCVTests):
    """Tests for the generated Python bindings (inheritance, error redirection)."""

    def test_inheritance(self):
        # Methods inherited from intermediate C++ base classes must be
        # reachable through the Python wrappers.
        bm = cv.StereoBM_create()
        bm.getPreFilterCap()  # from StereoBM
        bm.getBlockSize()  # from SteroMatcher
        boost = cv.ml.Boost_create()
        boost.getBoostType()  # from ml::Boost
        boost.getMaxDepth()  # from ml::DTrees
        boost.isClassifier()  # from ml::StatModel

    def test_redirectError(self):
        # Phase 1: default handler -- cv.error must be raised.  The
        # assertEqual("Dead code", 0) lines are unreachable fail markers.
        try:
            cv.imshow("", None)  # This causes an assert
            self.assertEqual("Dead code", 0)
        except cv.error as e:
            pass
        handler_called = [False]

        def test_error_handler(status, func_name, err_msg, file_name, line):
            handler_called[0] = True

        # Phase 2: custom handler installed -- it must be invoked and the
        # Python-level cv.error must still be raised.
        cv.redirectError(test_error_handler)
        try:
            cv.imshow("", None)  # This causes an assert
            self.assertEqual("Dead code", 0)
        except cv.error as e:
            self.assertEqual(handler_called[0], True)
            pass
        # Phase 3: handler removed -- back to default behaviour.
        cv.redirectError(None)
        try:
            cv.imshow("", None)  # This causes an assert
            self.assertEqual("Dead code", 0)
        except cv.error as e:
            pass
if __name__ == '__main__':
    # Run through OpenCV's test bootstrap so data paths/fixtures are set up.
    NewOpenCVTests.bootstrap()
| StarcoderdataPython |
3214665 | <gh_stars>0
import os
import sys
import shutil
import errno
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import glob
import scipy as sp
import scipy.stats
import csv
import logging
sns.set(style="darkgrid")
def delete_dirs(path_to_dir):
    """Delete every ``episode_*`` sub-directory of *path_to_dir*.

    Aborts the program (``sys.exit``) on the first directory that cannot
    be removed, logging which directory failed.
    """
    _logger = logging.getLogger(__name__)
    episode_dirs = glob.glob(os.path.join(path_to_dir) + '/episode_*/')
    for episode_dir in episode_dirs:
        # Keep the try inside the loop so the error message names the
        # directory that actually failed; catch OSError instead of a bare
        # except so KeyboardInterrupt/SystemExit still propagate.
        try:
            shutil.rmtree(episode_dir)
        except OSError:
            _logger.critical("Can't delete directory - %s" % str(episode_dir))
            sys.exit()
def create_dir(path_to_dir):
    """Create *path_to_dir* (including parents) if it does not exist.

    Returns the path unchanged so the call can be chained.  Aborts the
    program (``sys.exit``) when the directory cannot be created.
    """
    _logger = logging.getLogger(__name__)
    try:
        # exist_ok avoids the check-then-create race of the previous
        # isdir()/makedirs() pair; an existing *file* at this path still
        # raises and is reported below, as before.
        os.makedirs(path_to_dir, exist_ok=True)
    except OSError:
        _logger.critical("Can't create directory - %s" % str(path_to_dir))
        sys.exit()
    return path_to_dir
def copy_file(src, dest):
    """Copy *src* to *dest*; abort the program if the copy fails."""
    _logger = logging.getLogger(__name__)
    try:
        shutil.copy(src, dest)
    except (shutil.Error, IOError) as err:
        # shutil.Error: e.g. src and dest are the same file.
        # IOError: e.g. source or destination doesn't exist.
        _logger.critical("Can't copy file - %s" % str(err))
        sys.exit()
def write_stats_file(path_to_file, *args):
    """Append one comma-separated line built from *args* to *path_to_file*.

    Plain lists in *args* are flattened one level; every value is rendered
    with ``str``.  The file is created atomically on first use; later calls
    append.  Aborts the program on unexpected I/O errors.
    """
    _logger = logging.getLogger(__name__)
    flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY
    # Build the CSV line: one field per scalar, one field per list element.
    fields = []
    for arg in args:
        if type(arg) is list:
            fields.extend(str(elem) for elem in arg)
        else:
            fields.append(str(arg))
    line = ','.join(fields) + '\n'
    # O_EXCL makes creation exclusive: success means we own a fresh file,
    # EEXIST means another call created it first and we simply append.
    try:
        file_handle = os.open(path_to_file, flags)
    except OSError as e:
        if e.errno == errno.EEXIST:
            with open(path_to_file, 'a+') as f:
                f.write(line)
        else:
            _logger.critical("Can't write stats file - %s " % str(e))
            sys.exit()
    else:
        # Wrap the raw descriptor so it is closed automatically.
        with os.fdopen(file_handle, 'w') as file_obj:
            file_obj.write(line)
def mean_confidence_interval(my_list, confidence=0.95):
    """Return ``(mean, lower, upper)`` of the *confidence* CI of *my_list*.

    Uses the Student-t distribution with ``len(my_list) - 1`` degrees of
    freedom, which is appropriate for small samples.
    """
    my_array = 1.0 * np.array(my_list)
    array_mean, array_se = np.mean(my_array), scipy.stats.sem(my_array)
    # Two-sided interval: (1 + confidence) / 2 centres the mass.  Use the
    # public t.ppf instead of the private _ppf helper, which is not part of
    # the SciPy API and has been removed in modern releases.
    margin = array_se * scipy.stats.t.ppf((1 + confidence) / 2.,
                                          len(my_array) - 1)
    return array_mean, array_mean - margin, array_mean + margin
def summarize_runs_results(path_to_dir):
    """Aggregate per-run ``stats_run.csv`` files into ``stats_task.csv``.

    Every sub-directory of *path_to_dir* is treated as one run.  For each
    episode the mean and 95% confidence bounds of steps, reward and effort
    are computed across runs and written as one CSV row.
    """
    _logger = logging.getLogger(__name__)
    run_dirs = glob.glob(os.path.join(path_to_dir) + '/*/')
    run_files = [os.path.join(run_dir, 'stats_run.csv')
                 for run_dir in run_dirs]
    df = pd.concat((pd.read_csv(run_file) for run_file in run_files))
    # Each of these becomes a list of (episode, Series-of-values) pairs.
    steps = df.groupby(['episode'])['steps_mean']
    steps = list(steps)
    reward = df.groupby(['episode'])['reward_mean']
    reward = list(reward)
    effort = df.groupby(['episode'])['step_count']
    effort = list(effort)
    summary = []
    for episode in range(0, len(reward)):
        step_mean, step_lower, step_upper = \
            mean_confidence_interval(steps[episode][1])
        reward_mean, reward_lower, reward_upper = \
            mean_confidence_interval(reward[episode][1])
        effort_mean, effort_lower, effort_upper = \
            mean_confidence_interval(effort[episode][1])
        summary.append([int(steps[episode][0]),
                        step_mean, step_lower, step_upper,
                        reward_mean, reward_lower, reward_upper,
                        effort_mean, effort_lower, effort_upper])
    header = ['episode', 'steps_mean', 'steps_lower', 'steps_upper',
              'reward_mean', 'reward_lower', 'reward_upper',
              'effort_mean', 'effort_lower', 'effort_upper']
    try:
        with open(os.path.join(path_to_dir, 'stats_task.csv'), 'w') \
                as csvfile:
            writer = csv.writer(csvfile,
                                dialect='excel',
                                quoting=csv.QUOTE_NONNUMERIC)
            writer.writerow(header)
            for data in summary:
                writer.writerow(data)
    except IOError as e:
        _logger.critical("Can't write stats file - %s " % str(e))
        sys.exit()
def summarize_runs_policy_choice(path_to_dir, kind='probs'):
    """Aggregate per-run policy statistics of the given *kind* across runs.

    Reads ``stats_policy_<kind>.csv`` from every run sub-directory, and for
    each policy column writes ``<kind>_<policy>.csv`` containing per-episode
    mean and 95% confidence bounds.
    """
    _logger = logging.getLogger(__name__)
    run_dirs = glob.glob(os.path.join(path_to_dir) + '/*/')
    policy_files = [os.path.join(run_dir, 'stats_policy_' + kind + '.csv')
                    for run_dir in run_dirs]
    df = pd.concat((pd.read_csv(policy_usage_file)
                    for policy_usage_file in policy_files))
    # All columns except the episode index are policy columns.
    policies = list(df)
    policies = [policy for policy in policies if 'episode' not in policy]
    for policy in policies:
        # List of (episode, Series-of-values-across-runs) pairs.
        usage = df.groupby(['episode'])[policy]
        usage = list(usage)
        summary = []
        for episode in range(0, len(usage)):
            mean_value, lower_value, upper_value = \
                mean_confidence_interval(usage[episode][1])
            summary.append([int(usage[episode][0]),
                            mean_value, lower_value, upper_value])
        header = ['episode', 'mean', 'lower', 'upper']
        try:
            with open(os.path.join(path_to_dir,
                                   kind + '_'+str(policy)+'.csv'),
                      'w') as csvfile:
                writer = csv.writer(csvfile,
                                    dialect='excel',
                                    quoting=csv.QUOTE_NONNUMERIC)
                writer.writerow(header)
                for data in summary:
                    writer.writerow(data)
        except IOError as e:
            _logger.critical("Can't write stats file - %s " % str(e))
            sys.exit()
def plot_run(path_to_dir):
    """Plot every column of one run's ``stats_run.csv`` against episodes.

    One PNG per column is written next to the CSV.

    NOTE(review): the loop also plots the 'episode' column against itself;
    harmless but probably unintended.
    """
    df = pd.read_csv(os.path.join(path_to_dir, 'stats_run.csv'))
    # print(df)
    for column in df.columns:
        plt.figure(figsize=(10, 4), dpi=80)
        plt.plot(df['episode'], df[column],
                 label=column, color='blue', linewidth=2.0)
        plt.ylabel(column, fontsize=20, fontweight='bold')
        plt.xlabel('episodes', fontsize=20, fontweight='bold')
        plt.legend()
        plt.savefig(os.path.join(path_to_dir, 'plot_' + str(column) + '.png'),
                    bbox_inches='tight')
        plt.close('all')
def plot_runs(path_to_dir):
    """Overlay all runs of an experiment, one PNG per statistic column.

    Every sub-directory of *path_to_dir* must contain a ``stats_run.csv``;
    the column set of the first run is assumed for all of them.
    """
    run_dirs = glob.glob(os.path.join(path_to_dir) + '/*/')
    dfs = []
    for run_dir in run_dirs:
        dfs.append(pd.read_csv(os.path.join(run_dir, 'stats_run.csv')))
    for column in dfs[0].columns:
        plt.figure(figsize=(10, 4), dpi=80)
        run_count = 1
        for df in dfs:
            # One line per run, labelled column_1, column_2, ...
            plt.plot(df['episode'], df[column],
                     label=column+'_'+str(run_count), linewidth=2.0)
            run_count += 1
        plt.ylabel(column, fontsize=20, fontweight='bold')
        plt.xlabel('episodes', fontsize=20, fontweight='bold')
        plt.legend()
        plt.savefig(os.path.join(path_to_dir, 'plot_' + str(column) + '.png'),
                    bbox_inches='tight')
        plt.close('all')
def plot_task(path_to_dir):
    """Plot aggregated task statistics with confidence bands.

    Reads ``stats_task.csv`` (as written by ``summarize_runs_results``) and
    produces one PNG per factor (steps / reward / effort), plotting the mean
    with shaded lower/upper confidence bounds.
    """
    df = pd.read_csv(os.path.join(path_to_dir, 'stats_task.csv'))
    factors = ['steps', 'reward', 'effort']
    colors = ['blue', 'green', 'red']
    for factor, color in zip(factors, colors):
        plt.figure(figsize=(10, 4), dpi=80)
        # Clip to the factor's valid range so outliers don't distort plots.
        if factor == 'steps':
            df[factor + '_mean'] = df[factor + '_mean'].clip(0.0, 100.0)
            df[factor + '_lower'] = df[factor + '_lower'].clip(0.0, 100.0)
            df[factor + '_upper'] = df[factor + '_upper'].clip(0.0, 100.0)
        if factor == 'reward':
            df[factor + '_mean'] = df[factor + '_mean'].clip(0.0, 1.0)
            df[factor + '_lower'] = df[factor + '_lower'].clip(0.0, 1.0)
            df[factor + '_upper'] = df[factor + '_upper'].clip(0.0, 1.0)
        plt.plot(df['episode'], df[factor + '_mean'],
                 label=factor+'_mean', color=color, linewidth=2.0)
        plt.plot(df['episode'], df[factor + '_lower'],
                 label=factor+'_lower', color=color,
                 alpha=0.2, linewidth=1.0)
        plt.plot(df['episode'], df[factor + '_upper'],
                 label=factor+'_upper', color=color,
                 alpha=0.2, linewidth=1.0)
        # Shade the band between mean and each bound.
        plt.fill_between(df['episode'], df[factor + '_mean'],
                         df[factor + '_lower'],
                         facecolor=color, alpha=0.2)
        plt.fill_between(df['episode'], df[factor + '_mean'],
                         df[factor + '_upper'],
                         facecolor=color, alpha=0.2)
        plt.ylabel(factor, fontsize=20, fontweight='bold')
        plt.xlabel('episodes', fontsize=20, fontweight='bold')
        plt.legend(fontsize=14)
        plt.savefig(os.path.join(path_to_dir, 'plot_' + str(factor) + '.png'),
                    bbox_inches='tight')
        plt.close('all')
def plot_policy_choice(path_to_dir, kind='probs'):
    """Plot one run's ``stats_policy_<kind>.csv`` as lines over episodes.

    NOTE(review): an unknown *kind* falls through the ladder into
    ``else: pass``, leaving ``ylabel`` unbound and raising NameError at
    ``plt.ylabel`` below -- consider a default label or an explicit error.
    """
    if kind == 'probs':
        ylabel = 'policy probability [%]'
    elif kind == 'absolute':
        ylabel = 'policy mean [steps]'
    elif kind == 'W':
        ylabel = 'Reuse gain [gain per episode]'
    elif kind == 'W_mean':
        ylabel = 'Average Reuse gain []'
    elif kind == 'U':
        ylabel = 'policy usage [count]'
    elif kind == 'P':
        ylabel = 'policy probability [%]'
    else:
        pass
    df = pd.read_csv(os.path.join(path_to_dir,
                                  'stats_policy_' + kind + '.csv'))
    plt.figure(figsize=(10, 4), dpi=80)
    df.plot(x='episode')
    plt.ylabel(ylabel, fontsize=20, fontweight='bold')
    plt.xlabel('episodes', fontsize=20, fontweight='bold')
    plt.legend(fontsize=14)
    plt.savefig(os.path.join(path_to_dir, 'plot_policy_' + kind + '.png'),
                bbox_inches='tight')
    plt.close('all')
def plot_policy_choice_summary(path_to_dir, kind='probs'):
    """Plot the aggregated per-policy CSVs of *kind* with confidence bands.

    Reads every ``<kind>_<policy>.csv`` written by
    ``summarize_runs_policy_choice`` and draws one colored mean line plus a
    shaded lower/upper band per policy.

    ``skip`` is the number of leading characters (``len(kind) + 1``, i.e.
    the ``<kind>_`` prefix) stripped from the file name to recover the
    policy label.  ``limit_lower``/``limit_upper`` clip values to the
    factor's valid range.

    NOTE(review): as in ``plot_policy_choice``, an unknown *kind* leaves
    ``ylabel``/``skip``/``limit_upper`` unbound (NameError below).  Also,
    only 7 colors are defined; an 8th policy file raises IndexError.
    """
    limit_lower = 0
    if kind == 'probs':
        ylabel = 'policy probability [%]'
        skip = 6
        limit_upper = 1.0
    elif kind == 'absolute':
        ylabel = 'policy mean [steps]'
        skip = 9
        limit_upper = 100.0
    elif kind == 'W':
        ylabel = 'Reuse gain [gain per episode]'
        skip = 2
        limit_upper = 1.0
    elif kind == 'W_mean':
        ylabel = 'Average Reuse gain []'
        skip = 7
        limit_upper = 1.0
    elif kind == 'U':
        ylabel = 'policy usage [count]'
        skip = 2
        limit_upper = 1000.0
    elif kind == 'P':
        ylabel = 'policy probability [%]'
        skip = 2
        limit_upper = 1.0
    else:
        pass
    policy_files = glob.glob(
        os.path.join(path_to_dir) + '/' + kind + '_*.csv')
    colors = ['red', 'green', 'blue', 'yellow', 'black', 'brown', 'orange']
    plt.figure(figsize=(10, 4), dpi=80)
    color_count = 0
    for policy_file in policy_files:
        df = pd.read_csv(policy_file)
        # Recover the policy name: basename without extension and prefix.
        policy_name = policy_file.split('/')
        policy_name = policy_name[-1].split('.')
        policy_name = policy_name[0][skip:]
        df['mean'] = df['mean'].clip(limit_lower, limit_upper)
        df['lower'] = df['lower'].clip(limit_lower, limit_upper)
        df['upper'] = df['upper'].clip(limit_lower, limit_upper)
        plt.plot(df['episode'], df['mean'],
                 label=policy_name, color=colors[color_count], linewidth=2.0)
        plt.plot(df['episode'], df['lower'],
                 label='_nolegend_', color=colors[color_count],
                 alpha=0.2, linewidth=1.0)
        plt.plot(df['episode'], df['upper'],
                 label='_nolegend_', color=colors[color_count],
                 alpha=0.2, linewidth=1.0)
        plt.fill_between(df['episode'], df['mean'],
                         df['lower'],
                         facecolor=colors[color_count], alpha=0.2)
        plt.fill_between(df['episode'], df['mean'],
                         df['upper'],
                         facecolor=colors[color_count], alpha=0.2)
        color_count += 1
    plt.ylabel(ylabel, fontsize=20, fontweight='bold')
    plt.xlabel('episodes', fontsize=20, fontweight='bold')
    plt.xticks(fontsize=20)
    plt.yticks(fontsize=20)
    # plt.xlim(0, 1000)
    plt.legend(fontsize=14, loc='upper left')
    plt.savefig(os.path.join(path_to_dir, 'plot_policy_' + kind + '.png'),
                bbox_inches='tight')
    plt.close('all')
| StarcoderdataPython |
1673624 | <reponame>josephwkim/schedulize
import numpy as np
import pandas as pd
from calParser import obtainSchedule
from audit_parser import audit_info
from lsa_recommender import export_to_master,filter_available_classes
from decision_tree import preference_score,top_preferred_courses
from collaborative_filtering import loadAudits, inputData, buildRecommender, makePrediction, compileDepartScores
from time import time
import json
from CONSTANTS import * | StarcoderdataPython |
3301394 | <reponame>cnheider/vulkan-kompute
"""
Script to handle conversion of compute shaders to spirv and to headers
"""
import os
import sys
import logging
import click
import subprocess
# Module-level logger; a StreamHandler is attached so CLI runs always emit
# to stderr even without external logging configuration.
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler())

# Platform switch: on Windows a bundled xxd.exe is used, elsewhere xxd is
# expected on PATH.
is_windows = sys.platform.startswith('win')

CWD = os.path.dirname(os.path.abspath(__file__))
XXD_LINUX_CMD = "xxd"
XXD_WINDOWS_CMD = os.path.abspath(os.path.join(CWD, "..\\external\\bin\\", "xxd.exe"))

# Banner written verbatim at the top of every generated C++ header.
SHADER_GENERATED_NOTICE = """/*
THIS FILE HAS BEEN AUTOMATICALLY GENERATED - DO NOT EDIT
---
Copyright 2020 The Institute for Ethical AI & Machine Learning
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
"""
@click.command()
@click.option(
    "--shader-path",
    "-p",
    envvar="KOMPUTE_SHADER_PATH",
    required=True,
    help="The path for the directory to build and convert shaders",
)
@click.option(
    "--shader-binary",
    "-s",
    envvar="KOMPUTE_SHADER_BINARY",
    required=True,
    help="The path to the shader compiler binary (e.g. glslangValidator)",
)
@click.option(
    "--header-path",
    "-c",
    envvar="KOMPUTE_HEADER_PATH",
    default="",
    required=False,
    help="The (optional) output file for the cpp header files",
)
@click.option(
    "--verbose",
    "-v",
    # Fixed: this envvar was copy-pasted as KOMPUTE_HEADER_PATH, so setting
    # the header-path environment variable also forced verbose mode on.
    envvar="KOMPUTE_VERBOSE",
    default=False,
    is_flag=True,
    help="Enable verbosity if flag is provided",
)
def run_cli(
    shader_path: str = None,
    shader_binary: str = None,
    header_path: str = "",
    verbose: bool = False,
):
    """
    CLI function for shader generation.

    Compiles every ``*.comp`` GLSL compute shader under ``shader_path`` to
    SPIR-V with ``shader_binary``, and (optionally) converts each SPIR-V
    blob to a C++ header in ``header_path`` via ``xxd -i``.
    """
    if verbose:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.WARNING)

    logger.debug(f"Starting script with variables: {locals()}")

    if is_windows:
        logger.debug(f"Running on windows, converting input paths")
        shader_path = shader_path.replace("/", "\\")
        header_path = header_path.replace("/", "\\")

    # Collect every GLSL compute shader below shader_path.
    shader_files = []
    for root, directory, files in os.walk(shader_path):
        for file in files:
            if file.endswith(".comp"):
                shader_files.append(os.path.join(root, file))

    def run_cmd(*args):
        # Raises CalledProcessError on non-zero exit, aborting the CLI.
        return subprocess.check_output([*args]).decode()

    logger.debug(f"Output spirv path: {shader_path}")
    logger.debug(f"Converting files to spirv: {shader_files}")

    spirv_files = []
    for file in shader_files:
        logger.debug(f"Converting to spirv: {file}")
        spirv_file = f"{file}.spv"
        run_cmd(shader_binary, "-V", file, "-o", spirv_file)
        spirv_files.append(spirv_file)

    # Create cpp files if header_path provided
    if header_path:
        logger.debug(f"Header path provided. Converting bin files to hpp.")
        logger.debug(f"Output header path: {shader_path}")
        # Check which xxd command variant is available on this platform.
        if is_windows:
            xxd_cmd = XXD_WINDOWS_CMD
        else:
            xxd_cmd = XXD_LINUX_CMD
        for file in spirv_files:
            # (Removed stray debug print of xxd_cmd that wrote to stdout.)
            header_data = str(run_cmd(xxd_cmd, "-i", file))
            # Ensuring the variable is a static unsigned const
            header_data = header_data.replace("unsigned", "static unsigned const")
            if is_windows:
                raw_file_name = file.split("\\")[-1]
            else:
                raw_file_name = file.split("/")[-1]
            file_name = f"shader{raw_file_name}"
            header_file = file_name.replace(".comp.spv", ".hpp")
            header_file_define = "SHADEROP_" + header_file.replace(".", "_").upper()
            logger.debug(f"Converting to hpp: {file_name}")
            # Force LF newlines so generated headers are identical across
            # platforms, and wrap the data in an include guard + namespaces.
            with open(os.path.join(header_path, header_file), "w+", newline='\n') as fstream:
                fstream.write(f"{SHADER_GENERATED_NOTICE}\n")
                fstream.write(f"#ifndef {header_file_define}\n")
                fstream.write(f"#define {header_file_define}\n\n")
                fstream.write("namespace kp {\n")
                fstream.write("namespace shader_data {\n")
                fstream.write(f"{header_data}")
                fstream.write("}\n")
                fstream.write("}\n")
                fstream.write(f"#endif // define {header_file_define}\n")
if __name__ == "__main__":
    # Click parses argv and environment variables when the command is invoked.
    run_cli()
| StarcoderdataPython |
1611451 | <filename>test/sounds.py<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
test.sounds
~~~~~~~~~~~
Tests various functions for Sanskrit sounds.
:license: MIT and BSD
"""
from builtins import zip
from sanskrit_util import sounds
from . import TestCase
class CleanTestCase(TestCase):
    """Tests for :func:`sounds.clean` (strip characters outside a token set)."""

    def test(self):
        func = sounds.clean
        # Keeping all tokens removes punctuation but preserves spacing.
        self.assertEqual('kaTam idam', func('kaTam! idam...',
                                            sounds.ALL_TOKENS))
        # Keeping only consonants drops vowels, digits and punctuation.
        self.assertEqual('kTmdm', func('ka!!!Tamida23m//', sounds.CONSONANTS))
class TransformTestCase(TestCase):
    """Tests for single-sound transformations (SLP1 transliteration).

    Each helper is expected to return its input unchanged when the
    transformation does not apply (e.g. vowels for consonant transforms).
    """

    def test_aspirate(self):
        func = sounds.aspirate
        self.assertEqual('K', func('k'))
        self.assertEqual('J', func('J'))  # already aspirated: unchanged
        self.assertEqual('a', func('a'))

    def test_deaspirate(self):
        func = sounds.deaspirate
        self.assertEqual('k', func('k'))  # already unaspirated: unchanged
        self.assertEqual('j', func('J'))
        self.assertEqual('a', func('a'))

    def test_voice(self):
        func = sounds.voice
        self.assertEqual('b', func('p'))
        self.assertEqual('Q', func('Q'))  # already voiced: unchanged
        self.assertEqual('a', func('a'))

    def test_devoice(self):
        func = sounds.devoice
        self.assertEqual('k', func('k'))  # already voiceless: unchanged
        self.assertEqual('C', func('J'))
        self.assertEqual('a', func('a'))

    def test_nasalize(self):
        func = sounds.nasalize
        self.assertEqual('N', func('k'))
        self.assertEqual('m', func('p'))
        self.assertEqual('a', func('a'))

    def test_dentalize(self):
        func = sounds.dentalize
        self.assertEqual('t', func('w'))
        self.assertEqual('s', func('z'))
        self.assertEqual('a', func('a'))

    def test_simplify(self):
        func = sounds.simplify
        self.assertEqual('k', func('G'))
        self.assertEqual('w', func('j'))
        self.assertEqual('a', func('a'))

    def test_guna(self):
        # Pairs of (input vowel, expected guna grade).
        func = sounds.guna
        data = list(zip('a A i I u U f F x X e E o O'.split(),
                        'a A e e o o ar ar al al e E o O'.split()))
        for data, output in data:
            self.assertEqual(output, func(data))

    def test_vrddhi(self):
        # Pairs of (input vowel, expected vrddhi grade).
        func = sounds.vrddhi
        data = list(zip('a A i I u U f F x X e E o O'.split(),
                        'A A E E O O Ar Ar Al Al E E O O'.split()))
        for data, output in data:
            self.assertEqual(output, func(data))
class NumSyllablesTestCase(TestCase):
    """Tests for :func:`sounds.num_syllables`."""

    def test_simple(self):
        """Test some simple syllables."""
        func = sounds.num_syllables
        self.assertEqual(1, func('a'))
        self.assertEqual(1, func('I'))
        self.assertEqual(1, func('zwre'))
        self.assertEqual(1, func('uM'))

    def test_long(self):
        """Test longer phrases."""
        func = sounds.num_syllables
        self.assertEqual(8, func('Darmakzetre kurukzetre'))
class MeterTestCase(TestCase):
    """Tests for :func:`sounds.meter` ('.' = light syllable, '_' = heavy)."""

    def test_simple(self):
        """Test some simple syllables."""
        for v in sounds.SHORT_VOWELS:
            self.assertEqual('.', ''.join(sounds.meter(v)))
        for v in 'AIUFXeEoO':
            self.assertEqual('_', ''.join(sounds.meter(v)))
        # NOTE(review): the loop variable ``groups`` is never used -- the
        # assertion reuses ``v`` left over from the previous loop, so this
        # check is vacuous.  Probably meant ``sounds.meter(groups)``;
        # confirm intended expectation before fixing.
        for groups in ['aM naH yuG']:
            self.assertEqual('_', ''.join(sounds.meter(v)))

    def test_meghaduta(self):
        """Test some lines from the Meghaduta."""
        verse = """
kaScitkAntAvirahaguruRA svADikArapramattaH
SApenAstaMgamitamahimA varzaBogyeRa BartuH .
yakzaScakre janakatanayAsnAnapuRyodakezu
snigDacCAyAtaruzu vasatiM rAmagiryASramezu .. 1 ..
"""
        mandakranta = '____.....__.__.__'
        for line in verse.strip().splitlines():
            scan = sounds.meter(line)
            # The final syllable of a pada counts as heavy by convention.
            scan[-1] = '_'
            self.assertEqual(mandakranta, ''.join(scan))
| StarcoderdataPython |
3342008 | __all__ = ['BioCDocument']
from .compat import _Py2Next
from .meta import _MetaId, _MetaInfons, _MetaRelations, _MetaIter
class BioCDocument(_MetaId, _MetaInfons, _MetaRelations, _MetaIter, _Py2Next):
    """One document in a BioC collection: id, infons, passages, relations."""

    def __init__(self, document=None):
        """Create an empty document, or a shallow copy of *document*.

        NOTE(review): copying assigns references, not clones -- the new
        document shares its infons/relations/passages containers with the
        source document.  Confirm whether a deep copy is intended.
        """
        self.id = ''
        self.infons = dict()
        self.relations = list()
        self.passages = list()
        if document is not None:
            self.id = document.id
            self.infons = document.infons
            self.relations = document.relations
            self.passages = document.passages

    def __str__(self):
        s = 'id: ' + self.id + '\n'
        s += 'infon: ' + str(self.infons) + '\n'
        s += str(self.passages) + '\n'
        s += 'relation: ' + str(self.relations) + '\n'
        return s

    def _iterdata(self):
        # Iteration (via _MetaIter) walks the passages.
        return self.passages

    def get_size(self):
        """Total size of all passages."""
        return sum(p.size() for p in self.passages)  # As in Java BioC

    def clear_passages(self):
        self.passages = list()

    def add_passage(self, passage):
        self.passages.append(passage)

    def remove_passage(self, passage):
        """Remove a passage by index (int argument) or by value."""
        if isinstance(passage, int):
            self.passages.pop(passage)
        else:
            self.passages.remove(passage)  # TBC
| StarcoderdataPython |
139582 | # Copyright (c) 2018, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
import json
from qiskit import __version__
from IBMQuantumExperience import IBMQuantumExperience
from packaging import version
import argparse
import warnings
from qiskit import QuantumCircuit
from qiskit.wrapper import load_qasm_file
from qiskit import execute
if (version.parse(__version__) >= version.parse("0.6")):
from qiskit import IBMQ
from qiskit import Aer
class QiskitUnsupportedVersion(Exception):
    """Raised when the installed qiskit-terra version is not supported."""

    def __init__(self, value):
        # Keep the offending value around for callers that inspect it.
        self.value = value

    def __str__(self):
        return '{!r}'.format(self.value)
class QiskitTools(object):
    """
    Utilities for the Qiskit-terra-related scripts
    """

    def __init__(self):
        # Map internal backend identifiers to their public, human-readable
        # names (both old and new spellings of the same device map to the
        # same public name).
        self.PUBLIC_NAMES = {
            'ibmq_20_tokyo': 'IBM Q 20 Tokyo',
            'QS1_1': 'IBM Q 20 Austin',
            'ibmq_16_melbourne': 'IBM Q 16 Melbourne',
            'ibmqx5': 'IBM Q 16 Rueschlikon',
            'ibmq_16_rueschlikon': 'IBM Q 16 Rueschlikon',
            'ibmqx4': 'IBM Q 5 Tenerife',
            'ibmq_5_tenerife': 'IBM Q 5 Tenerife',
            'ibmqx2': 'IBM Q 5 Yorktown',
            'ibmq_5_yorktown': 'IBM Q 5 Yorktown',
            'ibmq_qasm_simulator': 'IBM Q QASM Simulator'
        }

    def executeQASM(self, filename):
        # Run the QASM file on the local Aer qasm_simulator and return the
        # measurement counts. The QASM-loading API changed between terra
        # 0.6 (load_qasm_file) and 0.7 (QuantumCircuit.from_qasm_file).
        if (version.parse(__version__) >= version.parse("0.6") and
                (version.parse(__version__) < version.parse("0.7"))):
            qc = load_qasm_file(filename)
            job_sim = execute(qc, Aer.get_backend("qasm_simulator"))
            result = job_sim.result()
            return result.get_counts()
        elif (version.parse(__version__) >= version.parse("0.7")):
            qc = QuantumCircuit.from_qasm_file(filename)
            job_sim = execute(qc, Aer.get_backend("qasm_simulator"))
            result = job_sim.result()
            return result.get_counts()
        else:
            raise QiskitUnsupportedVersion(
                'Qiskit-terra version must be v0.6 or v0.7')

    def listRemoteBackends(self, apiToken, url,
                           hub=None, group=None, project=None):
        # Enable the IBMQ account (with hub/group/project when all three
        # are given) and list the names of the remote backends.
        if version.parse(__version__) >= version.parse("0.6"):
            if (hub is None or group is None or project is None):
                IBMQ.enable_account(apiToken, url)
            else:
                IBMQ.enable_account(apiToken, url=url,
                                    hub=hub, group=group,
                                    project=project)
            backs = [backend.name() for backend in IBMQ.backends()]
        else:
            raise QiskitUnsupportedVersion(
                'Qiskit-terra version must be > v0.6')
        return backs

    def listLocalBackends(self):
        # Names of the local Aer simulator backends.
        if version.parse(__version__) >= version.parse("0.6"):
            backs = [backend.name() for backend in Aer.backends()]
        else:
            raise QiskitUnsupportedVersion(
                'Qiskit-terra version must be > v0.6')
        return backs

    def getBackendStatus(self, back, apiToken, url,
                         hub=None, group=None, project=None):
        # Enable the account, then return the parsed status of one remote
        # backend (see parseBackendStatus for the normalized shape).
        if version.parse(__version__) >= version.parse("0.6"):
            if (hub is None or group is None or project is None):
                IBMQ.enable_account(apiToken, url)
            else:
                IBMQ.enable_account(apiToken, url,
                                    hub=hub, group=group,
                                    project=project)
            return self.parseBackendStatus(IBMQ.get_backend(back).status())
        else:
            raise QiskitUnsupportedVersion(
                'Qiskit-terra version must be > v0.6')

    def createDeviceStatus(self, back):
        # Combine the public display name with the parsed backend status.
        # Assumes the account is already enabled -- TODO confirm callers.
        return {
            'name': self.PUBLIC_NAMES[back],
            'status': self.parseBackendStatus(
                IBMQ.get_backend(back).status()
            )
        }

    def parseBackendStatus(self, backendStatus):
        # Normalize the status object to {'name', 'pending_jobs',
        # 'available'} across terra versions: 0.6 returned a dict-like,
        # 0.7+ returns a BackendStatus object with attributes.
        if (version.parse(__version__) >= version.parse("0.6") and
                (version.parse(__version__) < version.parse("0.7"))):
            return {
                'name': backendStatus['name'],
                'pending_jobs': backendStatus['pending_jobs'],
                'available': self.parseAvailability(backendStatus)
            }
        elif (version.parse(__version__) >= version.parse("0.7")):
            # The type(backendStatus) is now <class 'qiskit.providers.models.backendstatus.BackendStatus'>
            # previously was <class 'qiskit._util.AvailableToOperationalDict'>
            return {
                'name': backendStatus.backend_name,
                'pending_jobs': backendStatus.pending_jobs,
                'available': backendStatus.operational
            }
        else:
            raise QiskitUnsupportedVersion(
                'Qiskit-terra version must be > v0.6')

    def parseAvailability(self, backendStatus):
        # The availability key was renamed 'available' -> 'operational';
        # try the old key first, fall back to the new one.
        try:
            return backendStatus['available']
        except KeyError:
            return backendStatus['operational']
| StarcoderdataPython |
3380435 | from pathlib import Path
import os.path
import yaml
from .models import RepositoryModel
from everett.manager import (
ConfigEnvFileEnv,
ConfigManager,
ConfigOSEnv,
)
# Layered configuration lookup for this module and the bin/ scripts.
config = ConfigManager([
    # first check for environment variables
    ConfigOSEnv(),
    # then look in the .env file
    ConfigEnvFileEnv('.env'),
])

# SSH settings need to come from env and not a file,
# because other scripts in bin/ also use them.
SSH_DOKKU_HOST = config('SSH_DOKKU_HOST', raise_error=False)
SSH_DOKKU_PORT = config('SSH_DOKKU_PORT', parser=int, default='22')
SSH_DOKKU_USER = config('SSH_DOKKU_USER', default='dokku')

# Some paths are hard coded for now
BASE_PATH = Path(__file__).parents[1]
REPOS_BASE_PATH = BASE_PATH / 'repos'
DEPLOY_LOGS_BASE_PATH = BASE_PATH / 'deploy-logs'
SETTINGS_BASE_PATH = BASE_PATH / 'settings'

# These settings can in theory come from env or file
GITHUB_SECRET = config('GITHUB_SECRET', raise_error=False)
REPOSITORIES = []
LOG_LEVEL = config('LOG_LEVEL', default='INFO')

# Load the repository list from settings/settings.yaml when it exists.
# Use pathlib consistently (the original mixed os.path with Path) and
# guard against an empty file or a missing 'repositories' key, which
# previously raised TypeError at import time.
_settings_file = SETTINGS_BASE_PATH / 'settings.yaml'
if _settings_file.exists():
    with _settings_file.open() as fp:
        settings_data = yaml.safe_load(fp)
    for r in (settings_data or {}).get('repositories') or []:
        REPOSITORIES.append(RepositoryModel(r))
| StarcoderdataPython |
1769260 | """Area under uplift curve"""
import typing
import numpy as np
import pandas as pd
import datatable as dt
from h2oaicore.metrics import CustomScorer
class AUUC(CustomScorer):
    # Custom Driverless AI scorer computing the Area Under the Uplift Curve.
    # The uplift-curve math is vendored from the CAUSALML package (see the
    # license note below).
    _description = "Area under uplift curve"
    _maximize = True  # whether a higher score is better
    _perfect_score = 2.0  # AUUC can be slightly > 1.
    _supports_sample_weight = True  # whether the scorer accepts and uses the sample_weight input
    _regression = True
    _binary = False
    _multiclass = False
    _RANDOM_COL = 'Random'  # column name for the random-ordering baseline

    # The following functions get_cumgaim, get_cumlift, and auuc_score are directly copied from the CAUSALML package:
    # https://github.com/uber/causalml/blob/v0.10.0/causalml/metrics/visualize.py
    # The functions get_cumgain and get_cumlift were copied as is (only the `self` reference was added).
    # The auuc_score was modified: the `tmle` parameter was removed since it is not used here.
    # The get_cumgaim, get_cumlift, and auuc_score functions are licensed under the Apache 2 license:
    #
    # Copyright 2019 Uber Technology, Inc.
    # Licensed under the Apache License, Version 2.0 (the "License");
    # you may not use this file except in compliance with the License.
    # You may obtain a copy of the License at
    #
    #     http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software
    # distributed under the License is distributed on an "AS IS" BASIS,
    # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    # See the License for the specific language governing permissions and
    # limitations under the License.

    def get_cumgain(self, df, outcome_col='y', treatment_col='w', treatment_effect_col='tau',
                    normalize=False, random_seed=42):
        """Get cumulative gains of model estimates in population.

        If the true treatment effect is provided (e.g. in synthetic data), it's calculated
        as the cumulative gain of the true treatment effect in each population.
        Otherwise, it's calculated as the cumulative difference between the mean outcomes
        of the treatment and control groups in each population.

        For details, see Section 4.1 of Gutierrez and G{\'e}rardy (2016), `Causal Inference
        and Uplift Modeling: A review of the literature`.

        For the former, `treatment_effect_col` should be provided. For the latter, both
        `outcome_col` and `treatment_col` should be provided.

        Args:
            df (pandas.DataFrame): a data frame with model estimates and actual data as columns
            outcome_col (str, optional): the column name for the actual outcome
            treatment_col (str, optional): the column name for the treatment indicator (0 or 1)
            treatment_effect_col (str, optional): the column name for the true treatment effect
            normalize (bool, optional): whether to normalize the y-axis to 1 or not
            random_seed (int, optional): random seed for numpy.random.rand()

        Returns:
            (pandas.DataFrame): cumulative gains of model estimates in population
        """
        lift = self.get_cumlift(df, outcome_col, treatment_col, treatment_effect_col, random_seed)

        # cumulative gain = cumulative lift x (# of population)
        gain = lift.mul(lift.index.values, axis=0)

        if normalize:
            gain = gain.div(np.abs(gain.iloc[-1, :]), axis=1)

        return gain

    def get_cumlift(self, df, outcome_col='y', treatment_col='w', treatment_effect_col='tau',
                    random_seed=42):
        """Get average uplifts of model estimates in cumulative population.

        If the true treatment effect is provided (e.g. in synthetic data), it's calculated
        as the mean of the true treatment effect in each of cumulative population.
        Otherwise, it's calculated as the difference between the mean outcomes of the
        treatment and control groups in each of cumulative population.

        For details, see Section 4.1 of Gutierrez and G{\'e}rardy (2016), `Causal Inference
        and Uplift Modeling: A review of the literature`.

        For the former, `treatment_effect_col` should be provided. For the latter, both
        `outcome_col` and `treatment_col` should be provided.

        Args:
            df (pandas.DataFrame): a data frame with model estimates and actual data as columns
            outcome_col (str, optional): the column name for the actual outcome
            treatment_col (str, optional): the column name for the treatment indicator (0 or 1)
            treatment_effect_col (str, optional): the column name for the true treatment effect
            random_seed (int, optional): random seed for numpy.random.rand()

        Returns:
            (pandas.DataFrame): average uplifts of model estimates in cumulative population
        """
        assert ((outcome_col in df.columns) and (treatment_col in df.columns) or
                treatment_effect_col in df.columns)

        df = df.copy()
        np.random.seed(random_seed)
        # Ten random orderings are averaged into the 'Random' baseline column.
        random_cols = []
        for i in range(10):
            random_col = '__random_{}__'.format(i)
            df[random_col] = np.random.rand(df.shape[0])
            random_cols.append(random_col)

        model_names = [x for x in df.columns if x not in [outcome_col, treatment_col,
                                                          treatment_effect_col]]

        lift = []
        for i, col in enumerate(model_names):
            # Rank the population by descending model score; index becomes
            # the 1-based cumulative population size.
            sorted_df = df.sort_values(col, ascending=False).reset_index(drop=True)
            sorted_df.index = sorted_df.index + 1

            if treatment_effect_col in sorted_df.columns:
                # When treatment_effect_col is given, use it to calculate the average treatment effects
                # of cumulative population.
                lift.append(sorted_df[treatment_effect_col].cumsum() / sorted_df.index)
            else:
                # When treatment_effect_col is not given, use outcome_col and treatment_col
                # to calculate the average treatment_effects of cumulative population.
                sorted_df['cumsum_tr'] = sorted_df[treatment_col].cumsum()
                sorted_df['cumsum_ct'] = sorted_df.index.values - sorted_df['cumsum_tr']
                sorted_df['cumsum_y_tr'] = (sorted_df[outcome_col] * sorted_df[treatment_col]).cumsum()
                sorted_df['cumsum_y_ct'] = (sorted_df[outcome_col] * (1 - sorted_df[treatment_col])).cumsum()

                lift.append(sorted_df['cumsum_y_tr'] / sorted_df['cumsum_tr'] - sorted_df['cumsum_y_ct'] / sorted_df[
                    'cumsum_ct'])

        lift = pd.concat(lift, join='inner', axis=1)
        lift.loc[0] = np.zeros((lift.shape[1],))
        lift = lift.sort_index().interpolate()

        lift.columns = model_names
        lift[self._RANDOM_COL] = lift[random_cols].mean(axis=1)
        lift.drop(random_cols, axis=1, inplace=True)

        return lift

    def auuc_score(self, df, outcome_col='y', treatment_col='w', treatment_effect_col='tau', normalize=True, *args, **kwarg):
        """Calculate the AUUC (Area Under the Uplift Curve) score.

         Args:
            df (pandas.DataFrame): a data frame with model estimates and actual data as columns
            outcome_col (str, optional): the column name for the actual outcome
            treatment_col (str, optional): the column name for the treatment indicator (0 or 1)
            treatment_effect_col (str, optional): the column name for the true treatment effect
            normalize (bool, optional): whether to normalize the y-axis to 1 or not

        Returns:
            (float): the AUUC score
        """
        cumgain = self.get_cumgain(df, outcome_col, treatment_col, treatment_effect_col, normalize)
        return cumgain.sum() / cumgain.shape[0]

    @staticmethod
    def do_acceptance_test():
        """
        Whether to enable acceptance tests during upload of recipe and during start of Driverless AI.

        Acceptance tests perform a number of sanity checks on small data, and attempt to provide helpful instructions
        for how to fix any potential issues. Disable if your recipe requires specific data or won't work on random data.
        """
        return False

    def score(self,
              actual: np.array,
              predicted: np.array,
              sample_weight: typing.Optional[np.array] = None,
              labels: typing.Optional[np.array] = None,
              X: typing.Optional[dt.Frame] = None,
              **kwargs) -> float:
        # NOTE: sample_weight is repurposed as the treatment indicator
        # column (it is placed into the 'treatment' column below), not as
        # per-row weights.
        if sample_weight is None:
            sample_weight = np.ones(len(actual))
        assert np.any(sample_weight != 0)

        df = pd.DataFrame({
            'dai': predicted,
            'outcome': actual,
            'treatment': sample_weight
        })
        # Return the (normalized) AUUC of the model's own score column.
        return self.auuc_score(df, outcome_col='outcome', treatment_col='treatment', treatment_effect_col=None, normalize=True)['dai']
| StarcoderdataPython |
1720695 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 10 14:59:37 2020
Copyright 2020 by <NAME>.
"""
# %% Imports.
# Standard library imports:
import numpy as np
from scipy.sparse import csr_matrix
# Chebpy imports:
from chebpy.nla import sphankel
# %% Test 1.
col = np.array([1, 2, 3, 4])  # first column of the Hankel matrix
H = sphankel(col)  # sparse Hankel matrix built from col
print(csr_matrix.todense(H))  # show the dense form for inspection
3255846 | import curses
# Initializing Curses
screen = curses.initscr()
curses.start_color()

# Dispatch table: menu choice -> (result label, operation). This replaces
# the four nearly identical if/elif branches of the original version.
OPERATIONS = {
    1: ('sum', lambda a, b: a + b),
    2: ('difference', lambda a, b: a - b),
    3: ('product', lambda a, b: a * b),
    4: ('quotient', lambda a, b: a / b),
}


def read_int(prompt):
    """Show `prompt`, read one line from the user and return it as an int.

    Raises ValueError on non-numeric input (handled by the menu loop).
    """
    screen.addstr(prompt)
    return int(bytes.decode(screen.getstr()))


# `running` replaces the original loop flag named `bool`, which shadowed
# the builtin of the same name.
running = True
while running:
    # Redraw the menu on every pass.
    screen.clear()
    screen.addstr("4 Function Calculator in Curses\n", curses.A_BOLD)
    screen.addstr("1. Add\n")
    screen.addstr("2. Subtract\n")
    screen.addstr("3. Multiply\n")
    screen.addstr("4. Divide\n")
    screen.addstr("5. Exit\n")
    screen.addstr("Enter your choice: ")
    try:
        # `choice` replaces the original variable named `input`, which
        # shadowed the builtin.
        choice = int(bytes.decode(screen.getstr()))
        screen.clear()
        if choice == 5:
            running = False
        elif choice in OPERATIONS:
            label, operation = OPERATIONS[choice]
            num1 = read_int("Enter your first number: ")
            num2 = read_int("Enter your second number: ")
            screen.clear()
            result = operation(num1, num2)
            screen.addstr("Your {} is: ".format(label))
            screen.addstr(f"{result}\n", curses.A_UNDERLINE)
        else:
            # An integer outside 1-5. (The original compared an int against
            # a list here, which only worked by accident.)
            screen.addstr("Error: Invalid Input\n", curses.A_STANDOUT)
            screen.addstr("Did you input a number within 1-5?\n")
            screen.addstr("Press any key to go back to the menu...")
            screen.refresh()
            c = screen.getch()
    except ValueError as e:
        screen.addstr(f"Error: {e}\n", curses.A_STANDOUT)
        screen.addstr("(In English: You input an invalid option)\n")  # Added this so in case the user isn't a programmer, they could read the error
        screen.addstr("Did you input a number within 1-5?\n")
        screen.addstr("Press any key to go back to the menu...")
        screen.refresh()
        c = screen.getch()
    except ZeroDivisionError:
        # BUG FIX: dividing by zero previously crashed out of curses mode.
        screen.addstr("Error: Division by zero\n", curses.A_STANDOUT)
        screen.addstr("Press any key to go back to the menu...")
        screen.refresh()
        c = screen.getch()
    if running:
        screen.refresh()

# BUG FIX: end curses mode exactly once, after the loop. The original
# called endwin() and then refresh(), which re-entered curses mode and
# left the terminal in a bad state on exit.
curses.endwin()
| StarcoderdataPython |
1627470 | import requests
import datetime
def get_price(start='2013-01-01', end=None, currency='USD'):
    """Fetch daily bitcoin closing prices from the CoinDesk BPI API.

    :param start: first day to fetch, ISO format (YYYY-MM-DD)
    :param end: last day to fetch, ISO format; defaults to *today*. BUG FIX:
        the original computed this as a default argument, so it was frozen
        at import time; it is now evaluated at call time.
    :param currency: currency code understood by the API, e.g. 'USD'
    :return: dict with 'x' (days elapsed since the first sample),
        'y' (closing prices) and 'labels' (ISO date strings),
        in chronological order
    """
    if end is None:
        end = datetime.date.today().isoformat()
    r = requests.get(
        'http://api.coindesk.com/v1/bpi/historical/close.json'
        '?currency={}&start={}&end={}'.format(currency, start, end))
    data = r.json()['bpi']
    # ISO dates sort chronologically as plain strings.
    labels = sorted(data)
    y = [data[day] for day in labels]
    # Convert the dates to day offsets from the first sample.
    t0 = datetime.datetime.strptime(labels[0], '%Y-%m-%d')
    x = [(datetime.datetime.strptime(day, '%Y-%m-%d') - t0).days for day in labels]
    return {'x': x, 'y': y, 'labels': labels}
| StarcoderdataPython |
1680239 | <filename>pychron/git/hosts/gitlab.py<gh_stars>1-10
# ===============================================================================
# Copyright 2016 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from apptools.preferences.preference_binding import bind_preference
from traits.api import Str
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.git.hosts import GitHostService
class GitLabService(GitHostService):
    """Git hosting backend for a (self-hosted) GitLab server.

    Talks to the server's HTTP API through the `_get`/`_post` helpers
    inherited from GitHostService.
    """
    # GitLab server host; bound to the 'pychron.gitlab.host' preference
    # in bind_preferences below.
    host = Str

    preference_path = 'pychron.gitlab'
    name = 'GitLab'

    @property
    def remote_url(self):
        # The remote URL is simply the configured host.
        return self.host

    def bind_preferences(self):
        # Bind the extra 'host' preference on top of the base-class bindings.
        super(GitLabService, self).bind_preferences()
        bind_preference(self, 'host', '{}.host'.format(self.preference_path))

    def set_team(self, team, organization, repo, permission=None):
        # Not implemented for GitLab.
        pass

    def create_repo(self, name, organization, **kw):
        # POST a new repository under the given organization; extra keyword
        # arguments are forwarded as request payload fields.
        cmd = '{}/orgs/{}/repos'.format(self.host,
                                        organization)
        resp = self._post(cmd, name=name, **kw)
        if resp:
            self.debug('Create repo response {}'.format(resp.status_code))
            # 201 Created signals success; returns None when no response.
            return resp.status_code == 201

    def make_url(self, name, organization):
        # Clone URL for a repository hosted on this server.
        return 'http://{}/{}/{}.git'.format(self.host, organization, name)

    def get_repos(self, organization):
        # List the projects belonging to a group.
        cmd = '{}/groups/{}/projects'.format(self.host, organization)
        return self._get(cmd)

    def get_info(self, organization):
        # Fetch the group's metadata.
        cmd = '{}/groups/{}'.format(self.host, organization)
        return self._get(cmd)

    # private
    def _get_oauth_token(self):
        # Authorization header value used by the base class.
        return 'Bearer {}'.format(self.oauth_token)
| StarcoderdataPython |
1762953 | from .closed import keys as k
from .closed import commands as c
from .closed import modes as m
import copy
# Work on deep copies so the shared `closed` tables are never mutated.
keys = copy.deepcopy(k)
commands = copy.deepcopy(c)
modes = copy.deepcopy(m)

# Open mode: every letter key types itself. A single loop replaces the
# original 26 hand-written `keys['a']['word'] = 'a'` assignments.
for ch in 'abcdefghijklmnopqrstuvwxyz':
    keys[ch]['word'] = ch

# spoken word -> X keysym lookup
keysyms = {}
for k, v in keys.items():
    keysyms[v['word']] = v['keysym']

# key/command identifier -> spoken word vocabulary
vocab = {}
for k, v in keys.items():
    vocab[k] = v['word']
for k, v in commands.items():
    vocab[k] = v
| StarcoderdataPython |
1626609 | """
Performing Range Minimum Queries, Range Maximum Queries, Range Sum Queries in O(log(n)) using a prebuilt structure.
Building a full binary tree and using it for Range Minimum Queries, Range Maximum Queries, Range Sum Queries
(A full binary tree is a tree in which every node has either 0 or 2 children.)
The construction takes O(n) time with O(n) an additional space for the array with the tree;
The update of the range query tree in O(log(n)) time with O(1) additional space (works for max/min as well as for sum)
Here the Range Queries take O(log(n)) time, which is not optimal but often good enough, while the
known (at least to me) structure that gives O(1) time is much more complicated than this one.
The full bin tree we are constructing here is kept in an array (a Python list) in a similar way to the heap structure.
The 0th element corresponds to the root, the indexes of the two children of a node at i are 2*i+1 and 2*i+2, while
the index of the parent of a node at k is (k-1)//2
For the given construction, the root of a subtree will contain a MIN (or Max, or Sum) of the subtree. A variant of the
construction may contain an index in the original array of the MIN (or Max, or Sum) of the subtree.
Examples of using the code for a given array a and range [i,j] (i and j included) are here and also presented
as tests in the __main__ function for this module below.
To find the range minimum:
t = build_helper_tree(a)
min_i_j = rmq(len(a), t, q_st=i, q_end=j)
To find the range maximum:
    t = build_helper_tree(a, f=max, ignore=-float('inf'))
    max_i_j = rmq(len(a), t, q_st=i, q_end=j, f=max, ignore=-float('inf'))
To find the range sum:
t = build_helper_tree(a, f=sum, ignore=0)
min_i_j = rmq(len(a), t, q_st=i, q_end=j, f=sum, ignore=0)
To update the prebuilt range query tree t for min, where the new value of a at index j is ch:
new_t = update(len(a), t, idx_change=j, change=ch)
To update the prebuilt range query tree t for max, where the new value of a at index j is ch:
    new_t = update(len(a), t, idx_change=j, change=ch, f=max, ignore=-float('inf'))
To update the prebuilt range query tree t for sum, where the new value of a at index j is new_value:
new_t = update(len(a), t, idx_change=j, change=(new_value-a[j]), f=sum, ignore=0)
It is also possible to prepare and perform the Range Minimum/Maximum Queries in the second definition of the RMQ,
i.e., to search for the indexes of the max/min in the given ranges, with the same times/space characteristics.
For the examples, see test_range_indexes_min_query() and test_range_indexes_max_query() below.
Thus, to find an index of the minimal element in the given range(i, j+1) of the list a, one can do the following:
a = [5, 3, 7, 4, 8]
b = list(enumerate(a))
def f(lst):
i, mn = lst[0]
for j, x in lst:
if x < mn:
i, mn = j, x
return i, mn
ign = (0, float('inf'))
t = build_helper_tree(b, f=f, ignore=ign)
rmq(len(a), t, i, j, f=f, ignore=ign)[0] # the function returns tuple in the form (index, min_value)
To find the index of the max element in the range, replace the ign and f with the following:
def f(lst):
i, mx = lst[0]
for j, x in lst:
if x > mx:
i, mx = j, x
return i, mx
ign = (0, -float('inf'))
and use the same calls:
t = build_helper_tree(b, f=f, ignore=ign)
rmq(len(a), t, i, j, f=f, ignore=ign)[0] # the function returns tuple in the form (index, max_value)
"""
from math import ceil, log2
def build_helper_tree(a, f=min, ignore=float('inf')):
    """Build the full binary range-query tree for `a` in O(n).

    The tree is stored heap-style in a list: the root is element 0, the
    children of node i are 2*i+1 and 2*i+2, and each node holds f() of the
    array segment it covers.

    :param a: the original array (list) the full binary RQ tree is built for
    :param f: min or max (or "similar") function, e.g. sum
    :param ignore: neutral element of f, e.g. float('inf') if f==min,
        -float('inf') if f==max, 0 if f==sum
    :return: the full binary tree (list) to be used for Range Queries of a
    """

    def _bld(start, end, t, idx=0):
        if start > end:
            # BUG FIX: the original set t[idx] but fell through (missing
            # return) into the recursive branch, which would recurse
            # forever. The case is unreachable from valid calls, but is
            # now safe anyway.
            t[idx] = ignore
            return ignore
        if start == end:
            t[idx] = a[start]
        else:
            mid = start + (end - start) // 2
            shift = idx * 2
            t[idx] = f([_bld(start, mid, t, idx=(shift + 1)),
                        _bld(mid + 1, end, t, idx=(shift + 2))])
        return t[idx]

    n = len(a)
    d = 2 ** (int(ceil(log2(n))))  # leaves, rounded up to a power of two
    m = 2 * d - 1  # d leaves => d - 1 internal nodes; height = ceil(log2(n))
    fbt = [ignore for j in range(m)]
    _bld(0, n - 1, fbt, idx=0)
    return fbt
def rmq(len_a, t, q_st, q_end, f=min, ignore=float('inf')):
    """Answer a range query over a[q_st..q_end] (both ends included) in O(log n).

    :param len_a: length of the original array the tree was built for
    :param t: the full binary range-query tree built by build_helper_tree
    :param q_st: starting index of the query range
    :param q_end: ending index of the query range (inclusive)
    :param f: min or max (or "similar") function, e.g. sum
    :param ignore: neutral element of f (inf for min, -inf for max, 0 for sum)
    :return: f over the elements of a with indexes in [q_st, q_end]
    """
    if q_st < 0 or q_end > len_a - 1 or q_st > q_end:
        raise RuntimeError("Invalid range arguments")

    def visit(lo, hi, node=0):
        # Node range disjoint from the query: contribute the neutral element.
        if hi < q_st or q_end < lo:
            return ignore
        # Node range fully inside the query: its precomputed value answers it.
        if q_st <= lo and hi <= q_end:
            return t[node]
        # Partial overlap: combine the two children.
        mid = lo + (hi - lo) // 2
        left = visit(lo, mid, node=2 * node + 1)
        right = visit(mid + 1, hi, node=2 * node + 2)
        return f([left, right])

    return visit(0, len_a - 1)
def update(len_a, t, idx_change, change, f=min, ignore=float('inf')):
    """Refresh the prebuilt range-query tree after one element changes, O(log n).

    Walks the root-to-leaf path covering `idx_change` and folds `change`
    into every node on it with t[node] = f((t[node], change)).

    :param len_a: length of the original array the tree was built for
    :param t: the full binary range-query tree built by build_helper_tree
    :param idx_change: the index in the original array that is being updated
    :param change: f-specific change, e.g. the delta new-old for f=sum, or
        the new value for f=min/max (note that for min/max this can only
        move node values toward the extremum)
    :param f: min or max (or "similar") function, e.g. sum
    :param ignore: neutral element of f (kept for interface symmetry)
    :return: the updated tree
    """
    if not (0 <= idx_change <= len_a - 1):
        raise IndexError("idx_change=%d is out of bounds" % idx_change)

    node, lo, hi = 0, 0, len_a - 1
    while lo <= hi and node < len(t):
        t[node] = f((t[node], change))
        mid = lo + (hi - lo) // 2
        if idx_change <= mid:
            hi, node = mid, 2 * node + 1
        else:
            lo, node = mid + 1, 2 * node + 2
    return t
if __name__ == '__main__':
"""
Keeping some tests here as examples of the usage.
Unit tests are also presented in the tests package.
"""
####################################################################################
def __prepare_stupid_rmq(a, f=min):
return [[f(a[i:j]) for i in range(j)] for j in range(1, len(a) + 1)]
def __stupid_rmq(c, i, j):
return c[j][i] if i < j else c[i][j] if j < i else c[i][i]
def test__prepare_stupid_rmq_1():
assert ([[5], [3, 3], [3, 3, 7], [3, 3, 4, 4], [3, 3, 4, 4, 8]] ==
__prepare_stupid_rmq([5, 3, 7, 4, 8]))
assert ([[5], [5, 3], [7, 7, 7], [7, 7, 7, 4], [8, 8, 8, 8, 8]] ==
__prepare_stupid_rmq([5, 3, 7, 4, 8], f=max))
def test__stupid_rmq_1():
a = [5, 3, 7, 4, 8]
c = __prepare_stupid_rmq(a)
for j in range(1, len(a) + 1):
for i in range(j):
assert c[j - 1][i] == __stupid_rmq(c, i, j - 1)
####################################################################################
def test__build_helper_tree_min_and_max_1():
assert [5] == build_helper_tree([5])
assert [5] == build_helper_tree([5], f=max, ignore=-float('inf'))
def test__build_helper_tree_min_and_max_2():
assert [5, 5, 7] == build_helper_tree([5, 7])
assert [7, 5, 7] == build_helper_tree([5, 7], f=max, ignore=-float('inf'))
def test__build_helper_tree_sum_1():
assert [12, 5, 7] == build_helper_tree([5, 7], f=sum, ignore=0)
def test__build_helper_tree_sum_2():
assert [22, 12, 10, 5, 7, 0, 0] == build_helper_tree([5, 7, 10], f=sum, ignore=0)
def test_range_min_query_1():
a = [5]
t = build_helper_tree([5])
m = rmq(len(a), t, 0, 0)
assert 5 == m
def test_range_max_query_1():
a = [5]
t = build_helper_tree([5], f=max, ignore=-float('inf'))
m = rmq(len(a), t, 0, 0, f=max, ignore=-float('inf'))
assert 5 == m
def test_range_min_query():
a = [5, 3, 7, 4, 8]
c = __prepare_stupid_rmq(a)
t = build_helper_tree(a)
for j in range(1, len(a) + 1):
for i in range(j):
assert __stupid_rmq(c, i, j - 1) == rmq(len(a), t, i, j - 1)
def test_range_max_query():
a = [5, 3, 7, 4, 8]
c = __prepare_stupid_rmq(a, f=max)
t = build_helper_tree(a, f=max, ignore=-float('inf'))
for j in range(1, len(a) + 1):
for i in range(j):
assert __stupid_rmq(c, i, j - 1) == rmq(len(a), t, i, j - 1, f=max, ignore=-float('inf'))
def test_range_sum_query():
a = [5, 3, 7, 4, 8]
c = [[sum(a[i:j]) for i in range(j)] for j in range(1, len(a) + 1)]
t = build_helper_tree(a, f=sum, ignore=0)
for j in range(1, len(a) + 1):
for i in range(j):
assert c[j - 1][i] == rmq(len(a), t, i, j - 1, f=sum, ignore=0)
def test_range_indexes_min_query():
a = [5, 3, 7, 4, 8]
b = list(enumerate(a))
def f(lst):
i, mn = lst[0]
for j, x in lst:
if x < mn:
i, mn = j, x
return i, mn
ign = (0, float('inf'))
t = build_helper_tree(b, f=f, ignore=ign)
# print('t = ',t)
assert [(1, 3), (1, 3), (3, 4), (1, 3), (2, 7),
(3, 4), (4, 8), (0, 5), (1, 3), (0, float('inf')),
(0, float('inf')), (0, float('inf')),
(0, float('inf')), (0, float('inf')), (0, float('inf'))] == build_helper_tree(b, f=f, ignore=ign)
# print('a = ',a)
# print([(i, j-1) for j in range(1, len(a)+1) for i in range(j)])
# print([rmq(len(a), t, i, j - 1, f=f, ignore=ign) for j in range(1, len(a)+1) for i in range(j)])
assert [(0, 5), (1, 3), (1, 3), (1, 3), (1, 3),
(2, 7), (1, 3), (1, 3), (3, 4), (3, 4),
(1, 3), (1, 3), (3, 4), (3, 4), (4, 8)] == [rmq(len(a), t, i, j - 1, f=f, ignore=ign)
for j in range(1, len(a) + 1) for i in range(j)]
def test_range_indexes_max_query():
a = [5, 3, 7, 4, 8]
b = list(enumerate(a))
def f(lst):
i, mx = lst[0]
for j, x in lst:
if x > mx:
i, mx = j, x
return i, mx
ign = (0, -float('inf'))
t = build_helper_tree(b, f=f, ignore=ign)
# print('t = ',t)
assert [(4, 8), (2, 7), (4, 8), (0, 5), (2, 7),
(3, 4), (4, 8), (0, 5), (1, 3),
(0, -float('inf')), (0, -float('inf')),
(0, -float('inf')), (0, -float('inf')),
(0, -float('inf')), (0, -float('inf'))] == build_helper_tree(b, f=f, ignore=ign)
# print('a = ',a)
# print([(i, j-1) for j in range(1, len(a)+1) for i in range(j)])
# print([rmq(len(a), t, i, j - 1, f=f, ignore=ign) for j in range(1, len(a)+1) for i in range(j)])
assert [(0, 5), (0, 5), (1, 3), (2, 7), (2, 7),
(2, 7), (2, 7), (2, 7), (2, 7), (3, 4),
(4, 8), (4, 8), (4, 8), (4, 8), (4, 8)] == [rmq(len(a), t, i, j - 1, f=f, ignore=ign)
for j in range(1, len(a) + 1) for i in range(j)]
def test_update0_min_max_sum():
assert [4] == update(1, [4], idx_change=0, change=4)
assert [6] == update(1, [5], idx_change=0, change=6, f=max, ignore=-float('inf'))
assert [8] == update(1, [5], idx_change=0, change=3, f=sum, ignore=0)
assert [2] == update(1, [5], idx_change=0, change=-3, f=sum, ignore=0)
def test_update_max():
a = [5, 3, 7, 4, 8]
len_a = len(a)
t = build_helper_tree(a)
for j in range(len_a):
a[j] += 2
t2 = build_helper_tree(a)
t3 = update(len_a, t2, idx_change=j, change=a[j], f=max, ignore=-float('inf'))
assert t2 == t3
a[j] -= 4
t2 = build_helper_tree(a)
t3 = update(len_a, t2, idx_change=j, change=a[j], f=max, ignore=-float('inf'))
assert t2 == t3
def test_update_min():
a = [5, 3, 7, 4, 8]
len_a = len(a)
t = build_helper_tree(a)
for j in range(len_a):
a[j] += 2
t2 = build_helper_tree(a)
t3 = update(len_a, t2, idx_change=j, change=a[j])
assert t2 == t3
a[j] -= 4
t2 = build_helper_tree(a)
t3 = update(len_a, t2, idx_change=j, change=a[j])
assert t2 == t3
def test_update_sum():
    """For sum-trees update() takes the delta (not the new value); must match a rebuild."""
    values = [5, 3, 7, 4, 8]
    size = len(values)
    build_helper_tree(values)  # smoke-build of the untouched input
    for idx in range(size):
        for delta in (2, -4):
            values[idx] += delta
            rebuilt = build_helper_tree(values)
            assert rebuilt == update(size, rebuilt, idx_change=idx,
                                     change=delta, f=sum, ignore=0)
if __name__ == '__main__':
    # Run the suite only when executed as a script; previously these calls ran
    # at module level, so merely importing this file executed every test.
    test__build_helper_tree_min_and_max_1()
    test__build_helper_tree_min_and_max_2()
    test__build_helper_tree_sum_1()
    test__build_helper_tree_sum_2()
    test_range_min_query_1()
    test_range_max_query_1()
    test__prepare_stupid_rmq_1()
    test__stupid_rmq_1()
    test_range_min_query()
    test_range_max_query()
    test_range_sum_query()
    test_update0_min_max_sum()
    test_update_min()
    test_update_max()
    test_update_sum()
    test_range_indexes_min_query()
    test_range_indexes_max_query()
| StarcoderdataPython |
12256 | <reponame>gotcha/salt
# -*- coding: utf-8 -*-
'''
tests.unit.utils.filebuffer_test
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:codeauthor: :email:`<NAME> (<EMAIL>)`
:copyright: © 2012 by the SaltStack Team, see AUTHORS for more details.
:license: Apache 2.0, see LICENSE for more details.
'''
# Import salt libs
from saltunittest import TestCase, TestLoader, TextTestRunner
from salt.utils.filebuffer import BufferedReader, InvalidFileMode
class TestFileBuffer(TestCase):
    """Checks that BufferedReader rejects every non-read-only file mode."""

    def test_read_only_mode(self):
        # Any append/write mode must raise InvalidFileMode.
        for bad_mode in ('a', 'ab', 'w', 'wb'):
            with self.assertRaises(InvalidFileMode):
                BufferedReader('/tmp/foo', mode=bad_mode)
if __name__ == "__main__":
    # Run just this TestCase with minimal (verbosity=1) output when the
    # module is executed directly.
    loader = TestLoader()
    tests = loader.loadTestsFromTestCase(TestFileBuffer)
    TextTestRunner(verbosity=1).run(tests)
| StarcoderdataPython |
3217724 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import math
import random
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.criterions.label_smoothed_cross_entropy import label_smoothed_nll_loss
from .cross_entropy_acc import LabelSmoothedCrossEntropyWithAccCriterion
random.seed(0)
def logits2sent(preds, targets, dictionary, rate=0.03):
    """Occasionally print a decoded (prediction, target) pair for inspection.

    With probability ``rate`` the two token-id sequences are decoded through
    *dictionary* and printed; otherwise the call is a no-op.

    Args:
        preds: token ids of the model prediction.
        targets: token ids of the reference.
        dictionary: target dictionary; ``dictionary.tokenizer.decode`` is
            preferred, falling back to ``dictionary.string``.
        rate: sampling probability; 0 disables printing entirely.
    """
    if random.random() < rate:
        try:
            pred = dictionary.tokenizer.decode(preds)
            target = dictionary.tokenizer.decode(targets)
        except Exception:
            # Was a bare ``except:`` which would also swallow
            # KeyboardInterrupt/SystemExit.  The expected failure here is an
            # AttributeError when the dictionary has no ``tokenizer``.
            pred = dictionary.string(preds)
            target = dictionary.string(targets)
        print('pred:\n{}\ntarget:\n{}\n'.format(pred, target))
@register_criterion("qua_ce_acc")
class QuantityCrossEntropyWithAccCriterion(LabelSmoothedCrossEntropyWithAccCriterion):
    """Label-smoothed cross entropy plus a CIF quantity (length) loss.

    Total loss is ``ce_loss + lambda_qua * qua_loss * ntokens / nsentences``
    where ``qua_loss`` penalises the gap between the predicted token count
    (``net_output['num_output']`` from the CIF encoder) and the true target
    length.  At evaluation time a CIF decoder is run to accumulate
    character/unit edit-distance statistics.
    """

    def __init__(self, args, task):
        super().__init__(args, task)
        self.args = args
        # Only used in the eval branch of forward().
        self.decoder = self.build_decoder(args, task)

    @classmethod
    def build_criterion(cls, args, task):
        """Construct a criterion from command-line args."""
        return cls(args, task)

    def build_decoder(self, args, task):
        """Instantiate the CIF decoder named by ``args.decoder``.

        Raises:
            ValueError: if ``args.decoder`` names an unsupported decoder.
        """
        decoder = getattr(args, "decoder", None)
        from examples.speech_recognition.cif_decoder import CIFDecoder
        if decoder == "cif_decoder":
            decoder = CIFDecoder(args, task.target_dictionary, {})
        elif decoder == "cif_lm_decoder":
            decoder = CIFDecoder(args, task.target_dictionary, ({}, {}))
        else:
            # Was ``import pdb; pdb.set_trace()`` -- a leftover debugging
            # breakpoint.  Fail loudly instead of dropping into the debugger.
            raise ValueError("unsupported decoder: {!r}".format(decoder))
        return decoder

    def compute_loss(self, model, net_output, sample, reduction, log_probs):
        """Return (flattened log-probs, quantity loss, label-smoothed CE loss)."""
        # number loss: smooth |predicted_len - true_len|; the epsilon keeps
        # sqrt differentiable at zero
        _number = net_output["num_output"]
        number = sample["target_lengths"].float()
        diff = torch.sqrt(torch.pow(_number - number, 2) + 1e-6).sum()
        qua_loss = diff
        # alphas_pen
        # alphas_pen = net_output["alphas_pen"]
        # qua_loss = diff + self.args.lambda_alpha * alphas_pen
        target = sample["target"]  # no eos bos
        # N, T -> N * T
        target = target.view(-1)
        lprobs = model.get_normalized_probs(net_output, log_probs=log_probs)
        if not hasattr(lprobs, "batch_first"):
            logging.warning(
                "ERROR: we need to know whether "
                "batch first for the net output; "
                "you need to set batch_first attribute for the return value of "
                "model.get_normalized_probs. Now, we assume this is true, but "
                "in the future, we will raise exception instead. "
            )
        batch_first = getattr(lprobs, "batch_first", True)
        if not batch_first:
            lprobs = lprobs.transpose(0, 1)
        # N, T, D -> N * T, D
        lprobs = lprobs.view(-1, lprobs.size(-1))
        # NOTE(review): label smoothing is hard-coded to 0.1 -- confirm it
        # should not come from args.
        ce_loss, _ = label_smoothed_nll_loss(
            lprobs, target.long(), 0.1, ignore_index=self.padding_idx, reduce=reduction,
        )
        return lprobs, qua_loss, ce_loss

    def get_logging_output(self, sample, lprobs, loss, qua_loss, ce_loss):
        """Build the per-batch logging dict and return (sample_size, dict)."""
        target = sample["target"].view(-1)
        mask = target != self.padding_idx
        # token-level accuracy over non-pad positions
        correct = torch.sum(
            lprobs.argmax(1).masked_select(mask) == target.masked_select(mask)
        )
        total = torch.sum(mask)
        sample_size = sample["ntokens"]
        logging_output = {
            "loss": utils.item(loss.data),  # * sample['ntokens'],
            "qua_loss": utils.item(qua_loss.data),  # * sample['ntokens'],
            "ce_loss": utils.item(ce_loss.data),  # * sample['ntokens'],
            "ntokens": sample["ntokens"],
            "nsentences": sample["target"].size(0),
            "sample_size": sample_size,
            "correct": utils.item(correct.data),
            "total": utils.item(total.data),
        }
        return sample_size, logging_output

    def forward(self, model, sample, reduction="sum", log_probs=True):
        """Computes the cross entropy with accuracy metric for the given sample.

        This is similar to CrossEntropyCriterion in fairseq, but also
        computes accuracy metrics as part of logging

        Args:
            logprobs (Torch.tensor) of shape N, T, D i.e.
                batchsize, timesteps, dimensions
            targets (Torch.tensor) of shape N, T i.e batchsize, timesteps

        Returns:
            tuple: With three elements:
                1) the loss
                2) the sample size, which is used as the denominator for the gradient
                3) logging outputs to display while training

        TODO:
            * Currently this Criterion will only work with LSTMEncoderModels or
              FairseqModels which have decoder, or Models which return TorchTensor
              as net_output.
              We need to make a change to support all FairseqEncoder models.
        """
        net_output = model(**sample["net_input"])
        lprobs, qua_loss, ce_loss = self.compute_loss(
            model, net_output, sample, reduction, log_probs
        )
        # NOTE(review): the +1.0 presumably guards against division issues
        # when weighting qua_loss -- confirm intended.
        nsentences = sample["target"].size(0) + 1.0
        ntokens = sample["ntokens"]
        loss = self.args.lambda_qua * qua_loss * ntokens / nsentences + ce_loss
        sample_size, logging_output = self.get_logging_output(
            sample, lprobs, loss, qua_loss, ce_loss
        )
        if not model.training:
            # Eval: run the CIF decoder and accumulate unit edit distance.
            import editdistance
            c_err = 0
            c_len = 0
            self.decoder.step_forward_fn = model.decoder
            with torch.no_grad():
                decodeds = self.decoder.generate([model], sample)
            for decoded, t in zip(decodeds, sample["target"]):
                decoded = decoded[0]['tokens']
                # drop pad and eos from the reference before scoring
                p = (t != self.task.target_dictionary.pad()) & (
                    t != self.task.target_dictionary.eos()
                )
                targ = t[p]
                targ_units_arr = targ.tolist()
                pred_units_arr = decoded.tolist()
                c_err += editdistance.eval(pred_units_arr, targ_units_arr)
                c_len += len(targ_units_arr)
            logging_output["c_errors"] = c_err
            logging_output["c_total"] = c_len
        return loss, sample_size, logging_output

    @staticmethod
    def aggregate_logging_outputs(logging_outputs):
        """Aggregate logging outputs from data parallel training."""
        correct_sum = sum(log.get("correct", 0) for log in logging_outputs)
        total_sum = sum(log.get("total", 0) for log in logging_outputs)
        loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
        ce_loss = sum(log['ce_loss'] for log in logging_outputs)
        qua_loss = sum(log['qua_loss'] for log in logging_outputs)
        ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
        nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
        sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
        agg_output = {
            "loss": loss_sum / sample_size if sample_size > 0 else 0.0,
            "ce_loss": ce_loss / sample_size if sample_size > 0 else 0.0,
            "qua_loss": qua_loss / nsentences if nsentences > 0 else 0.0,
            # if args.sentence_avg, then sample_size is nsentences, then loss
            # is per-sentence loss; else sample_size is ntokens, the loss
            # becomes per-output token loss
            "ntokens": ntokens,
            "nsentences": nsentences,
            "sample_size": sample_size,
            "acc": correct_sum * 100.0 / total_sum if total_sum > 0 else 0.0,
            "correct": correct_sum,
            "total": total_sum,
            # total is the number of validate tokens
        }
        if sample_size != ntokens:
            agg_output["nll_loss"] = ce_loss / ntokens
        c_errors = sum(log.get("c_errors", 0) for log in logging_outputs)
        c_total = sum(log.get("c_total", 1) for log in logging_outputs)
        if c_total > 1:
            agg_output["uer"] = c_errors * 100.0 / c_total
        # loss: per output token loss
        # nll_loss: per sentence loss
        return agg_output
@register_criterion("qua_ce_acc_v2")
class QuantityCrossEntropyWithAccCriterionV2(LabelSmoothedCrossEntropyWithAccCriterion):
    """Quantity (length) loss + label-smoothed CE, with greedy eval decoding.

    Unlike ``qua_ce_acc`` this variant needs no separate decoder object: at
    evaluation time it greedily argmax-decodes ``net_output['logits']``,
    truncating each hypothesis to the predicted length ``num_output``.
    """

    def __init__(self, args, task):
        super().__init__(args, task)
        self.args = args

    @classmethod
    def build_criterion(cls, args, task):
        """Construct a criterion from command-line args."""
        return cls(args, task)

    def compute_loss(self, model, net_output, sample, reduction, log_probs):
        """Return (flattened log-probs, quantity loss, label-smoothed CE loss)."""
        # number loss: smooth |predicted_len - true_len|; the epsilon keeps
        # sqrt differentiable at zero
        _number = net_output["num_output"]
        number = sample["target_lengths"].float()
        diff = torch.sqrt(torch.pow(_number - number, 2) + 1e-6).sum()
        qua_loss = diff
        # alphas_pen
        # alphas_pen = net_output["alphas_pen"]
        # qua_loss = diff + self.args.lambda_alpha * alphas_pen
        target = sample["target"]  # no eos bos
        # N, T -> N * T
        target = target.view(-1)
        lprobs = model.get_normalized_probs(net_output, log_probs=log_probs)
        if not hasattr(lprobs, "batch_first"):
            logging.warning(
                "ERROR: we need to know whether "
                "batch first for the net output; "
                "you need to set batch_first attribute for the return value of "
                "model.get_normalized_probs. Now, we assume this is true, but "
                "in the future, we will raise exception instead. "
            )
        batch_first = getattr(lprobs, "batch_first", True)
        if not batch_first:
            lprobs = lprobs.transpose(0, 1)
        # N, T, D -> N * T, D
        lprobs = lprobs.view(-1, lprobs.size(-1))
        # NOTE(review): label smoothing is hard-coded to 0.1 -- confirm it
        # should not come from args.
        ce_loss, _ = label_smoothed_nll_loss(
            lprobs, target.long(), 0.1, ignore_index=self.padding_idx, reduce=reduction,
        )
        return lprobs, qua_loss, ce_loss

    def get_logging_output(self, sample, lprobs, loss, qua_loss, ce_loss):
        """Build the per-batch logging dict and return (sample_size, dict)."""
        target = sample["target"].view(-1)
        mask = target != self.padding_idx
        # token-level accuracy over non-pad positions
        correct = torch.sum(
            lprobs.argmax(1).masked_select(mask) == target.masked_select(mask)
        )
        total = torch.sum(mask)
        sample_size = sample["ntokens"]
        logging_output = {
            "loss": utils.item(loss.data),  # * sample['ntokens'],
            "qua_loss": utils.item(qua_loss.data),  # * sample['ntokens'],
            "ce_loss": utils.item(ce_loss.data),  # * sample['ntokens'],
            "ntokens": sample["ntokens"],
            "nsentences": sample["target"].size(0),
            "sample_size": sample_size,
            "correct": utils.item(correct.data),
            "total": utils.item(total.data),
        }
        return sample_size, logging_output

    def forward(self, model, sample, reduction="sum", log_probs=True):
        """Computes the cross entropy with accuracy metric for the given sample.

        This is similar to CrossEntropyCriterion in fairseq, but also
        computes accuracy metrics as part of logging

        Args:
            logprobs (Torch.tensor) of shape N, T, D i.e.
                batchsize, timesteps, dimensions
            targets (Torch.tensor) of shape N, T i.e batchsize, timesteps

        Returns:
            tuple: With three elements:
                1) the loss
                2) the sample size, which is used as the denominator for the gradient
                3) logging outputs to display while training

        TODO:
            * Currently this Criterion will only work with LSTMEncoderModels or
              FairseqModels which have decoder, or Models which return TorchTensor
              as net_output.
              We need to make a change to support all FairseqEncoder models.
        """
        net_output = model(**sample["net_input"])
        num_output = net_output["num_output"].int()
        if model.training:
            lprobs, qua_loss, ce_loss = self.compute_loss(
                model, net_output, sample, reduction, log_probs
            )
            # NOTE(review): the +1.0 presumably guards the qua_loss weighting
            # against tiny batches -- confirm intended.
            nsentences = sample["target"].size(0) + 1.0
            ntokens = sample["ntokens"]
            loss = self.args.lambda_qua * qua_loss * ntokens / nsentences + ce_loss
            sample_size, logging_output = self.get_logging_output(
                sample, lprobs, loss, qua_loss, ce_loss
            )
        else:
            # Eval: greedy decode logits up to the predicted length and score
            # against the reference with edit distance.
            import editdistance
            loss = qua_loss = sample_size = 0.0
            logging_output = {
                "ntokens": sample["ntokens"],
                "nsentences": sample["target"].size(0),
                "sample_size": sample_size
            }
            c_err = 0
            c_len = 0
            with torch.no_grad():
                for logits, l, t in zip(net_output['logits'], num_output, sample["target"]):
                    decoded = logits.argmax(dim=-1)[:l]
                    # drop pad and eos from the reference before scoring
                    p = (t != self.task.target_dictionary.pad()) & (
                        t != self.task.target_dictionary.eos()
                    )
                    targ = t[p]
                    targ_units_arr = targ.tolist()
                    pred_units_arr = decoded.tolist()
                    # targ_units_arr = targ.unique_consecutive().tolist()
                    # pred_units_arr = decoded.unique_consecutive().tolist()
                    c_err += editdistance.eval(pred_units_arr, targ_units_arr)
                    c_len += len(targ_units_arr)
            logging_output["c_errors"] = c_err
            logging_output["c_total"] = c_len
        return loss, sample_size, logging_output

    @staticmethod
    def aggregate_logging_outputs(logging_outputs):
        """Aggregate logging outputs from data parallel training."""
        correct_sum = sum(log.get("correct", 0) for log in logging_outputs)
        total_sum = sum(log.get("total", 0) for log in logging_outputs)
        loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
        ce_loss = sum(log.get("ce_loss", 0) for log in logging_outputs)
        qua_loss = sum(log.get("qua_loss", 0) for log in logging_outputs)
        ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
        nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
        sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
        agg_output = {
            "loss": loss_sum / sample_size if sample_size > 0 else 0.0,
            "ce_loss": ce_loss / sample_size if sample_size > 0 else 0.0,
            "qua_loss": qua_loss / nsentences if nsentences > 0 else 0.0,
            # if args.sentence_avg, then sample_size is nsentences, then loss
            # is per-sentence loss; else sample_size is ntokens, the loss
            # becomes per-output token loss
            "ntokens": ntokens,
            "nsentences": nsentences,
            "sample_size": sample_size,
            "acc": correct_sum * 100.0 / total_sum if total_sum > 0 else 0.0,
            "correct": correct_sum,
            "total": total_sum,
            # total is the number of validate tokens
        }
        if sample_size != ntokens:
            agg_output["nll_loss"] = ce_loss / ntokens
        c_errors = sum(log.get("c_errors", 0) for log in logging_outputs)
        c_total = sum(log.get("c_total", 1) for log in logging_outputs)
        if c_total > 1:
            agg_output["uer"] = c_errors * 100.0 / c_total
        # loss: per output token loss
        # nll_loss: per sentence loss
        return agg_output
@register_criterion("nar_qua_ctc_ce")
class NAR_QUA_CTC_CE(QuantityCrossEntropyWithAccCriterionV2):
    """Non-autoregressive criterion: CTC + masked CE + quantity loss.

    Combines three terms:
      * label-smoothed CE on positions selected by ``net_output['pred_mask']``
        (non-selected targets are replaced with pad and ignored),
      * a CTC loss over ``net_output['len_logits_ctc']`` frames,
      * the quantity loss on the predicted length.
    """

    def compute_loss(self, model, net_output, sample, reduction, log_probs):
        """Return (lprobs, masked targets, ctc_loss, qua_loss, ce_loss)."""
        pad_id = self.task.target_dictionary.pad()
        # number loss: smooth |predicted_len - true_len|
        _number = net_output["num_output"]
        number = sample["target_lengths"].float()
        diff = torch.sqrt(torch.pow(_number - number, 2) + 1e-12).sum()
        qua_loss = diff
        lprobs_ctc, lprobs = model.get_normalized_probs(net_output, retrun_ctc=True, log_probs=log_probs)
        # CE loss -- only on positions the model was asked to predict;
        # everything else is overwritten with pad and thus ignored
        pred_mask = net_output["pred_mask"]
        lprobs = lprobs.view(-1, lprobs.size(-1))  # N, T, D -> N * T, D
        target_masked = torch.where(
            pred_mask,
            sample["target"],
            torch.ones_like(sample["target"]) * pad_id).long().view(-1)
        # target_masked = sample["target"].long().view(-1)
        ce_loss, _ = label_smoothed_nll_loss(
            lprobs,
            target_masked,
            0.0,
            ignore_index=pad_id,
            reduce=reduction,
        )
        # CTC loss
        target = sample["target"]
        pad_mask = target != pad_id
        targets_flat = target.masked_select(pad_mask)
        target_lengths = sample["target_lengths"]
        len_lprobs = net_output["len_logits_ctc"]
        # cudnn CTC is disabled for numerical-stability/compatibility of the
        # deterministic CPU/GPU fallback implementation
        with torch.backends.cudnn.flags(enabled=False):
            ctc_loss = F.ctc_loss(
                lprobs_ctc.transpose(0, 1),  # T x B x V
                targets_flat,
                len_lprobs,
                target_lengths,
                blank=self.task.target_dictionary.blk(),
                reduction="sum",
                zero_infinity=True,
            )
        return lprobs, target_masked, ctc_loss, qua_loss, ce_loss

    def forward(self, model, sample, reduction="sum", log_probs=True):
        """Training: weighted sum of CE/CTC/quantity losses.
        Evaluation: greedy argmax decoding plus edit-distance statistics.
        """
        nsentences = sample["target"].size(0)
        ntokens = sample["ntokens"]
        if model.training:
            net_output = model(**sample["net_input"])
            num_output = torch.round(net_output["num_output"]).int()
            gold_rate = net_output["gold_rate"]
            # sample_size is the number of positions the CE loss covers
            sample_size = net_output["pred_mask"].float().sum()
            lprobs, targets, ctc_loss, qua_loss, ce_loss = self.compute_loss(
                model, net_output, sample, reduction, log_probs
            )
            # total absolute length-prediction error for logging
            e_len = int(sum(abs(sample["target_lengths"].data - num_output.data)))
            loss = ce_loss + \
                   self.args.lambda_qua * qua_loss * sample_size / nsentences + \
                   self.args.lambda_ctc * ctc_loss * sample_size / ntokens
            sample_size, logging_output = self.get_logging_output(
                sample, lprobs, targets, e_len, loss, ctc_loss, qua_loss, ce_loss, gold_rate
            )
        else:
            import editdistance
            net_output = model(**sample["net_input"])
            num_output = torch.round(net_output["num_output"]).int()
            loss = sample_size = 0.0
            logging_output = {
                "ntokens": ntokens,
                "nsentences": nsentences,
                "sample_size": sample_size
            }
            c_err = 0
            c_len = 0
            e_len = 0
            with torch.no_grad():
                # range(9999) only caps the zip; i itself is unused
                for i, logits, l, t in zip(range(9999), net_output['logits'], num_output, sample["target"]):
                    # decoded = logits.argmax(dim=-1)[:l]
                    p = t != self.task.target_dictionary.pad()
                    decoded = logits.argmax(dim=-1)[:l]
                    targ = t[p]
                    targ_units_arr = targ.tolist()
                    pred_units_arr = decoded.tolist()
                    c_err += editdistance.eval(pred_units_arr, targ_units_arr)
                    c_len += len(targ_units_arr)
                    e_len += abs(len(targ_units_arr) - len(pred_units_arr)) * 1.0
                    # occasionally print a decoded sample for eyeballing
                    logits2sent(pred_units_arr, targ_units_arr, model.tgt_dict, rate=0.03)
            logging_output["c_errors"] = c_err
            logging_output["c_total"] = c_len
            logging_output["e_len"] = e_len
        return loss, sample_size, logging_output

    def get_logging_output(self, sample, lprobs, targets, e_len, loss, ctc_loss, qua_loss, ce_loss, gold_rate):
        """Build the per-batch logging dict and return (sample_size, dict)."""
        # NOTE(review): masks index 0 -- presumably the pad/blank id of the
        # (BERT-style) vocabulary; confirm against the dictionary used.
        mask = targets != 0
        correct = torch.sum(
            lprobs.argmax(1).masked_select(mask) == targets.masked_select(mask)
        )
        total = torch.sum(mask)
        sample_size = max(utils.item(mask.sum().float().data), 1.0)
        logging_output = {
            "loss": utils.item(loss.data),  # * sample['ntokens'],
            "ctc_loss": utils.item(ctc_loss.data),
            "qua_loss": utils.item(qua_loss.data),  # * sample['ntokens'],
            "ce_loss": utils.item(ce_loss.data),  # * sample['ntokens'],
            "gold_rate": gold_rate,
            "ntokens": sample_size,
            "nsentences": sample["target"].size(0),
            "sample_size": sample_size,
            "correct": utils.item(correct.data),
            "total": utils.item(total.data),
            "e_len": e_len
        }
        return sample_size, logging_output

    @staticmethod
    def aggregate_logging_outputs(logging_outputs):
        """Aggregate logging outputs from data parallel training."""
        correct_sum = sum(log.get("correct", 0) for log in logging_outputs)
        total_sum = sum(log.get("total", 0) for log in logging_outputs)
        e_len = sum(log.get("e_len", 0) for log in logging_outputs)
        loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
        ctc_loss = sum(log.get("ctc_loss", 0) for log in logging_outputs)
        ce_loss = sum(log.get("ce_loss", 0) for log in logging_outputs)
        qua_loss = sum(log.get("qua_loss", 0) for log in logging_outputs)
        gold_rate = sum(log.get("gold_rate", 0) for log in logging_outputs)
        ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
        nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
        sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
        if sample_size > 0:  # training
            agg_output = {
                "loss": loss_sum / sample_size if sample_size > 0 else 0.0,
                "ctc_loss": ctc_loss / ntokens if ntokens > 0 else 0.0,
                "ce_loss": ce_loss / sample_size if sample_size > 0 else 0.0,
                "qua_loss": qua_loss / nsentences if nsentences > 0 else 0.0,
                "e_len": e_len / nsentences if nsentences > 0 else 0.0,
                # if args.sentence_avg, then sample_size is nsentences, then loss
                # is per-sentence loss; else sample_size is ntokens, the loss
                # becomes per-output token loss
                "gold_rate": gold_rate / len(logging_outputs),
                "ntokens": ntokens,
                "nsentences": nsentences,
                "sample_size": sample_size,
                "acc": correct_sum * 100.0 / total_sum if total_sum > 0 else 0.0,
                "correct": correct_sum,
                "total": total_sum,
                # total is the number of validate tokens
            }
        else:
            # evaluation: report error rates instead of losses
            c_errors = sum(log.get("c_errors", 0) for log in logging_outputs)
            c_total = sum(log.get("c_total", 1) for log in logging_outputs)
            agg_output = {
                "uer": c_errors * 100.0 / c_total,
                "qua_error": e_len * 100.0 / c_total,
                "qua_loss": qua_loss / nsentences if nsentences > 0 else 0.0,
                "gold_rate": gold_rate,
                "ntokens": ntokens,
                "nsentences": nsentences
            }
        return agg_output
@register_criterion("ctc_cif_bert")
class CTC_CIF_BERT(NAR_QUA_CTC_CE):
    """CTC + masked CE + quantity loss for the CIF-BERT model variant.

    Same loss structure as ``nar_qua_ctc_ce`` but the masked CE targets are
    finally re-derived with ``pred_mask[:, 1:-1]`` (presumably stripping the
    BERT [CLS]/[SEP] columns -- TODO confirm).
    """

    def compute_loss(self, model, net_output, sample, reduction, log_probs):
        """Return (lprobs, masked targets, ctc_loss, qua_loss, ce_loss)."""
        pad_id = self.task.target_dictionary.pad()
        # number loss: smooth |predicted_len - true_len|
        _number = net_output["num_output"]
        number = sample["target_lengths"].float()
        diff = torch.sqrt(torch.pow(_number - number, 2) + 1e-12).sum()
        qua_loss = diff
        target = sample["target"].view(-1)  # N, T -> N * T
        lprobs_ctc, lprobs = model.get_normalized_probs(net_output, retrun_ctc=True, log_probs=log_probs)
        # CE loss restricted to positions selected by pred_mask
        pred_mask = net_output["pred_mask"]
        lprobs = lprobs.view(-1, lprobs.size(-1))  # N, T, D -> N * T, D
        target_masked = torch.where(
            pred_mask,
            sample["target"],
            torch.ones_like(sample["target"]) * pad_id).long().view(-1)
        ce_loss, _ = label_smoothed_nll_loss(
            lprobs,
            target_masked,
            0.0,
            ignore_index=self.task.target_dictionary.pad(),
            reduce=reduction,
        )
        # CTC loss
        target = sample["target"]
        pad_mask = target != self.task.target_dictionary.pad()
        targets_flat = target.masked_select(pad_mask)
        target_lengths = sample["target_lengths"]
        len_lprobs = net_output["len_logits_ctc"]
        with torch.backends.cudnn.flags(enabled=False):
            ctc_loss = F.ctc_loss(
                lprobs_ctc.transpose(0, 1),  # T x B x V
                targets_flat,
                len_lprobs,
                target_lengths,
                # NOTE(review): sibling criteria use target_dictionary.blk()
                # as the CTC blank -- here pad() is used; confirm intended.
                blank=self.task.target_dictionary.pad(),
                reduction="sum",
                zero_infinity=True,
            )
        # NOTE(review): target_masked is recomputed here (overriding the
        # version built above) with pred_mask trimmed by one column on each
        # side -- confirm which one the logging should reflect.
        pred_mask = net_output["pred_mask"]
        target_masked = (sample["target"] * pred_mask[:, 1:-1]).view(-1)
        return lprobs, target_masked, ctc_loss, qua_loss, ce_loss

    def forward(self, model, sample, reduction="sum", log_probs=True):
        """Computes the cross entropy with accuracy metric for the given sample.

        This is similar to CrossEntropyCriterion in fairseq, but also
        computes accuracy metrics as part of logging

        Args:
            logprobs (Torch.tensor) of shape N, T, D i.e.
                batchsize, timesteps, dimensions
            targets (Torch.tensor) of shape N, T i.e batchsize, timesteps

        Returns:
            tuple: With three elements:
                1) the loss
                2) the sample size, which is used as the denominator for the gradient
                3) logging outputs to display while training

        TODO:
            * Currently this Criterion will only work with LSTMEncoderModels or
              FairseqModels which have decoder, or Models which return TorchTensor
              as net_output.
              We need to make a change to support all FairseqEncoder models.
        """
        nsentences = sample["target"].size(0)
        ntokens = sample["ntokens"]
        if model.training:
            net_output = model(**sample["net_input"])
            num_output = torch.round(net_output["num_output"]).int()
            gold_rate = net_output["gold_rate"]
            # sample_size is the number of positions the CE loss covers
            sample_size = net_output["pred_mask"].float().sum()
            lprobs, targets, ctc_loss, qua_loss, ce_loss = self.compute_loss(
                model, net_output, sample, reduction, log_probs
            )
            # total absolute length-prediction error for logging
            e_len = int(sum(abs(sample["target_lengths"].data - num_output.data)))
            loss = ce_loss + \
                   self.args.lambda_qua * qua_loss * sample_size / nsentences + \
                   self.args.lambda_ctc * ctc_loss * sample_size / ntokens
            sample_size, logging_output = self.get_logging_output(
                sample, lprobs, targets, e_len, loss, ctc_loss, qua_loss, ce_loss, gold_rate
            )
        else:
            import editdistance
            net_output = model(**sample["net_input"])
            num_output = torch.round(net_output["num_output"]).int()
            loss = sample_size = 0.0
            logging_output = {
                "ntokens": ntokens,
                "nsentences": nsentences,
                "sample_size": sample_size
            }
            c_err = 0
            c_len = 0
            e_len = 0
            with torch.no_grad():
                # range(9999) only caps the zip; i itself is unused
                for i, logits, l, t in zip(range(9999), net_output['logits'], num_output, sample["target"]):
                    # decoded = logits.argmax(dim=-1)[:l]
                    p = t != self.task.target_dictionary.pad()
                    decoded = logits.argmax(dim=-1)[:l]
                    targ = t[p]
                    targ_units_arr = targ.tolist()
                    pred_units_arr = decoded.tolist()
                    c_err += editdistance.eval(pred_units_arr, targ_units_arr)
                    c_len += len(targ_units_arr)
                    e_len += abs(len(targ_units_arr) - len(pred_units_arr)) * 1.0
                    # occasionally print a decoded sample for eyeballing
                    logits2sent(pred_units_arr, targ_units_arr, model.tgt_dict, rate=0.03)
            logging_output["c_errors"] = c_err
            logging_output["c_total"] = c_len
            logging_output["e_len"] = e_len
        return loss, sample_size, logging_output
@register_criterion("qua_ctc_ce")
class QUA_CTC_CE(QuantityCrossEntropyWithAccCriterionV2):
    """Quantity loss + CTC + unmasked label-smoothed CE.

    Unlike ``nar_qua_ctc_ce`` the CE term here covers every target position
    (no ``pred_mask``), and CTC is weighted by a plain ``lambda_ctc`` factor.
    """

    def compute_loss(self, model, net_output, sample, reduction, log_probs):
        """Return (lprobs, ctc_loss, qua_loss, ce_loss)."""
        # number loss: smooth |predicted_len - true_len|
        _number = net_output["num_output"]
        number = sample["target_lengths"].float()
        diff = torch.sqrt(torch.pow(_number - number, 2) + 1e-12).sum()
        qua_loss = diff
        # N, T -> N * T
        target = sample["target"].view(-1)
        lprobs_ctc, lprobs = model.get_normalized_probs(net_output, retrun_ctc=True, log_probs=log_probs)
        lprobs = lprobs.view(-1, lprobs.size(-1))
        # N, T, D -> N * T, D
        ce_loss, _ = label_smoothed_nll_loss(
            lprobs,
            target.long(),
            0.0,
            ignore_index=self.task.target_dictionary.pad(),
            reduce=reduction,
        )
        # CTC loss
        target = sample["target"]
        pad_mask = target != self.task.target_dictionary.pad()
        targets_flat = target.masked_select(pad_mask)
        target_lengths = sample["target_lengths"]
        len_lprobs = net_output["len_logits_ctc"]
        # cudnn CTC disabled; use the deterministic fallback implementation
        with torch.backends.cudnn.flags(enabled=False):
            ctc_loss = F.ctc_loss(
                lprobs_ctc.transpose(0, 1),  # T x B x V
                targets_flat,
                len_lprobs,
                target_lengths,
                blank=self.task.target_dictionary.blk(),
                reduction="sum",
                zero_infinity=True,
            )
        return lprobs, ctc_loss, qua_loss, ce_loss

    def get_logging_output(self, sample, lprobs, e_len, loss, ctc_loss, qua_loss, ce_loss, gold_rate):
        """Build the per-batch logging dict and return (sample_size, dict)."""
        targets = sample["target"].view(-1)
        # NOTE(review): 100 is a magic token id used as the "ignore" marker
        # for accuracy (elsewhere the pad id is used) -- confirm it matches
        # the vocabulary in use.
        mask = targets != 100
        correct = torch.sum(
            lprobs.argmax(1).masked_select(mask) == targets.masked_select(mask)
        )
        total = torch.sum(mask)
        sample_size = max(utils.item(mask.sum().float().data), 1.0)
        logging_output = {
            "loss": utils.item(loss.data),  # * sample['ntokens'],
            "ctc_loss": utils.item(ctc_loss.data),
            "qua_loss": utils.item(qua_loss.data),  # * sample['ntokens'],
            "ce_loss": utils.item(ce_loss.data),  # * sample['ntokens'],
            "gold_rate": gold_rate,
            "ntokens": sample_size,
            "nsentences": sample["target"].size(0),
            "sample_size": sample_size,
            "correct": utils.item(correct.data),
            "total": utils.item(total.data),
            "e_len": e_len
        }
        return sample_size, logging_output

    def forward(self, model, sample, reduction="sum", log_probs=True):
        """Computes the cross entropy with accuracy metric for the given sample.

        This is similar to CrossEntropyCriterion in fairseq, but also
        computes accuracy metrics as part of logging

        Args:
            logprobs (Torch.tensor) of shape N, T, D i.e.
                batchsize, timesteps, dimensions
            targets (Torch.tensor) of shape N, T i.e batchsize, timesteps

        Returns:
            tuple: With three elements:
                1) the loss
                2) the sample size, which is used as the denominator for the gradient
                3) logging outputs to display while training

        TODO:
            * Currently this Criterion will only work with LSTMEncoderModels or
              FairseqModels which have decoder, or Models which return TorchTensor
              as net_output.
              We need to make a change to support all FairseqEncoder models.
        """
        nsentences = sample["target"].size(0)
        ntokens = sample["ntokens"]
        if model.training:
            net_output = model(**sample["net_input"])
            num_output = torch.round(net_output["num_output"]).int()
            gold_rate = net_output["gold_rate"] if "gold_rate" in net_output else 0.0
            lprobs, ctc_loss, qua_loss, ce_loss = self.compute_loss(
                model, net_output, sample, reduction, log_probs
            )
            # total absolute length-prediction error for logging
            e_len = int(sum(abs(sample["target_lengths"].data - num_output.data)))
            loss = ce_loss + \
                   self.args.lambda_qua * qua_loss * ntokens / nsentences + \
                   self.args.lambda_ctc * ctc_loss
            sample_size, logging_output = self.get_logging_output(
                sample, lprobs, e_len, loss, ctc_loss, qua_loss, ce_loss, gold_rate
            )
        else:
            import editdistance
            net_output = model(**sample["net_input"])
            num_output = torch.round(net_output["num_output"]).int()
            loss = sample_size = 0.0
            logging_output = {
                "ntokens": ntokens,
                "nsentences": nsentences,
                "sample_size": sample_size
            }
            c_err = 0
            c_len = 0
            e_len = 0
            with torch.no_grad():
                # range(9999) only caps the zip; i itself is unused
                for i, logits, l, t in zip(range(9999), net_output['logits'], num_output, sample["target"]):
                    # decoded = logits.argmax(dim=-1)[:l]
                    p = t != self.task.target_dictionary.pad()
                    decoded = logits.argmax(dim=-1)[:l]
                    targ = t[p]
                    targ_units_arr = targ.tolist()
                    pred_units_arr = decoded.tolist()
                    c_err += editdistance.eval(pred_units_arr, targ_units_arr)
                    c_len += len(targ_units_arr)
                    e_len += abs(len(targ_units_arr) - len(pred_units_arr)) * 1.0
                    # occasionally print a decoded sample for eyeballing
                    logits2sent(pred_units_arr, targ_units_arr, model.tgt_dict, rate=0.03)
            logging_output["c_errors"] = c_err
            logging_output["c_total"] = c_len
            logging_output["e_len"] = e_len
        return loss, sample_size, logging_output

    @staticmethod
    def aggregate_logging_outputs(logging_outputs):
        """Aggregate logging outputs from data parallel training."""
        correct_sum = sum(log.get("correct", 0) for log in logging_outputs)
        total_sum = sum(log.get("total", 0) for log in logging_outputs)
        e_len = sum(log.get("e_len", 0) for log in logging_outputs)
        loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
        ctc_loss = sum(log.get("ctc_loss", 0) for log in logging_outputs)
        ce_loss = sum(log.get("ce_loss", 0) for log in logging_outputs)
        gold_rate = sum(log.get("gold_rate", 0) for log in logging_outputs)
        qua_loss = sum(log.get("qua_loss", 0) for log in logging_outputs)
        ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
        nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
        sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
        if sample_size > 0:  # training
            agg_output = {
                "loss": loss_sum / sample_size if sample_size > 0 else 0.0,
                "ctc_loss": ctc_loss / ntokens if ntokens > 0 else 0.0,
                "ce_loss": ce_loss / sample_size if sample_size > 0 else 0.0,
                "qua_loss": qua_loss / nsentences if nsentences > 0 else 0.0,
                "e_len": e_len / nsentences if nsentences > 0 else 0.0,
                # if args.sentence_avg, then sample_size is nsentences, then loss
                # is per-sentence loss; else sample_size is ntokens, the loss
                # becomes per-output token loss
                "ntokens": ntokens,
                "nsentences": nsentences,
                "sample_size": sample_size,
                "gold_rate": gold_rate / len(logging_outputs),
                "acc": correct_sum * 100.0 / total_sum if total_sum > 0 else 0.0,
                "correct": correct_sum,
                "total": total_sum,
                # total is the number of validate tokens
            }
        else:
            # evaluation: report error rates instead of losses
            c_errors = sum(log.get("c_errors", 0) for log in logging_outputs)
            c_total = sum(log.get("c_total", 1) for log in logging_outputs)
            agg_output = {
                "uer": c_errors * 100.0 / c_total,
                "qua_error": e_len * 100.0 / c_total,
                "qua_loss": qua_loss / nsentences if nsentences > 0 else 0.0,
                "ntokens": ntokens,
                "nsentences": nsentences
            }
        return agg_output
| StarcoderdataPython |
4828997 | <reponame>rohe/otest
"""
Assertion test module
~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2016 by <NAME>.
:license: APACHE 2.0, see LICENSE for more details.
"""
from future.backports.urllib.parse import parse_qs
import json
import inspect
import traceback
import sys
from otest.events import EV_PROTOCOL_REQUEST
from otest.events import EV_PROTOCOL_RESPONSE
from otest.events import EV_REDIRECT_URL
from otest.events import EV_RESPONSE
from otest.events import EV_HTTP_RESPONSE
from oic.oic import message
__author__ = 'rolandh'
# Severity levels for check results (indices into STATUSCODE below).
INFORMATION = 0
OK = 1
WARNING = 2
ERROR = 3
CRITICAL = 4
INTERACTION = 5
INCOMPLETE = 6
NOT_APPLICABLE = 7

# Human-readable names for the severity levels.
# NOTE(review): only 7 names for 8 levels -- INCOMPLETE(6) maps to
# 'PARTIAL RESULT' and STATUSCODE[NOT_APPLICABLE] would raise IndexError;
# confirm whether NOT_APPLICABLE is ever rendered.
STATUSCODE = ["INFORMATION", "OK", "WARNING", "ERROR", "CRITICAL",
              "INTERACTION", 'PARTIAL RESULT']
# Reverse lookup: status name -> numeric level.
STATUSCODE_TRANSL = dict([(STATUSCODE[i], i) for i in range(len(STATUSCODE))])

# Marker used to delimit the end of a test-run log/trace.
END_TAG = "==== END ===="
class TestResult(object):
    """Outcome of a single conformance test.

    Carries the test id, a numeric severity (index into STATUSCODE), an
    optional human-readable name/message and any extra keyword data in
    ``extra``.
    """
    name = 'test_result'

    def __init__(self, test_id, status=OK, name='', mti=False, message='',
                 **kwargs):
        self.test_id = test_id
        self.status = status
        self.name = name
        self.mti = mti
        self.message = message
        self.http_status = 0
        self.cid = ''
        self.extra = kwargs

    def __str__(self):
        # A falsy status (None or INFORMATION==0) renders as unknown.
        if not self.status:
            return '{}: status=?'.format(self.test_id)
        return '{}: status={}, message={}'.format(
            self.test_id, STATUSCODE[self.status], self.message)
class State(object):
    """Result record produced by a Check: severity plus optional context.

    The attributes mirror the constructor arguments; any extra keyword
    arguments are kept verbatim in ``kwargs``.
    """
    name = 'state'

    def __init__(self, test_id, status, name='', mti=False, message='',
                 context='', **kwargs):
        self.test_id = test_id
        self.status = status
        self.name = name
        self.mti = mti
        self.message = message
        self.context = context
        self.kwargs = kwargs

    def __str__(self):
        # "<context>:<id>" when a context is set, else just "<id>".
        if self.context:
            head = '{}:{}'.format(self.context, self.test_id)
        else:
            head = self.test_id
        txt = '{}: status={}'.format(head, STATUSCODE[self.status])
        if self.status != OK:
            txt = '{}, message={}'.format(txt, self.message)
        if self.name:
            txt = '{} [{}]'.format(txt, self.name)
        return txt
class Check(object):
    """ General test
    """
    # NOTE: the docstring above doubles as the check's display name in
    # response(); its wording is part of the observable behaviour.
    cid = "check"
    msg = "OK"
    mti = True
    state_cls = State

    def __init__(self, **kwargs):
        # Running status/message; _func() implementations update these.
        self._status = OK
        self._message = ""
        self.content = None
        self.url = ""
        self._kwargs = kwargs

    def _func(self, conv):
        # Subclasses override this; the base implementation is a no-op.
        return TestResult('')

    def __call__(self, conv=None, output=None):
        result = self._func(conv)
        # A dict result carries extra attributes and is expanded into a
        # state object via response().
        if isinstance(result, dict):
            result = self.response(**result)
        if output is not None:
            output.append(result)
        return result

    def response(self, **kwargs):
        # Derive the check name from the (possibly multi-line) docstring;
        # a missing docstring (None) yields an empty name.
        try:
            doc_lines = self.__doc__.strip().split("\n")
        except AttributeError:
            name = ""
        else:
            name = " ".join(str(line).strip() for line in doc_lines)

        res = self.state_cls(test_id=self.cid, status=self._status, name=name,
                             mti=self.mti)

        if self._message:
            res.message = self._message
        elif self._status != OK:
            res.message = self.msg

        # Stash any extra keyword data on the check instance itself.
        for key, val in kwargs.items():
            setattr(self, key, val)

        return res
# Thin Check subclasses that only pin the status level reported on failure.
# Deliberately left without docstrings: Check.response() uses the docstring
# as the check's display name, so adding one would change behaviour.
class ExpectedError(Check):
    pass
class CriticalError(Check):
    # Highest-severity failure level.
    status = CRITICAL
class Information(Check):
    # Purely informational result.
    status = INFORMATION
class Warnings(Check):
    status = WARNING
class Error(Check):
    status = ERROR
class ResponseInfo(Information):
    """Response information"""
    # NOTE: the docstring above is surfaced verbatim as the check name by
    # Check.response(); do not reword it casually.
    def _func(self, conv=None):
        # self.status is the class-level level (INFORMATION).
        self._status = self.status
        _msg = conv.events.last_item(EV_RESPONSE)
        if isinstance(_msg, str):
            self._message = _msg
        else:
            # Protocol message objects are serialised to a plain dict.
            self._message = _msg.to_dict()
        return {}
class WrapException(CriticalError):
    """
    A runtime exception
    """
    cid = "exception"
    msg = "Test tool exception"
    def _func(self, conv=None):
        self._status = self.status
        # Capture the traceback of the exception currently being handled;
        # callers invoke this check from inside an except block.
        self._message = traceback.format_exception(*sys.exc_info())
        return {}
class Other(CriticalError):
    """ Other error """
    # Catch-all critical error with a generic message.
    msg = "Other error"
class CheckHTTPResponse(CriticalError):
    """
    Checks that the HTTP response status is within a specified range
    """
    # NOTE: the docstring above is the check's display name; keep stable.
    cid = "http_response"
    msg = "Incorrect HTTP status_code"
    def _func(self, conv):
        _response = conv.events.last_item(EV_HTTP_RESPONSE)
        res = {}
        # No HTTP response recorded: nothing to check.
        if not _response:
            return res
        if 'status_code' in self._kwargs:
            # Explicit allow-list of acceptable status codes was configured.
            if _response.status_code not in self._kwargs['status_code']:
                self._status = self.status
                self._message = self.msg
                res["http_status"] = _response.status_code
        else:
            # Default rule: any 4xx/5xx is a failure.
            if _response.status_code >= 400:
                self._status = self.status
                self._message = self.msg
            # NOTE(review): in this branch http_status is recorded even on
            # success, while the allow-list branch records it only on
            # failure -- confirm the asymmetry is intentional.
            res["http_status"] = _response.status_code
        return res
def factory(cid):
    """Return the check class in this module whose ``cid`` equals *cid*.

    Returns None when no class matches.
    """
    _missing = object()
    module = sys.modules[__name__]
    for _name, member in inspect.getmembers(module, inspect.isclass):
        # Classes without a ``cid`` attribute are simply skipped.
        if getattr(member, 'cid', _missing) == cid:
            return member
    return None
def get_provider_info(conv):
    """Return the entity's provider info, falling back to the conversation's.

    The fallback is taken whenever the entity's value is falsy (None/empty).
    """
    return conv.entity.provider_info or conv.provider_info
def get_protocol_response(conv, cls):
    """Return all recorded protocol *response* messages of type ``cls``."""
    return conv.events.get_messages(EV_PROTOCOL_RESPONSE, cls)
def get_protocol_request(conv, cls):
    """Return all recorded protocol *request* messages of type ``cls``."""
    return conv.events.get_messages(EV_PROTOCOL_REQUEST, cls)
def get_id_tokens(conv):
    """Collect every parsed id_token from recorded protocol responses.

    Looks first in access-token responses, then in (implicit-flow)
    authorization responses; responses without an id_token are skipped.
    """
    res = []
    for response_cls in (message.AccessTokenResponse,
                         message.AuthorizationResponse):
        for inst in get_protocol_response(conv, response_cls):
            try:
                res.append(inst["id_token"])
            except KeyError:
                pass
    return res
def get_signed_id_tokens(conv):
    """Collect raw (still-signed) id_token values from recorded responses.

    Each recorded response may be an already-parsed dict, a JSON string,
    or an urlencoded query/fragment string; anything unparsable is skipped.
    """
    res = []
    for item in conv.events.get_data(EV_RESPONSE):
        if isinstance(item, dict):
            ent = item
        else:
            try:
                ent = json.loads(item)
            except Exception:
                # Not JSON -- fall back to urlencoded form data.
                try:
                    ent = parse_qs(item)
                except Exception:
                    # Fix: was a bare ``except:`` which also swallowed
                    # KeyboardInterrupt/SystemExit; still skip junk items.
                    continue
                # parse_qs values are lists; keep the first id_token.
                if 'id_token' in ent:
                    res.append(ent['id_token'][0])
                # Either way the urlencoded item is fully handled here.
                continue
        # Dict / JSON payloads carry the token as a plain value.
        try:
            res.append(ent['id_token'])
        except KeyError:
            pass
    return res
def get_authorization_request(conv, cls):
    """Re-parse the first recorded redirect URL as an authorization request.

    :param conv: conversation whose events hold the redirect URL
    :param cls: request message class used to decode the query string
    NOTE(review): assumes at least one EV_REDIRECT_URL event exists and that
    the URL contains a '?'; otherwise this raises IndexError -- confirm
    callers guarantee that.
    """
    authz_req = conv.events.get_data(EV_REDIRECT_URL)[0].split('?')[1]
    return cls().from_urlencoded(authz_req)
| StarcoderdataPython |
174784 | <reponame>nfirvine/netgeardisc
import socket
import time
import sys
# Usage: netgeardisc <my IP>; print help and bail out when missing.
if len(sys.argv) < 2 or sys.argv[1] in ('-h', '--help'):
    print('netgeardisc <my IP>')
    sys.exit(1)
else:
    # Local interface address to bind the discovery socket to.
    me = sys.argv[1]
# Netgear discovery probe payload.
# NOTE(review): this sends the ASCII hex *text*, not the decoded binary
# frame (bytes.fromhex) -- confirm the protocol really expects hex text.
p = b'000200000000000000000000000100000c07d2f20000000000000000000000000000000000000000'
sout = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sout.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
# Source port 64513; replies would arrive here (none are read by this tool).
sout.bind((me, 64513))
try:
    # Broadcast the probe twice a second until interrupted.
    while True:
        sout.sendto(p, ('<broadcast>', 64515))
        time.sleep(0.5)
except KeyboardInterrupt:
    sout.close()
| StarcoderdataPython |
1709993 | import http.client
from http.server import HTTPServer, BaseHTTPRequestHandler
import ssl
import socketserver
from cgi import parse_header, parse_multipart
from urllib.parse import parse_qs
import http.client
import logging
# Append all proxy activity to the local "log" file.
logging.basicConfig(filename="log",
                    filemode='a',
                    format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
                    datefmt='%H:%M:%S',
                    level=logging.DEBUG)
logger = logging.getLogger('ssl server log')
# Run flag read by start_server(); truthy keeps the serve/restart loop alive.
stop = 1
class MyHandler(BaseHTTPRequestHandler):
    """Intercepting HTTPS proxy handler: logs each request (including
    cookies and POST variables) to stdout, replays it against the real
    upstream host, and relays the upstream response back to the client."""

    def do_GET(self):
        print('[*] GET', self.headers['Host'] + "/" + self.path)
        # Copy all client headers for the upstream request.
        hs = {}
        for h in self.headers:
            #if str(h) != 'Accept-Encoding':
            hs[h] = self.headers[h]
        print("\t\tcookie: ", self.headers['Cookie'])
        print("\n")
        # Retry the upstream fetch up to 10 times; return on first success.
        for i in range(10):
            try:
                # Certificate verification is deliberately disabled -- this
                # is an interception proxy.
                ctx = ssl._create_default_https_context()
                ctx.check_hostname = False
                conn = http.client.HTTPSConnection(self.headers['Host'], context=ctx, check_hostname=False)
                conn.request("GET", self.path, headers=hs) # TODO check if headers are working
                r1 = conn.getresponse()
                data1 = r1.read()
                # Relay status, headers and body back to the client.
                self.send_response(r1.status)
                for h in r1.headers:
                    self.send_header(h, r1.headers[h])
                self.end_headers()
                self.wfile.write(data1)
                return
            except Exception as e:
                logger.error("while do_GET" + str(e))

    def parse_POST(self):
        """Parse the request body into a dict of form variables.

        Side effect: for urlencoded bodies, the raw bytes are kept in
        ``self.params`` so do_POST can replay them upstream verbatim.
        NOTE(review): returns None (implicitly) if the outer try fails,
        which makes do_POST's ``for v in tmp`` raise -- confirm.
        """
        try:
            ctype, pdict = parse_header(self.headers['content-type'])
            if ctype == 'multipart/form-data':
                postvars = parse_multipart(self.rfile, pdict)
            elif ctype == 'application/x-www-form-urlencoded':
                length = int(self.headers['content-length'])
                try:
                    self.params = self.rfile.read(length)
                    d = self.params.decode('ascii')
                    postvars = parse_qs(d)
                except Exception as e:
                    print(str(e))
                    postvars = {}
            else:
                postvars = {}
            return postvars
        except Exception as e:
            logger.error("while parsing post variables")

    def do_POST(self):
        try:
            print("[*] POST " + self.headers['Host'] + " " + self.path)
            tmp = self.parse_POST()
            postvars = {}
            print("\t\tcookie: ", self.headers['Cookie'])
            # parse_qs yields lists; keep only the first value per field.
            for v in tmp:
                postvars[v] = tmp[v][0]
            print("\t\tPOST variables:")
            for v in postvars:
                print("\t\t\t", v, ": ", postvars[v])
            print("\n")
            hs = {}
            for h in self.headers:
                hs[h] = self.headers[h]
            # NOTE(review): self.params is only set for urlencoded bodies
            # (see parse_POST); multipart replays would fail here.
            conn = http.client.HTTPSConnection(self.headers['Host'])
            conn.request("POST", self.path, self.params, self.headers) # TODO check if headers are working
            r1 = conn.getresponse()
            data1 = r1.read()
            self.send_response(r1.status)
            for h in r1.headers:
                self.send_header(h, r1.headers[h])
            self.end_headers()
            self.wfile.write(data1)
        except Exception as e:
            logger.error("in do_POST: " + str(e))

    def log_message(self, format, *args):
        # Silence the default per-request stderr logging.
        return
class ForkingHTTPServer(socketserver.ThreadingMixIn, HTTPServer):
    """HTTP server handling each request in its own thread.

    NOTE(review): despite the name, ThreadingMixIn makes this a *threading*
    server, not a forking one.
    """
    def finish_request(self, request, client_address):
        # Cap each client socket at 30s so a stalled peer cannot pin a thread.
        request.settimeout(30)
        HTTPServer.finish_request(self, request, client_address)
def start_server(host_ip):
    """Run the TLS interception server on ``host_ip``:4433.

    Restarts the server after any error; loops while the module-level
    ``stop`` flag is truthy.
    """
    global stop
    while stop:
        # Bug fix: httpd must exist before the try block -- if the
        # ForkingHTTPServer constructor raises (e.g. port already in use),
        # the finally clause previously hit an UnboundLocalError.
        httpd = None
        try:
            print("[*] SSL server running...")
            httpd = ForkingHTTPServer((host_ip, 4433), MyHandler)
            # NOTE: ssl.wrap_socket is deprecated (removed in 3.12); kept
            # here for behavioural parity with the rest of the module.
            httpd.socket = ssl.wrap_socket(httpd.socket, certfile='cert.pem', keyfile='key.pem', server_side=True,
                                           cert_reqs=ssl.CERT_OPTIONAL, ssl_version=ssl.PROTOCOL_TLSv1)
            httpd.serve_forever()
        except Exception as e:
            print(str(e))
        finally:
            if httpd is not None:
                httpd.socket.close()
                httpd.server_close()
            print("[*] SSL server stopped")
3238247 | <filename>data/plots/src/plot_2D_electrochem.py<gh_stars>0
from utils import *
import numpy
import matplotlib.pyplot as plt
import os, os.path
from scipy.constants import k, e, electron_volt, epsilon_0
# Grid resolution of each 2-D field stored in the .npy files.
pixels = (512, 512)
# Quantities stored per gate voltage; units[i] labels quantities[i].
quantities = ("V", "c_p", "c_n", "zflux_cp", "zflux_cn")
units = ("V", "mol/L", "mol/L", "mol/(m$^{2}$*s)", "mol/(m$^{2}$*s)")
# Gate voltages present in the data files, in storage (column) order.
Vg_all = [0.001, 0.025, 0.05, 0.1, 0.15, 0.2] + list(numpy.arange(0.25, 1.35, 0.1))
file_template = "{0}.npy"
concentrations = (0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1)
# Scale factor applied to the bulk concentration in the mu formula --
# NOTE(review): presumably mol/L -> mol/m^3; confirm the units.
ratio_conc = 10**3
# Map (gate voltage, quantity) to its column in the stacked data matrix.
def get_col_index(Vg, quantity):
    """Columns 0-1 hold the X/Y grid; data columns follow in one
    len(quantities)-wide block per gate voltage, in Vg_all order."""
    block_offset = Vg_all.index(Vg) * len(quantities)
    return 2 + block_offset + quantities.index(quantity)
out_path = "../result/concentration/2D/"
plot_path = "../plot/concentration"
# Vg_plot = (0.001, 0.15)
Vg_plot = (0.15,)
# (quantity, ion charge sign z): positive for cations, negative for anions.
pairs = [("c_p", 1), ("c_n", -1)]
T = 300  # temperature in K
plt.style.use("science")
for conc in [0.001]:
    file_name = os.path.join(out_path, file_template.format(conc))
    data = get_data(file_name) # already in nm
    # Reshape the first two columns into the 2-D coordinate grids.
    X = data[:, 0].reshape(*pixels); Y = data[:, 1].reshape(*pixels)
    x = data[:, 0]; y = data[:, 1]
    # cond = numpy.where(x)
    # print(X[1, 1], Y[1, 1])
    for quant, z in pairs:
        for Vg in Vg_plot:
            print(get_col_index(Vg, quant))
            c = data[:, get_col_index(Vg, quant)]
            # Replace NaNs (outside-domain points) with zero concentration.
            c[numpy.isnan(c)] = 0
            print(numpy.max(c), numpy.min(c))
            v = numpy.nan_to_num(data[:, get_col_index(Vg, "V")])
            v0 = numpy.mean(v[y>19.0])
            # NOTE(review): this electrochemical potential (entropy +
            # electrostatic term) is immediately overwritten below, so only
            # the entropic part is plotted and v/v0 are effectively unused
            # -- confirm which definition is intended.
            mu = (k * T * numpy.log(c / (conc * ratio_conc)) + z * e * v) / electron_volt
            # mu = (z * e * v) / electron_volt
            mu = (k * T * numpy.log(c / (conc * ratio_conc))) / electron_volt
            # Reference mu to its bulk value (far region z > 19.5 nm).
            mu0 = numpy.mean(mu[y>19.5])
            mu = mu - mu0
            D = mu.reshape(*pixels)
            # log(0) produced -inf where c == 0; zero those for plotting.
            D[numpy.isinf(D)] = 0
            print(numpy.max(D), numpy.min(D))
            plt.cla()
            fig = plt.figure(figsize=(2.8, 2.8))
            ax = fig.add_subplot(111)
            # if z > 0:
            #     vmin = -0.10; vmax = 0.01
            # else:
            #     vmin = 0.008; vmax = 0.011
            mesh = plot_data(ax, X, Y, D)
            mesh.set_cmap("jet")
            ax.set_title("{0} mol/L-{1} V-{2}".format(conc,
                                                      Vg,
                                                      quant
                                                      ))
            ax.set_xlabel("$r$ (nm)")
            ax.set_ylabel("$z$ (nm)")
            add_graphene(ax, R_p=10)
            fig.colorbar(mesh, fraction=0.03)
            fig.tight_layout()
            outfile = os.path.join(plot_path,
                                   "mu-{0}-{1}-{2}.svg".format(conc,
                                                               Vg,
                                                               quant))
            print(outfile)
            fig.savefig(outfile)
1754040 | <gh_stars>1-10
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2017, Battelle Memorial Institute.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This material was prepared as an account of work sponsored by an agency of
# the United States Government. Neither the United States Government nor the
# United States Department of Energy, nor Battelle, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness or any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY operated by
# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
from __future__ import absolute_import, print_function
import datetime
import logging
import sys
import time
import threading
import gevent
from volttron.platform.vip.agent import Agent, Core, compat
from volttron.platform.vip.agent.utils import build_agent
from volttron.platform.agent.base_historian import BaseHistorian, add_timing_data_to_header
from volttron.platform.agent import utils
from volttron.platform.keystore import KnownHostsStore
from volttron.platform.messaging import topics, headers as headers_mod
from volttron.platform.messaging.health import STATUS_BAD, Status
from volttron.platform.agent.known_identities import PLATFORM_HISTORIAN
# Alert key used when publishing timeout health alerts to the platform.
DATAMOVER_TIMEOUT_KEY = 'DATAMOVER_TIMEOUT_KEY'
utils.setup_logging()
_log = logging.getLogger(__name__)
__version__ = '0.1'
def historian(config_path, **kwargs):
    """Build a DataMover agent from the config file at ``config_path``.

    Resolves the destination server key from the known-hosts store when
    possible, otherwise requires it in the config.
    NOTE(review): validation uses ``assert``, which is stripped under
    ``python -O`` -- consider raising explicitly.
    """
    config = utils.load_config(config_path)
    destination_vip = config.get('destination-vip', None)
    assert destination_vip is not None
    hosts = KnownHostsStore()
    serverkey = hosts.serverkey(destination_vip)
    if serverkey is not None:
        # Known-hosts entry wins over whatever the config carries.
        config['destination-serverkey'] = serverkey
    else:
        assert config.get('destination-serverkey') is not None
        _log.info("Destination serverkey not found in known hosts file, "
                  "using config")
    # Merge file config into kwargs and hand everything to the agent.
    utils.update_kwargs_with_config(kwargs, config)
    return DataMover(**kwargs)
class DataMover(BaseHistorian):
    """This historian forwards data to another platform.
    """
    # NOTE(review): this module predates Python 3 support -- see the
    # ``long`` builtin used in capture_data below.

    def __init__(self, destination_vip, destination_serverkey,
                 destination_historian_identity=PLATFORM_HISTORIAN,
                 **kwargs):
        """
        :param destination_vip: vip address of the destination volttron
        instance
        :param destination_serverkey: public key of the destination server
        :param services_topic_list: subset of topics that are inherently
        supported by base historian. Default is device, analysis, logger,
        and record topics
        :param custom_topic_list: any additional topics this historian
        should subscribe to.
        :param destination_historian_identity: vip identity of the
        destination historian. default is 'platform.historian'
        :param kwargs: additional arguments to be passed along to parent class
        """
        # Run the processing loop in a greenlet so gevent.Timeout works.
        kwargs["process_loop_in_greenlet"] = True
        super(DataMover, self).__init__(**kwargs)
        self.destination_vip = destination_vip
        self.destination_serverkey = destination_serverkey
        self.destination_historian_identity = destination_historian_identity
        config = {"destination_vip":self.destination_vip,
                  "destination_serverkey": self.destination_serverkey,
                  "destination_historian_identity": self.destination_historian_identity}
        self.update_default_config(config)
        # will be available in both threads.
        self._last_timeout = 0

    def configure(self, configuration):
        """Apply (updated) configuration values from the config store."""
        self.destination_vip = str(configuration.get('destination_vip', ""))
        self.destination_serverkey = str(configuration.get('destination_serverkey', ""))
        self.destination_historian_identity = str(configuration.get('destination_historian_identity', PLATFORM_HISTORIAN))

    #Redirect the normal capture functions to capture_data.
    def _capture_device_data(self, peer, sender, bus, topic, headers, message):
        self.capture_data(peer, sender, bus, topic, headers, message)

    def _capture_log_data(self, peer, sender, bus, topic, headers, message):
        self.capture_data(peer, sender, bus, topic, headers, message)

    def _capture_analysis_data(self, peer, sender, bus, topic, headers, message):
        self.capture_data(peer, sender, bus, topic, headers, message)

    def _capture_record_data(self, peer, sender, bus, topic, headers, message):
        self.capture_data(peer, sender, bus, topic, headers, message)

    def timestamp(self):
        # Current wall-clock time as a Unix timestamp (seconds).
        return time.mktime(datetime.datetime.now().timetuple())

    def capture_data(self, peer, sender, bus, topic, headers, message):
        """Queue one captured pubsub message for forwarding."""
        # Grab the timestamp string from the message (we use this as the
        # value in our readings at the end of this method)
        _log.debug("In capture data")
        timestamp_string = headers.get(headers_mod.DATE, None)
        data = message
        try:
            # 2.0 agents compatability layer makes sender = pubsub.compat
            # so we can do the proper thing when it is here
            _log.debug("message in capture_data {}".format(message))
            if sender == 'pubsub.compat':
                data = compat.unpack_legacy_message(headers, message)
            _log.debug("data in capture_data {}".format(data))
            # Accept dicts and scalars as-is (the isinstance checks are
            # effectively no-ops kept for their side of documenting the
            # expected payload types; ``long`` is Python-2-only).
            if isinstance(data, dict):
                data = data
            elif isinstance(data, int) or \
                    isinstance(data, float) or \
                    isinstance(data, long):
                data = data
        except ValueError as e:
            log_message = "message for {topic} bad message string:" \
                          "{message_string}"
            _log.error(log_message.format(topic=topic,
                                          message_string=message[0]))
            raise
        # Apply any configured topic-rename rules before queueing.
        topic = self.get_renamed_topic(topic)
        if self.gather_timing_data:
            add_timing_data_to_header(
                headers,
                self.core.agent_uuid or self.core.identity,
                "collected")
        payload = {'headers': headers, 'message': data}
        self._event_queue.put({'source': "forwarded",
                               'topic': topic,
                               'readings': [(timestamp_string, payload)]})

    def publish_to_historian(self, to_publish_list):
        """Forward queued items to the destination historian via RPC."""
        _log.debug("publish_to_historian number of items: {}"
                   .format(len(to_publish_list)))
        current_time = self.timestamp()
        last_time = self._last_timeout
        _log.debug('Last timeout: {} current time: {}'.format(last_time,
                                                              current_time))
        if self._last_timeout:
            # if we failed we need to wait 60 seconds before we go on.
            if self.timestamp() < self._last_timeout + 60:
                _log.debug('Not allowing send < 60 seconds from failure')
                return
        if not self._target_platform:
            self.historian_setup()
        if not self._target_platform:
            _log.debug('Could not connect to target')
            return
        to_send = []
        for x in to_publish_list:
            topic = x['topic']
            headers = x['value']['headers']
            message = x['value']['message']
            if self.gather_timing_data:
                add_timing_data_to_header(
                    headers,
                    self.core.agent_uuid or self.core.identity,
                    "forwarded")
            to_send.append({'topic': topic,
                            'headers': headers,
                            'message': message})
        # Hard 30s ceiling around the RPC round trip; the call itself uses
        # a 10s timeout.
        with gevent.Timeout(30):
            try:
                _log.debug("Sending to destination historian.")
                self._target_platform.vip.rpc.call(
                    self.destination_historian_identity, 'insert',
                    to_send).get(timeout=10)
                self.report_all_handled()
            except gevent.Timeout:
                # Tear down the connection and back off (see 60s check above).
                self._last_timeout = self.timestamp()
                self._target_platform.core.stop()
                self._target_platform = None
                _log.error("Timeout when attempting to publish to target.")
                self.vip.health.set_status(
                    STATUS_BAD, "Timeout occurred")

    def historian_setup(self):
        """Connect a helper agent to the destination platform."""
        _log.debug("Setting up to forward to {}".format(self.destination_vip))
        try:
            agent = build_agent(address=self.destination_vip,
                                serverkey=self.destination_serverkey,
                                publickey=self.core.publickey,
                                secretkey=self.core.secretkey,
                                enable_store=False)
        except gevent.Timeout:
            # Connection failed: flag bad health and raise a platform alert;
            # _target_platform stays unset so callers retry later.
            self.vip.health.set_status(
                STATUS_BAD, "Timeout in setup of agent")
            status = Status.from_json(self.vip.health.get_status())
            self.vip.health.send_alert(DATAMOVER_TIMEOUT_KEY,
                                       status)
        else:
            self._target_platform = agent

    def historian_teardown(self):
        # Kill the forwarding agent if it is currently running.
        if self._target_platform is not None:
            self._target_platform.core.stop()
            self._target_platform = None
def main(argv=sys.argv):
    """Start the historian agent via the platform's vip_main helper.

    NOTE(review): ``argv`` is accepted but unused; kept for interface
    compatibility with the standard agent entry-point shape.
    """
    utils.vip_main(historian)


if __name__ == '__main__':
    # Entry point: run the agent, exiting quietly on Ctrl-C.
    try:
        sys.exit(main())
    except KeyboardInterrupt:
        pass
| StarcoderdataPython |
1793167 | from django.contrib.auth.models import AbstractUser
from django.db import models
class User(AbstractUser):
    """Custom auth user carrying gender and match-preference fields."""

    class GenderChoices(models.TextChoices):
        MALE = "M", "Male"
        FEMALE = "F", "Female"

    class MatchChoices(models.TextChoices):
        # Same codes as GenderChoices plus an "any" wildcard.
        MALE = "M", "Male"
        FEMALE = "F", "Female"
        ANY = "A", "Any"

    # Overrides AbstractUser.password to allow blank values.
    # NOTE(review): presumably for password-less accounts -- confirm this is
    # intentional before tightening.
    password = models.CharField('password', max_length=128, blank=True)
    # Self-reported gender of this user.
    gender = models.CharField(max_length=1, choices=GenderChoices.choices)
    # Gender this user wants to be matched with.
    want_match = models.CharField(max_length=1, choices=MatchChoices.choices)
    # Client IP recorded for this user; nullable.
    client_ip = models.GenericIPAddressField(null=True)
    # verbose_name is Korean for "creation date" -- runtime string, keep as-is.
    created_at = models.DateTimeField(verbose_name="생성일", auto_now_add=True)
4833121 | <filename>DQM/DTMonitorClient/python/dtResolutionAnalysisTest_cfi.py
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDHarvester import DQMEDHarvester
# DQM harvester grading DT segment-resolution histograms: residual mean and
# sigma below the "good" thresholds pass, above the "bad" thresholds fail.
dtResolutionAnalysisTest = DQMEDHarvester("DTResolutionAnalysisTest",
                                          diagnosticPrescale = cms.untracked.int32(1),
                                          maxGoodMeanValue = cms.untracked.double(0.005),
                                          minBadMeanValue = cms.untracked.double(0.015),
                                          maxGoodSigmaValue = cms.untracked.double(0.05),
                                          minBadSigmaValue = cms.untracked.double(0.08),
                                          # top folder for the histograms in DQMStore
                                          topHistoFolder = cms.untracked.string('DT/02-Segments')
                                          )
| StarcoderdataPython |
3263119 | from flask import Blueprint
# Flask blueprint grouping the participant views.
participant = Blueprint('participant', __name__)

# Imported after the blueprint exists: views needs ``participant`` at import
# time (presumably to avoid a circular import -- the usual Flask pattern).
from . import views # noqa
| StarcoderdataPython |
1692460 | """
搜索顺序
"""
import sys
# Import search path: ["script directory", ...].
# Every import walks this list in order; when a listed path contains the
# requested module file, the import succeeds.
print(sys.path)
| StarcoderdataPython |
3355151 | <reponame>iconnor/cowrie<filename>src/cowrie/test/test_cat.py
# -*- test-case-name: Cowrie Test Cases -*-
# Copyright (c) 2018 <NAME>
# See LICENSE for details.
"""
Tests for general shell interaction and cat command
"""
import os
from twisted.trial import unittest
from cowrie.shell import protocol
from cowrie.test import fake_server, fake_transport
# Point cowrie at the repo-relative data/filesystem fixtures before any
# cowrie module reads its configuration.
os.environ["COWRIE_HONEYPOT_DATA_PATH"] = "../data"
os.environ["COWRIE_HONEYPOT_DOWNLOAD_PATH"] = "/tmp"
os.environ["COWRIE_SHELL_FILESYSTEM"] = "../share/cowrie/fs.pickle"
# Shell prompt the fake honeypot session is expected to emit.
PROMPT = b"root@unitTest:~# "
class ShellCatCommandTests(unittest.TestCase):
    """Exercises the honeypot shell's ``cat`` command end to end through a
    fake transport, asserting on the exact bytes echoed to the client."""

    def setUp(self):
        # Fresh interactive shell wired to a fake avatar/server pair.
        self.proto = protocol.HoneyPotInteractiveProtocol(
            fake_server.FakeAvatar(fake_server.FakeServer())
        )
        self.tr = fake_transport.FakeTransport("1.1.1.1", "1111")
        self.proto.makeConnection(self.tr)
        # Drop the login banner so tests only see their own output.
        self.tr.clear()

    def test_cat_command_001(self):
        """
        No such file
        """
        self.proto.lineReceived(b"cat nonExisting\n")
        self.assertEqual(
            self.tr.value(), b"cat: nonExisting: No such file or directory\n" + PROMPT
        )

    def test_cat_command_002(self):
        """
        argument - (stdin)
        """
        self.proto.lineReceived(b"echo test | cat -\n")
        self.assertEqual(self.tr.value(), b"test\n" + PROMPT)

    def test_cat_command_003(self):
        """
        test without arguments, read stdin only and quit
        """
        # cat with no args reads piped stdin; CTRL-D ends the session input.
        self.proto.lineReceived(b"echo 1 | cat\n")
        self.proto.lineReceived(b"echo 2\n")
        self.proto.handle_CTRL_D()
        self.assertEqual(self.tr.value(), b"1\n" + PROMPT + b"2\n" + PROMPT)

    def test_cat_command_004(self):
        """
        test handle of CTRL_C
        """
        self.proto.lineReceived(b"cat\n")
        self.proto.lineReceived(b"test\n")
        self.proto.handle_CTRL_C()
        self.assertEqual(self.tr.value(), b"test\n^C\n" + PROMPT)

    def tearDown(self):
        self.proto.connectionLost("tearDown From Unit Test")
| StarcoderdataPython |
1673823 | # Copyright 2017-2020 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from buckets.utils.cloud.azure_client import AzureClient
from buckets.utils.cloud.utilities import object_exists, get_versions
from buckets.utils.listing import *
from buckets.utils.file_utils import *
from common_utils.pipe_cli import *
ERROR_MESSAGE = "An error occurred in case "
@pytest.mark.skipif(os.environ['CP_PROVIDER'] == AzureClient.name, reason="Versioning is not supported for AZURE provider")
class TestDataStorageVersioning(object):
test_file_1 = "versioning1.txt"
test_file_1_without_extension = "versioning1"
test_file_2 = "versioning2.txt"
test_file_3 = "versioning3.txt"
test_folder_1 = "test_folder1"
test_folder_2 = "test_folder2"
test_folder_3 = "test_folder3"
bucket = 'epmcmbibpc-versioning-it{}'.format(get_test_prefix())
path_to_bucket = 'cp://{}'.format(bucket)
token = os.environ['USER_TOKEN']
user = os.environ['TEST_USER']
@classmethod
def setup_class(cls):
create_data_storage(cls.bucket, versioning=True)
create_test_file(os.path.abspath(cls.test_file_1), TestFiles.DEFAULT_CONTENT)
create_test_file(os.path.abspath(cls.test_file_2), TestFiles.COPY_CONTENT)
@classmethod
def teardown_class(cls):
clean_test_data(os.path.abspath(cls.test_file_1))
clean_test_data(os.path.abspath(cls.test_file_2))
delete_data_storage(cls.bucket)
def teardown_method(self, method):
pipe_output = get_pipe_listing(self.path_to_bucket, recursive=True)
for record in pipe_output:
pipe_storage_rm('cp://{}/{}'.format(self.bucket, record.name), args=['--hard-delete'], recursive=True,
expected_status=None)
pipe_output = get_pipe_listing(self.path_to_bucket, recursive=True)
assert len(pipe_output) == 0
pipe_storage_rm('cp://{}/{}'.format(self.bucket, self.test_file_1), args=['--hard-delete'], recursive=True,
expected_status=None)
pipe_storage_rm('cp://{}/{}'.format(self.bucket, self.test_folder_1), args=['--hard-delete'], recursive=True,
expected_status=None)
def test_list_marked_for_deletion_object(self):
destination = 'cp://{}/{}'.format(self.bucket, self.test_file_1)
try:
pipe_storage_cp(self.test_file_1, destination)
pipe_storage_rm(destination)
actual_output = get_pipe_listing(self.path_to_bucket)
assert len(actual_output) == 0
actual_output = assert_and_filter_first_versioned_listing_line(
get_pipe_listing(self.path_to_bucket, versioning=True))
expected_output = [
f(self.test_file_1, deleted=True, latest=True),
f(self.test_file_1, 10, added=True)
]
compare_listing(actual_output, expected_output, 2)
except BaseException as e:
pytest.fail(ERROR_MESSAGE + "epmcmbibpc-877:" + "\n" + e.message)
def test_restore_marked_for_deletion_file(self):
destination = 'cp://{}/{}'.format(self.bucket, self.test_file_1)
try:
pipe_storage_cp(self.test_file_1, destination)
pipe_storage_rm(destination)
output = pipe_storage_ls(self.path_to_bucket, show_details=False)[0]
assert len(output) == 0
actual_output = assert_and_filter_first_versioned_listing_line(
get_pipe_listing(self.path_to_bucket, versioning=True))
expected_output = [
f(self.test_file_1, deleted=True, latest=True),
f(self.test_file_1, 10, added=True)
]
compare_listing(actual_output, expected_output, 2)
pipe_storage_restore(destination)
actual_output = get_pipe_listing(self.path_to_bucket)
expected_output = [
f(self.test_file_1, 10)
]
compare_listing(actual_output, expected_output, 1)
except BaseException as e:
pytest.fail(ERROR_MESSAGE + "epmcmbibpc-881:" + "\n" + e.message)
def test_object_versions(self):
destination = 'cp://{}/{}'.format(self.bucket, self.test_file_1)
try:
pipe_storage_cp(self.test_file_1, destination)
pipe_storage_cp(self.test_file_2, destination, force=True) # another file with same name
actual_output = get_pipe_listing(self.path_to_bucket)
expected_output = [
f(self.test_file_1, 14)
]
compare_listing(actual_output, expected_output, 1)
actual_output = assert_and_filter_first_versioned_listing_line(
get_pipe_listing(self.path_to_bucket, versioning=True))
expected_output = [
f(self.test_file_1, 14, added=True, latest=True),
f(self.test_file_1, 10, added=True)
]
compare_listing(actual_output, expected_output, 2)
except BaseException as e:
pytest.fail(ERROR_MESSAGE + "epmcmbibpc-882:" + "\n" + e.message)
def test_restore_specific_version(self):
destination = 'cp://{}/{}'.format(self.bucket, self.test_file_1)
try:
pipe_storage_cp(self.test_file_1, destination)
pipe_storage_cp(self.test_file_2, destination, force=True)
pipe_storage_rm(destination)
actual_output = assert_and_filter_first_versioned_listing_line(
get_pipe_listing(self.path_to_bucket, versioning=True))
expected_output = [
f(self.test_file_1, deleted=True, latest=True),
f(self.test_file_1, 14, added=True),
f(self.test_file_1, 10, added=True)
]
compare_listing(actual_output, expected_output, 3, sort=False)
version = get_non_latest_version(actual_output)
assert version, "No version available to restore."
pipe_storage_restore(destination, version=version, expected_status=0)
actual_output = get_pipe_listing(self.path_to_bucket)
expected_output = [
f(self.test_file_1, 14)
]
compare_listing(actual_output, expected_output, 1)
except BaseException as e:
pytest.fail(ERROR_MESSAGE + "epmcmbibpc-883:" + "\n" + e.message)
def test_object_hard_deletion(self):
destination = 'cp://{}/{}'.format(self.bucket, self.test_file_1)
try:
pipe_storage_cp(os.path.abspath(self.test_file_1), destination)
pipe_storage_rm(destination, args=['--hard-delete'])
actual_output = assert_and_filter_first_versioned_listing_line(
get_pipe_listing(self.path_to_bucket, versioning=True), result_not_empty=False)
expected_output = []
compare_listing(actual_output, expected_output, 0)
except BaseException as e:
pytest.fail(ERROR_MESSAGE + "epmcmbibpc-884:" + "\n" + e.message)
def test_marked_object_hard_deletion(self):
destination = 'cp://{}/{}'.format(self.bucket, self.test_file_1)
try:
pipe_storage_cp(os.path.abspath(self.test_file_1), destination)
pipe_storage_rm(destination)
actual_output = assert_and_filter_first_versioned_listing_line(
get_pipe_listing(self.path_to_bucket, versioning=True))
expected_output = [
f(self.test_file_1, deleted=True, latest=True),
f(self.test_file_1, 10, added=True),
]
compare_listing(actual_output, expected_output, 2)
pipe_storage_rm(destination, args=['--hard-delete'], recursive=True)
actual_output = assert_and_filter_first_versioned_listing_line(
get_pipe_listing(self.path_to_bucket, versioning=True), result_not_empty=False)
expected_output = []
compare_listing(actual_output, expected_output, 0)
except BaseException as e:
pytest.fail(ERROR_MESSAGE + "epmcmbibpc-993:" + "\n" + e.message)
def test_mark_for_deletion_non_empty_folder(self):
destination_1 = 'cp://{}/{}/{}'.format(self.bucket, self.test_folder_1, self.test_file_1)
try:
pipe_storage_cp(os.path.abspath(self.test_file_1), destination_1)
pipe_storage_rm('cp://{}/{}'.format(self.bucket, self.test_folder_1), recursive=True)
actual_output = get_pipe_listing(self.path_to_bucket)
assert len(actual_output) == 0
actual_output = get_pipe_listing(self.path_to_bucket, versioning=True)
assert len(actual_output) == 1 and self.test_folder_1 in actual_output[0].name
actual_output = assert_and_filter_first_versioned_listing_line(
get_pipe_listing('cp://{}/{}'.format(self.bucket, self.test_folder_1), versioning=True, recursive=True))
expected_output = [
f('{}/{}'.format(self.test_folder_1, self.test_file_1), deleted=True, latest=True),
f('{}/{}'.format(self.test_folder_1, self.test_file_1), 10, added=True)
]
compare_listing(actual_output, expected_output, 2)
pipe_storage_restore('cp://{}/{}'.format(self.bucket, self.test_folder_1), expected_status=0,
recursive=True)
actual_output = get_pipe_listing(self.path_to_bucket)
assert len(actual_output) == 1 and self.test_folder_1 in actual_output[0].name
actual_output = assert_and_filter_first_versioned_listing_line(
get_pipe_listing('cp://{}/{}'.format(self.bucket, self.test_folder_1), versioning=True, recursive=True))
expected_output = [
f('{}/{}'.format(self.test_folder_1, self.test_file_1), 10, added=True, latest=True),
f('{}/{}'.format(self.test_folder_1, self.test_file_1), 10, added=True)
]
compare_listing(actual_output, expected_output, 2, sort=False)
except BaseException as e:
pytest.fail(ERROR_MESSAGE + "epmcmbibpc-885-886:" + "\n" + e.message)
def test_hard_deletion_non_empty_folder(self):
destination_1 = 'cp://{}/{}/{}/{}'.format(self.bucket, self.test_folder_1, self.test_folder_2, self.test_file_1)
destination_2 = 'cp://{}/{}/{}'.format(self.bucket, self.test_folder_1, self.test_file_1)
try:
pipe_storage_cp(os.path.abspath(self.test_file_1), destination_1)
pipe_storage_cp(os.path.abspath(self.test_file_1), destination_2)
pipe_storage_rm('cp://{}/{}'.format(self.bucket, self.test_folder_1), args=['--hard-delete'],
recursive=True)
actual_output = get_pipe_listing('cp://{}/{}'.format(self.bucket, self.test_folder_1))
assert len(actual_output) == 0
actual_output = assert_and_filter_first_versioned_listing_line(
get_pipe_listing('cp://{}/{}'.format(self.bucket, self.test_folder_1), versioning=True),
result_not_empty=False)
expected_output = []
compare_listing(actual_output, expected_output, 0)
except BaseException as e:
pytest.fail(ERROR_MESSAGE + "epmcmbibpc-887:" + "\n" + e.message)
def test_hard_deletion_marked_non_empty_folder(self):
    """Hard-delete must purge a folder tree even after it was soft-deleted first (epmcmbibpc-998)."""
    destination_1 = 'cp://{}/{}/{}/{}'.format(self.bucket, self.test_folder_1, self.test_folder_2, self.test_file_1)
    destination_2 = 'cp://{}/{}/{}'.format(self.bucket, self.test_folder_1, self.test_file_1)
    try:
        pipe_storage_cp(os.path.abspath(self.test_file_1), destination_1)
        pipe_storage_cp(os.path.abspath(self.test_file_1), destination_2)
        # Soft delete first: files disappear from the plain listing ...
        pipe_storage_rm('cp://{}/{}'.format(self.bucket, self.test_folder_1), recursive=True)
        actual_output = get_pipe_listing(self.path_to_bucket)
        assert len(actual_output) == 0
        actual_output = get_pipe_listing(self.path_to_bucket, versioning=True)
        assert len(actual_output) == 1 and self.test_folder_1 in actual_output[0].name
        # ... but the versioned listing still shows delete markers plus the originals.
        actual_output = assert_and_filter_first_versioned_listing_line(
            get_pipe_listing('cp://{}/{}'.format(self.bucket, self.test_folder_1), versioning=True, recursive=True))
        expected_output = [
            f('{}/{}'.format(self.test_folder_1, self.test_file_1), deleted=True, latest=True),
            f('{}/{}'.format(self.test_folder_1, self.test_file_1), 10, added=True),
            f('{}/{}/{}'.format(self.test_folder_1, self.test_folder_2, self.test_file_1),
              deleted=True, latest=True),
            f('{}/{}/{}'.format(self.test_folder_1, self.test_folder_2, self.test_file_1), 10, added=True)
        ]
        compare_listing(actual_output, expected_output, 4)
        # Hard delete then wipes the whole version history.
        pipe_storage_rm('cp://{}/{}'.format(self.bucket, self.test_folder_1), args=['--hard-delete'],
                        recursive=True)
        actual_output = get_pipe_listing('cp://{}/{}'.format(self.bucket, self.test_folder_1))
        assert len(actual_output) == 0
        actual_output = assert_and_filter_first_versioned_listing_line(
            get_pipe_listing('cp://{}/{}'.format(self.bucket, self.test_folder_1), versioning=True),
            result_not_empty=False)
        expected_output = []
        compare_listing(actual_output, expected_output, 0)
    except BaseException as e:
        pytest.fail(ERROR_MESSAGE + "epmcmbibpc-998:" + "\n" + e.message)
def test_mark_for_delete_non_existing_file(self):
    """Marking a missing path for deletion must fail with a 'not found' error (epmcmbibpc-945)."""
    missing_path = 'cp://{}/{}'.format(self.bucket, TestFiles.NOT_EXISTS_FILE)
    try:
        rm_result = pipe_storage_rm(missing_path, recursive=True, expected_status=1)
        stderr_lines = rm_result[1]
        expected_error = 'Storage path "{}" was not found'.format(missing_path)
        assert expected_error in stderr_lines[0]
    except BaseException as e:
        pytest.fail(ERROR_MESSAGE + "epmcmbibpc-945:" + "\n" + e.message)
def test_hard_delete_non_existing_file(self):
    """Hard-deleting a missing path must fail with a 'not found' error (epmcmbibpc-946)."""
    missing_path = 'cp://{}/{}'.format(self.bucket, TestFiles.NOT_EXISTS_FILE)
    try:
        rm_result = pipe_storage_rm(missing_path, recursive=True, expected_status=1, args=['--hard-delete'])
        stderr_lines = rm_result[1]
        expected_error = 'Storage path "{}" was not found'.format(missing_path)
        assert expected_error in stderr_lines[0]
    except BaseException as e:
        pytest.fail(ERROR_MESSAGE + "epmcmbibpc-946:" + "\n" + e.message)
def test_restore_non_existing_version(self):
    """Restoring an unknown version id must fail with an explicit error (epmcmbibpc-947)."""
    destination = 'cp://{}/{}'.format(self.bucket, self.test_file_1)
    not_existing_version = 'does-not-exist'
    try:
        pipe_storage_cp(self.test_file_1, destination)
        pipe_storage_cp(self.test_file_2, destination, force=True)  # another file with same name, creates a second version
        # expected_status=1: the CLI is expected to fail for the bogus version id.
        error_message = pipe_storage_restore(destination, expected_status=1, version=not_existing_version)[1]
        assert 'Error: Version "{}" doesn\'t exist.'.format(not_existing_version) in error_message[0]
    except BaseException as e:
        pytest.fail(ERROR_MESSAGE + "epmcmbibpc-947:" + "\n" + e.message)
def test_restore_not_removed_object(self):
    """Restoring a live (not deleted) object without --version must be rejected (epmcmbibpc-948)."""
    target = 'cp://{}/{}'.format(self.bucket, self.test_file_1)
    try:
        pipe_storage_cp(self.test_file_1, target)
        restore_result = pipe_storage_restore(target, expected_status=1)
        stderr_lines = restore_result[1]
        expected_error = 'Error: Latest file version is not deleted. Please specify "--version" parameter.'
        assert expected_error in stderr_lines[0]
    except BaseException as e:
        pytest.fail(ERROR_MESSAGE + "epmcmbibpc-948:" + "\n" + e.message)
def test_restore_latest_version(self):
    """Explicitly restoring the version that is already latest must be rejected (epmcmbibpc-949)."""
    destination = 'cp://{}/{}'.format(self.bucket, self.test_file_1)
    try:
        pipe_storage_cp(self.test_file_1, destination)
        # Fetch the current (latest) version id from the versioned listing.
        pipe_output = assert_and_filter_first_versioned_listing_line(
            get_pipe_listing(destination, versioning=True))
        version_id = get_latest_version(pipe_output)
        error_message = pipe_storage_restore(destination, version=version_id, expected_status=1)[1]
        assert 'Version "{}" is already the latest version'.format(version_id)\
            in error_message[0]
    except BaseException as e:
        pytest.fail(ERROR_MESSAGE + "epmcmbibpc-949:" + "\n" + e.message)
def test_role_model_marked_object_deletion(self):
    """A read/write user may soft-delete, but versioned listing stays admin-only (epmcmbibpc-889)."""
    destination = 'cp://{}/{}'.format(self.bucket, self.test_file_1)
    try:
        # Grant the restricted user read and write (but not versioning) access.
        set_storage_permission(self.user, self.bucket, allow='r')
        set_storage_permission(self.user, self.bucket, allow='w')
        pipe_storage_cp(os.path.abspath(self.test_file_1), destination, token=self.token)
        pipe_storage_rm(destination, token=self.token)
        pipe_output = get_pipe_listing(self.path_to_bucket)
        assert len(pipe_output) == 0
        # Versioned listing with the restricted token must be denied.
        pipe_output = pipe_storage_ls(self.path_to_bucket, expected_status=1, token=self.token, versioning=True)[1]
        assert "Access is denied" in pipe_output[0]
    except BaseException as e:
        pytest.fail(ERROR_MESSAGE + "epmcmbibpc-889:" + "\n" + e.message)
def test_role_model_object_versions(self):
    """A read/write user sees only the latest version; versioned listing is denied (epmcmbibpc-891)."""
    destination = 'cp://{}/{}'.format(self.bucket, self.test_file_1)
    try:
        set_storage_permission(self.user, self.bucket, allow='r')
        set_storage_permission(self.user, self.bucket, allow='w')
        # Two uploads to the same key create two versions of the object.
        pipe_storage_cp(self.test_file_1, destination, token=self.token, expected_status=0)
        pipe_storage_cp(self.test_file_2, destination, force=True, token=self.token, expected_status=0)
        actual_output = get_pipe_listing(self.path_to_bucket, token=self.token)
        # Only the latest upload (size 14) is visible in the plain listing.
        expected_output = [
            f(self.test_file_1, 14)
        ]
        compare_listing(actual_output, expected_output, 1)
        actual_output = pipe_storage_ls(self.path_to_bucket, expected_status=1, token=self.token, versioning=True)[1]
        assert "Access is denied" in actual_output[0]
    except BaseException as e:
        pytest.fail(ERROR_MESSAGE + "epmcmbibpc-891:" + "\n" + e.message)
def test_role_model_restore_latest_version(self):
    """A read/write user must not be able to restore deleted objects (epmcmbibpc-894)."""
    destination = 'cp://{}/{}'.format(self.bucket, self.test_file_1)
    try:
        set_storage_permission(self.user, self.bucket, allow='r')
        set_storage_permission(self.user, self.bucket, allow='w')
        pipe_storage_cp(self.test_file_1, destination, token=self.token)
        pipe_storage_rm(destination, token=self.token)
        pipe_output = get_pipe_listing(self.path_to_bucket)
        assert len(pipe_output) == 0
        # Restore requires elevated rights, so the restricted token is rejected.
        pipe_output = pipe_storage_restore(self.path_to_bucket, expected_status=1, token=self.token)[1]
        assert "Access is denied" in pipe_output[0]
    except BaseException as e:
        pytest.fail(ERROR_MESSAGE + "epmcmbibpc-894:" + "\n" + e.message)
def test_role_model_object_hard_deletion(self):
    """A read/write user must not be allowed to hard-delete an object (epmcmbibpc-892)."""
    target = 'cp://{}/{}'.format(self.bucket, self.test_file_1)
    try:
        for permission in ('r', 'w'):
            set_storage_permission(self.user, self.bucket, allow=permission)
        pipe_storage_cp(os.path.abspath(self.test_file_1), target, token=self.token)
        rm_result = pipe_storage_rm(target, args=['--hard-delete'], token=self.token, expected_status=1)
        stderr_lines = rm_result[1]
        assert "Access is denied" in stderr_lines[0]
    except BaseException as e:
        pytest.fail(ERROR_MESSAGE + "epmcmbibpc-892:" + "\n" + e.message)
def test_role_model_restore_marked_for_deletion_non_empty_folder(self):
    """A read/write user may soft-delete a folder but must not restore it (epmcmbibpc-893)."""
    destination_1 = 'cp://{}/{}/{}/{}'.format(self.bucket, self.test_folder_1, self.test_folder_2, self.test_file_1)
    destination_2 = 'cp://{}/{}/{}'.format(self.bucket, self.test_folder_1, self.test_file_1)
    try:
        set_storage_permission(self.user, self.bucket, allow='r')
        set_storage_permission(self.user, self.bucket, allow='w')
        # Setup is done without the restricted token (admin context).
        pipe_storage_cp(os.path.abspath(self.test_file_1), destination_1, expected_status=0)
        pipe_storage_cp(os.path.abspath(self.test_file_1), destination_2, expected_status=0)
        # Soft delete with the restricted token is allowed ...
        pipe_storage_rm('cp://{}/{}'.format(self.bucket, self.test_folder_1), recursive=True, token=self.token,
                        expected_status=0)
        pipe_output = get_pipe_listing(self.path_to_bucket)
        assert len(pipe_output) == 0
        # ... but restore with the same token must be denied.
        pipe_output = pipe_storage_restore(self.path_to_bucket, expected_status=1, token=self.token)[1]
        assert "Access is denied" in pipe_output[0]
    except BaseException as e:
        pytest.fail(ERROR_MESSAGE + "epmcmbibpc-893:" + "\n" + e.message)
def test_role_model_hard_deletion_marked_non_empty_folder(self):
    """A read/write user must not be allowed to hard-delete a folder tree (epmcmbibpc-895)."""
    destination_1 = 'cp://{}/{}/{}/{}'.format(self.bucket, self.test_folder_1, self.test_folder_2, self.test_file_1)
    destination_2 = 'cp://{}/{}/{}'.format(self.bucket, self.test_folder_1, self.test_file_1)
    try:
        set_storage_permission(self.user, self.bucket, allow='r')
        set_storage_permission(self.user, self.bucket, allow='w')
        # Setup is done without the restricted token (admin context).
        pipe_storage_cp(os.path.abspath(self.test_file_1), destination_1, expected_status=0)
        pipe_storage_cp(os.path.abspath(self.test_file_1), destination_2, expected_status=0)
        # Hard delete with the restricted token must be denied.
        pipe_output = pipe_storage_rm('cp://{}/{}'.format(self.bucket, self.test_folder_1), recursive=True,
                                      token=self.token, expected_status=1, args=['--hard-delete'])[1]
        assert "Access is denied" in pipe_output[0]
    except BaseException as e:
        pytest.fail(ERROR_MESSAGE + "epmcmbibpc-895:" + "\n" + e.message)
def test_ls_with_paging(self):
    """--paging must cap listing output at the page size in every listing mode (epmcmbibpc-1024)."""
    try:
        # Create 6 entries at the bucket root: 3 files and 3 folders.
        pipe_storage_cp(os.path.abspath(self.test_file_1), "{}/{}".format(self.path_to_bucket, self.test_file_1))
        pipe_storage_cp(os.path.abspath(self.test_file_1), "{}/{}/{}".format(self.path_to_bucket,
                                                                             self.test_folder_1, self.test_file_1))
        pipe_storage_cp(os.path.abspath(self.test_file_1), "{}/{}".format(self.path_to_bucket, self.test_file_2))
        pipe_storage_cp(os.path.abspath(self.test_file_1), "{}/{}/{}".format(self.path_to_bucket,
                                                                             self.test_folder_2, self.test_file_1))
        pipe_storage_cp(os.path.abspath(self.test_file_1), "{}/{}".format(self.path_to_bucket, self.test_file_3))
        pipe_storage_cp(os.path.abspath(self.test_file_1), "{}/{}/{}".format(self.path_to_bucket,
                                                                             self.test_folder_3, self.test_file_1))
        # A page size of 3 must yield exactly 3 rows, for plain, recursive,
        # detailed, and versioned listings alike.
        pipe_output = get_pipe_listing(self.path_to_bucket, show_details=False, paging=str(3))
        assert len(pipe_output) == 3
        pipe_output = get_pipe_listing(self.path_to_bucket, show_details=False, recursive=True, paging=str(3))
        assert len(pipe_output) == 3
        pipe_output = get_pipe_listing(self.path_to_bucket, paging=str(3))
        assert len(pipe_output) == 3
        pipe_output = filter_versioned_lines(get_pipe_listing(self.path_to_bucket, show_details=False,
                                                              versioning=True, paging=str(3)))
        assert len(pipe_output) == 3
        pipe_output = filter_versioned_lines(get_pipe_listing(self.path_to_bucket, versioning=True, paging=str(3)))
        assert len(pipe_output) == 3
        pipe_output = filter_versioned_lines(get_pipe_listing(self.path_to_bucket, versioning=True, recursive=True,
                                                              paging=str(3)))
        assert len(pipe_output) == 3
    except BaseException as e:
        pytest.fail(ERROR_MESSAGE + "epmcmbibpc-1024:" + "\n" + e.message)
def test_copy_with_similar_keys(self):
    """Deleting a key must not affect another key that shares its prefix (EPMCMBIBPC-1337)."""
    try:
        source = os.path.abspath(self.test_file_1)
        # Two keys where one is a prefix of the other (same name, one without extension).
        destination = "cp://{}/{}".format(self.bucket, self.test_file_1)
        destination_without_extension = "cp://{}/{}".format(self.bucket, self.test_file_1_without_extension)
        pipe_storage_cp(source, destination)
        assert object_exists(self.bucket, self.test_file_1)
        pipe_storage_cp(source, destination_without_extension)
        assert object_exists(self.bucket, self.test_file_1_without_extension)
        # Removing the prefix key must leave the longer key untouched.
        pipe_storage_rm(destination_without_extension)
        assert not object_exists(self.bucket, self.test_file_1_without_extension)
        assert object_exists(self.bucket, self.test_file_1)
        pipe_storage_rm(destination)
        assert not object_exists(self.bucket, self.test_file_1)
    except AssertionError as e:
        pytest.fail("Test case {} failed. {}".format("EPMCMBIBPC-1337", e.message))
def test_rm_files_with_common_keys(self):
    """Hard-deleting a key that is a prefix of another must keep the other key (epmcmbibpc-1283)."""
    try:
        pipe_storage_cp(os.path.abspath(self.test_file_1), "{}/{}".format(self.path_to_bucket,
                                                                          self.test_file_1_without_extension))
        pipe_storage_cp(os.path.abspath(self.test_file_1), "{}/{}".format(self.path_to_bucket, self.test_file_1))
        # Non-recursive hard delete of the prefix key only.
        # NOTE(review): expected_status=None presumably skips the exit-code check - confirm.
        pipe_storage_rm('cp://{}/{}'.format(self.bucket, self.test_file_1_without_extension),
                        args=['--hard-delete'], recursive=False, expected_status=None)
        pipe_output = get_pipe_listing(self.path_to_bucket)
        assert len(pipe_output) == 1
    except BaseException as e:
        pytest.fail(ERROR_MESSAGE + "epmcmbibpc-1283:" + "\n" + e.message)
def test_list_version(self):
    """Versioned listing must show both versions of a twice-uploaded file with correct version ids."""
    destination = "cp://{}/{}".format(self.bucket, self.test_file_1)
    try:
        # Upload the same file twice to produce two versions of one key.
        pipe_storage_cp(os.path.abspath(self.test_file_1), destination)
        pipe_storage_cp(os.path.abspath(self.test_file_1), destination, force=True)
        actual_output = assert_and_filter_first_versioned_listing_line(
            get_pipe_listing(destination, show_details=True, versioning=True))
        expected_files = [
            f(self.test_file_1, 10, added=True, latest=True),
            f(self.test_file_1, 10, added=True)
        ]
        # Attach the real version ids so compare_listing can verify them.
        expected_versions = get_versions(self.bucket, self.test_file_1)
        for index, expected_file in enumerate(expected_files):
            expected_file.version_id = expected_versions[index]
        compare_listing(actual_output, expected_files, 2, show_details=True, check_version=True, sort=False)
    except AssertionError as e:
        pytest.fail(ERROR_MESSAGE + ":\n" + e.message)
def test_list_deleted_version(self):
    """Versioned listing of a soft-deleted file must show the delete marker and the original."""
    destination = "cp://{}/{}".format(self.bucket, self.test_file_1)
    try:
        pipe_storage_cp(os.path.abspath(self.test_file_1), destination)
        pipe_storage_rm(destination)
        actual_output = assert_and_filter_first_versioned_listing_line(
            get_pipe_listing(destination, show_details=True, versioning=True))
        # Latest entry is the delete marker; the uploaded content remains below it.
        expected_files = [
            f(self.test_file_1, deleted=True, latest=True),
            f(self.test_file_1, 10, added=True)
        ]
        expected_versions = get_versions(self.bucket, self.test_file_1)
        for index, expected_file in enumerate(expected_files):
            expected_file.version_id = expected_versions[index]
        compare_listing(actual_output, expected_files, 2, show_details=True, check_version=True)
    except AssertionError as e:
        pytest.fail(ERROR_MESSAGE + ":\n" + e.message)
| StarcoderdataPython |
1765138 | import unittest
from datetime import datetime
from datetime import timedelta
from programy.services.library.base import PythonAPIService
from programy.services.library.base import PythonAPIServiceException
from programy.services.config import ServiceConfiguration
class PythonAPIServiceExceptionTests(unittest.TestCase):
    """Tests for PythonAPIServiceException construction and string conversion."""

    def test_init(self):
        exception = PythonAPIServiceException("Service failed")
        self.assertIsNotNone(exception)
        # assertEquals is a deprecated alias; use the canonical assertEqual.
        self.assertEqual('Service failed', str(exception))
class PythonAPIServiceTests(unittest.TestCase):
    """Tests for the payload-construction helpers of PythonAPIService."""

    def _create_service(self):
        """Build a minimal concrete service; the base class leaves _response_to_json abstract."""

        class MockPythonAPIService(PythonAPIService):

            def __init__(self, configuration):
                PythonAPIService.__init__(self, configuration)

            def _response_to_json(self, api, response):
                return response

        configuration = ServiceConfiguration.from_data("rest", "test", "category")
        return MockPythonAPIService(configuration)

    def _assert_base_payload(self, data, status, started):
        """Assert the envelope fields shared by every payload kind.

        Deduplicates the five assertions the original test methods repeated
        verbatim (status, start time, speed, service and category).
        """
        self.assertTrue('response' in data)
        self.assertEqual(data['response']['status'], status)
        self.assertEqual(data['response']['started'], started.strftime("%d/%m/%Y, %H:%M:%S"))
        self.assertEqual(data['response']['speed'], "3.0ms")
        self.assertEqual(data['response']['service'], "test")
        self.assertEqual(data['response']['category'], "category")

    def test_init(self):
        service = self._create_service()
        self.assertIsNotNone(service)

    def test_add_base_payload(self):
        service = self._create_service()
        self.assertIsNotNone(service)
        data = {"response": {}}
        started = datetime.now()
        speed = timedelta(microseconds=3000)
        service._add_base_payload(data, "success", started, speed)
        self._assert_base_payload(data, "success", started)

    def test_create_success_payload(self):
        service = self._create_service()
        self.assertIsNotNone(service)
        started = datetime.now()
        speed = timedelta(microseconds=3000)
        data = service._create_success_payload("search", started, speed, "search results")
        self._assert_base_payload(data, "success", started)
        self.assertTrue('payload' in data['response'])
        self.assertEqual(data['response']['payload'], "search results")

    def test_create_failure_payload(self):
        service = self._create_service()
        self.assertIsNotNone(service)
        started = datetime.now()
        speed = timedelta(microseconds=3000)
        data = service._create_failure_payload("search", started, speed)
        self._assert_base_payload(data, "failure", started)
        self.assertTrue('payload' in data['response'])
        self.assertEqual(data['response']['payload']['type'], "general")

    def test_create_exception_failure_payload(self):
        service = self._create_service()
        self.assertIsNotNone(service)
        started = datetime.now()
        speed = timedelta(microseconds=3000)
        data = service._create_exception_failure_payload("search", started, speed, PythonAPIServiceException("Service failure"))
        self._assert_base_payload(data, "failure", started)
        self.assertTrue('payload' in data['response'])
        self.assertEqual(data['response']['payload']['type'], "general")
        self.assertEqual(data['response']['payload']['error'], "Service failure")
| StarcoderdataPython |
1690101 | <reponame>dcampos/nvim-ulf
from .lfx import LFX, RequestHelper
| StarcoderdataPython |
1669829 | <reponame>Pablo-RodriguezOrtiz/Small-projects
# ------------------------------------------------------------------------
#
#
# Made with python 3.8.8
#
# As professor requested, we used "/" to separate characters and "//" to separate words.
# ------------------------------------------------------------------------
def amorse(frase=None):
    """Encode a phrase as International Morse code.

    Characters are separated by "/" and words by "//" (a space emits one
    extra "/" on top of the previous letter's separator).

    :param frase: text to encode; when None (the default, preserving the
        original interactive behaviour) the phrase is read from stdin.
    :return: the Morse-encoded string.
    """
    # International Morse code table (ITU-R M.1677-1).
    # BUGFIX: "j" used a middle dot ("·---" -> ".---"), "p" used
    # underscores (".__." -> ".--.") and "$" mixed separators
    # ("..._.._" -> "...-..-").
    codigo_morse = {
        "a": ".-", "b": "-...", "c": "-.-.", "d": "-..", "e": ".", "f": "..-.",
        "g": "--.", "h": "....", "i": "..", "j": ".---", "k": "-.-", "l": ".-..",
        "m": "--", "n": "-.", "ñ": "--.--", "o": "---", "p": ".--.", "q": "--.-",
        "r": ".-.", "s": "...", "t": "-", "u": "..-", "v": "...-", "w": ".--",
        "x": "-..-", "y": "-.--", "z": "--..",
        "0": "-----", "1": ".----", "2": "..---", "3": "...--", "4": "....-",
        "5": ".....", "6": "-....", "7": "--...", "8": "---..", "9": "----.",
        ".": ".-.-.-", ",": "--..--", "?": "..--..", "!": "-.-.--", "\"": ".-..-.",
        "\'": ".----.", "+": ".-.-.", "-": "-....-", "/": "-..-.", ":": "---...",
        "=": "-...-", "_": "..--.-", "$": "...-..-", "@": ".--.-.", "&": ".-...",
        "(": "-.--.", ")": "-.--.-"}
    codif = ""  # accumulates the encoded message
    if frase is None:
        frase = input("Introduce la frase a codificar: ")
    x = frase.lower()
    # Normalise accented vowels so they match the plain-letter table entries.
    x = x.replace("á", "a").replace("é", "e").replace("í", "i").replace("ó", "o").replace("ú", "u")
    for i in range(len(x)):
        if x[i] in codigo_morse and i == len(x) - 1:
            codif += codigo_morse[x[i]]  # last character: no trailing separator
        elif x[i] in codigo_morse:
            codif += codigo_morse[x[i]] + "/"
        elif x[i] == " ":
            codif += "/"  # combined with the previous "/" this yields the "//" word gap
        else:
            codif += x[i]  # unknown characters pass through unchanged
    return codif.strip()
amorse() | StarcoderdataPython |
3248505 | <gh_stars>1-10
# Database / design-document / view name constants.
# NOTE(review): the design/view terminology suggests a CouchDB backend - confirm.
userdb="user"
user_design="user"
user_view="user"
# "_bak" suffixed names point at backup copies of the flows and switches databases.
flowsdb="flows_bak"
flows_design="flows"
flows_view="flow"
switches="switches_bak"
switches_design="switches"
switches_view="switch"
| StarcoderdataPython |
68611 | from pyspark.sql import SparkSession
# Spark SQL demo over the fake-friends dataset: load a CSV into a DataFrame
# and exercise select / filter / groupBy / column arithmetic.
spark = SparkSession.builder.appName("SparkSQL").getOrCreate()
# header: the first CSV line holds column names.
# inferSchema: let Spark deduce each column's type from the data.
people = spark.read.option("header", "true").option("inferSchema", "true")\
    .csv("file:///home/sambiase/courses/SparkCourse/fakefriends-header.csv")
print("Here is our inferred schema:")
people.printSchema()
print("Let's display the name column:")
people.select("name").show()  # using the select from the dataframe instead of a sql query
print("Filter out anyone over 21:")
people.filter(people.age < 21).show()
print("Group by age")
people.groupBy("age").count().show()  # age is a column name inferred in the Schema
print("Make everyone 10 years older:")
people.select(people.name, people.age + 10).show()
spark.stop()
| StarcoderdataPython |
3343531 | # CHECK-JQ: .scope == {}
# CHECK-TREE: (#unit)
()
| StarcoderdataPython |
4827929 | #!/bin/python
######################################
# Generate contact maps from bam files
# and fragment lists
#
# Author: <NAME> (28/11/2014)
######################################
import os, sys, re
import traceback
from optparse import OptionParser
import fileinput
import datetime
from readData import *
from quicksect import IntervalTree
import gzip
import numpy as np
import scipy.sparse as ss
# manage option and arguments processing
def main():
    """Parse and validate command-line options, then launch contact-map generation."""
    global options
    global args
    usage = '''usage: %prog [options] [bamFile]+

    generates (single locus) fragmentCounts and (pairwise) contactCount files
    for a (set of) aligned files.
    NOTE: If multiple libraries are input they are simply pooled, i.e. read-pairs are
    summed across all libraries - this may bias results to a libraries sequenced deeper)
    '''
    parser = OptionParser(usage)
    parser.add_option("-q", "--quiet", action="store_false", dest="verbose", default=True,
                      help="don't print status messages to stdout")
    parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False,
                      help="print status messages to stdout")
    parser.add_option("-V", "--veryverbose", action="store_true", dest="vverbose", default=False,
                      help="print lots of status messages to stdout")
    parser.add_option("-P", "--CPU-processes", type="int", dest="cpus", default=-1,
                      help="number of CPU threads to use, -1 for all available [default -1]")
    parser.add_option("-O", "--onlycis", action="store_true", dest="onlycis", default=False,
                      help="only consider intra chromosomal contacts (cis)")
    parser.add_option("-M", "--multicount", action="store_true", dest="multicount", default=True,
                      help="count read that maps onces for each fragment it maps to (potentially several)")
    parser.add_option("-g", "--genomeFragmentFile", type="string", dest="genomeFragmentFile", default="",
                      help="file containing the genome fragments after digestion with the restriction enzyme(s), generated by hicup")
    parser.add_option("-f", "--fragmentAggregation", type="int", dest="fragmentAggregation", default=1,
                      help="number of restriction enzyme fragments to concat")
    parser.add_option("-r", "--resolution", type=int, dest="resolution", default=1000000,
                      help="size of a fragment in bp if no genomeFragmentFile is given")
    parser.add_option("-c", "--chromsizes", type="string", dest="chromSizes", default="",
                      help="tab separated file containing chromosome sizes")
    parser.add_option("-C", "--chrompattern", type="string", dest="chromPattern", default="",
                      help="pattern of chromosomes to filter for [default all]")
    parser.add_option("-m", "--mappability", type="string", dest="mappability", default="",
                      help="bigwig containing mappability score for a given tag size")
    parser.add_option("-o", "--outputDir", type="string", dest="outputDir", default="",
                      help="output directory [default: %default]")
    parser.add_option("-n", "--outputFilename", type="string", dest="outputFilename", default="",
                      help="output filename [default: extracted from first input file")
    parser.add_option("-t", "--tmpDir", type="string", dest="tmpDir", default="/tmp",
                      help="directory for temp files [default: %default]")
    parser.add_option("-s", "--sep", type="string", dest="separator", default=" ",
                      help="delimiter to use when reading the input [default: %default]")
    parser.add_option("--create2DMatrix", action="store_true", dest="create2DMatrix", default=False,
                      help="create a tab separated 2D matrix file")
    parser.add_option("--create2DMatrixPerChr", action="store_true", dest="create2DMatrixPerChr", default=False,
                      help="create a tab separated 2D matrix file one per Chromosome")
    parser.add_option("--inputIsFragmentPairs", action="store_true", dest="inputIsFragmentPairs", default=False,
                      help="input is a gzipped fragment pair file rather than bam files")
    parser.add_option("--inputIsReadPairs", type="string", dest="inputIsReadPairs", default="",
                      help="gzipped files with mapped read pair information, requires 4 column identifier corresponding to chrA,posA,chrB,posB,chrPrefix (separated buy comma), e.g. 2,3,6,7,chr")
    (options, args) = parser.parse_args()
    # ------------------------------------------------------------------ validation
    if (len(args) < 1):
        parser.print_help()
        parser.error("[ERROR] Incorrect number of arguments, need a dataset")
    if (options.fragmentAggregation < 1):
        parser.error("[ERROR] fragmentAggregation must be a positive integer, was :"+str(options.fragmentAggregation))
        sys.exit(1)  # NOTE: effectively unreachable - parser.error() already exits
    if (options.genomeFragmentFile != ""):
        # With an explicit digest file, only its existence needs checking.
        if (not os.path.isfile(options.genomeFragmentFile)):
            parser.error("[ERROR] genomeFragmentFile does not exist, was :"+str(options.genomeFragmentFile))
            sys.exit(1)
    else:
        # Without a digest file, fixed-resolution binning needs a positive
        # resolution and a chromosome-sizes file.
        if (options.resolution < 1):
            parser.error("[ERROR] resolution must be a positive integer, was :"+str(options.resolution))
            sys.exit(1)
        elif (options.chromSizes == "" or not os.path.isfile(options.chromSizes)):
            parser.error("[ERROR] chromSizes not given or not existing, was :"+str(options.chromSizes))
            sys.exit(1)
    if (options.outputDir != ""):
        options.outputDir += os.sep
    if (options.inputIsReadPairs != ""):
        # Expect 4 column indexes plus an optional chromosome-name prefix.
        if (len(options.inputIsReadPairs.split(",")) < 4 or len(options.inputIsReadPairs.split(",")) > 5):
            parser.error("[ERROR] inputIsReadPairs does not have 4 column indexes :"+str(options.inputIsReadPairs))
            sys.exit(1)
        elif (options.inputIsFragmentPairs):
            parser.error("[ERROR] inputIsFragmentPairs and inputIsReadPairs cannot be set at the same time")
            sys.exit(1)
    if (options.verbose):
        print >> sys.stdout, "genomeFragmentFile: %s" % (options.genomeFragmentFile)
        print >> sys.stdout, "fragmentAggregation: %s" % (options.fragmentAggregation)
        print >> sys.stdout, "resolution: %s" % (options.resolution)
        print >> sys.stdout, "chromSizes: %s" % (options.chromSizes)
        print >> sys.stdout, "outputDir: %s" % (options.outputDir)
        print >> sys.stdout, "tmpDir: %s" % (options.tmpDir)
    process()
def output(fragmentsMap , fragmentList, fragmentPairs, fragmentCount, fragmentsChrom, mappableList):
    '''
    outputs 2 files, the first containing
    "chr extraField fragmentMid marginalizedContactCount mappable? (0/1)"
    and the second containing:
    "chr1 fragmentMid1 chr2 fragmentMid2 contactCount"

    optionally output the 2D contact matrix
    '''
    if (options.verbose):
        print >> sys.stdout, "- %s START   : output data " % (timeStamp())
    # ---- file 1: per-fragment list -------------------------------------------
    if ( options.outputFilename != "" ):
        outfile1 = gzip.open(options.outputDir+options.outputFilename+".fragmentLists.gz","wb")
    else:
        outfile1 = gzip.open(options.outputDir+os.path.basename(args[0])+".fragmentLists.gz","wb")
    fragmentIds = fragmentsMap.keys()
    fragmentIds.sort()  # deterministic output order (Python 2 list sort)
    # Read chromosome lengths so fragment midpoints can be clamped to the
    # chromosome end.
    chromlen={}
    for line in fileinput.input([options.chromSizes]):
        (chrom, chromsize) =line.split("\t")[0:2]
        # check if chromosome needs to be filtered out or not
        if (options.chromPattern != "" and not re.match("^"+options.chromPattern+"$", chrom)):
            continue
        chromlen[chrom]=int(chromsize)
    for fragmentId in fragmentIds:
        (chrom, start, end) = fragmentsMap[fragmentId]
        if (options.vverbose):
            print >> sys.stdout, "- process %s %d-%d " % (chrom, start, end)
        contactCounts = fragmentList[fragmentId]
        # Without a mappability track, any fragment with at least one contact
        # is flagged as mappable.
        if (options.mappability == "" and contactCounts>0):
            mappableList[fragmentId]=1
        midpoint = min(int(0.5*(start+end)),chromlen[chrom])
        outfile1.write("%s\t%d\t%s\t%f\n" % (chrom, midpoint, "NA", mappableList[fragmentId]))
    outfile1.close()
    # ---- file 2: pairwise contact counts -------------------------------------
    if ( options.outputFilename != "" ):
        outfile2 = gzip.open(options.outputDir+options.outputFilename+".contactCounts.gz","wb")
    else:
        outfile2 = gzip.open(options.outputDir+os.path.basename(args[0])+".contactCounts.gz","wb")
    if (options.verbose):
        print "  Size of combined matrix: {}".format(fragmentPairs.data.nbytes + fragmentPairs.indptr.nbytes + fragmentPairs.indices.nbytes)
    # Iterate only the non-zero entries of the sparse contact matrix.
    (I,J,V) = ss.find(fragmentPairs)
    for row,col,contactCounts in np.nditer([I,J,V]):
        (chrom1, start1, end1) = fragmentsMap[int(row)]
        (chrom2, start2, end2) = fragmentsMap[int(col)]
        midpoint1 = min(int(0.5*(start1+end1)),chromlen[chrom1])
        midpoint2 = min(int(0.5*(start2+end2)),chromlen[chrom2])
        outfile2.write("%s\t%d\t%s\t%d\t%d\n" % (chrom1, midpoint1, chrom2, midpoint2, int(contactCounts)))
    outfile2.close()
    if (options.verbose):
        print >> sys.stdout, "- %s FINISHED: output data" % (timeStamp())
def process():
    """Top-level pipeline: build the fragment lookup, count reads, annotate mappability, write output."""
    global options
    global args

    # Fragment definitions either come from a restriction-digest file or are
    # generated at a fixed resolution from the chromosome sizes.
    if (options.genomeFragmentFile != ""):
        [ fragmentsMap, lookup_structure, fragmentCount, fragmentsChrom ] = createIntervalTreesFragmentFile(options)
    else:
        [ fragmentsMap, lookup_structure, fragmentCount, fragmentsChrom ] = createIntervalTreesFragmentResolution(options)

    [ fragmentList, fragmentPairs ] = countReadsPerFragment(fragmentCount, lookup_structure, options,args)

    if (options.mappability != ""):
        mappableList = createMappabilityList(fragmentsMap, options.mappability, fragmentCount, options)
    else:
        # BUGFIX: np.zeroes does not exist (AttributeError at runtime) and
        # np.float is a deprecated/removed alias of the builtin; use np.zeros
        # with the builtin float dtype instead.
        mappableList = np.zeros((fragmentCount,), dtype=float)

    output(fragmentsMap, fragmentList, fragmentPairs, fragmentCount, fragmentsChrom, mappableList)
######################################
# main
######################################
if __name__ == "__main__":
main()
| StarcoderdataPython |
4806138 | <reponame>team-cryptonewbies/crypto-contest-2021
import unittest
from stack_processor.lsh256 import LSHDigest
class TestLSH256Hash(unittest.TestCase):
    """Sanity check for the one-shot LSH-256 digest helper."""

    def test_digest(self):
        expected = "5fbf365daea5446a7053c52b57404d77a07a5f48a1f7c1963a0898ba1b714741"
        actual = LSHDigest.digest(data=b"abc").hex()
        self.assertEqual(actual, expected)
| StarcoderdataPython |
3292097 | #!/usr/bin/env python3
# Add gnomAD's site only HT globals and row annotations to the 38 liftover
import logging
logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
import hail as hl
from hail_scripts.utils.hail_utils import write_ht, import_table
hl.init()
# Per-data-type paths: the GRCh37 source table ('37'), the annotation-stripped
# GRCh38 liftover ('38'), and where to write the re-annotated liftover.
CONFIG = {
    'exomes': {
        '37': 'gs://gnomad-public/release/2.1.1/ht/exomes/gnomad.exomes.r2.1.1.sites.ht',
        '38': 'gs://gnomad-public/release/2.1.1/liftover_grch38/ht/exomes/gnomad.exomes.r2.1.1.sites.liftover_grch38.ht',
        'output_path': 'gs://seqr-reference-data/GRCh38/gnomad/gnomad.exomes.r2.1.1.sites.liftover_grch38.ht'
    },
    'genomes': {
        '37': 'gs://gnomad-public/release/2.1.1/ht/genomes/gnomad.genomes.r2.1.1.sites.ht',
        '38': 'gs://gnomad-public/release/2.1.1/liftover_grch38/ht/genomes/gnomad.genomes.r2.1.1.sites.liftover_grch38.ht',
        'output_path': 'gs://seqr-reference-data/GRCh38/gnomad/gnomad.genomes.r2.1.1.sites.liftover_grch38.ht'
    }
}
def liftover_annotations(gnomad_37_path, gnomad_38_path, annotated_gnomad_38_path):
    """
    The 38 liftover of gnomAD is stripped of all global and row annotations. This function
    annotates the 38 liftover with the original 37 annotations as the combined reference
    data script needs them.
    :param gnomad_37_path: path to 37 version of gnomAD for data type
    :param gnomad_38_path: path to 38 version of gnomAD for data type
    :param annotated_gnomad_38_path: path to annotated 38 version of gnomAD for data type
    :return: the annotated GRCh38 Table (also written to annotated_gnomad_38_path)
    """
    ht_37 = hl.read_table(gnomad_37_path)
    ht_38 = hl.read_table(gnomad_38_path)
    # Fall back to the lifted alleles where original_alleles is missing.
    ht_38 = ht_38.annotate(original_alleles=hl.or_else(ht_38.original_alleles, ht_38.alleles))
    # Re-key by the GRCh37 origin so rows can be joined against the 37 table.
    ht_38 = ht_38.key_by('original_locus', 'original_alleles')
    # Copy over the 37 row annotations and global annotations.
    ht_38 = ht_38.annotate(**ht_37[ht_38.key])
    ht_38 = ht_38.annotate_globals(**ht_37.index_globals())
    # Restore the GRCh38 key before writing.
    ht_38 = ht_38.key_by('locus', 'alleles')
    ht_38.write(annotated_gnomad_38_path, overwrite=True)
    return ht_38
def run():
    """Annotate the GRCh38 liftover for every gnomAD data type in CONFIG."""
    # Only the per-type config is needed; iterating .values() removes the
    # unused data_type binding of the original .items() loop.
    for config in CONFIG.values():
        ht = liftover_annotations(config["37"], config["38"], config['output_path'])
        ht.describe()
run()
| StarcoderdataPython |
1656589 | import glob
import pandas as pd
import random
import os
import shutil
# Merge the provided train/val/test CSV splits, then re-split every class
# into 500 training and 100 test images under train_/ and test_/.
dftr = pd.read_csv('train.csv')
dfvl = pd.read_csv('val.csv')
dfts = pd.read_csv('test.csv')


def _collect_by_label(frames):
    """Return a dict mapping label -> list of filenames across all frames."""
    by_label = {}
    for frame in frames:
        for _, row in frame.iterrows():
            by_label.setdefault(row['label'], []).append(row['filename'])
    return by_label


datas = _collect_by_label([dftr, dfvl, dfts])

for label, filenames in datas.items():
    # Every class is expected to contribute exactly 600 images in total.
    assert len(filenames) == 600
    random.shuffle(filenames)
    train_dir = os.path.join('train_', label)
    test_dir = os.path.join('test_', label)
    os.makedirs(train_dir, exist_ok=True)
    os.makedirs(test_dir, exist_ok=True)
    # The last 100 shuffled images become the test split; the rest train.
    for fname in filenames[:-100]:
        shutil.copy(os.path.join('images', fname), os.path.join(train_dir, fname))
    for fname in filenames[-100:]:
        shutil.copy(os.path.join('images', fname), os.path.join(test_dir, fname))
4842303 | """
Generate drag-and-drop configuration files for PIC devices through device support scripts
The generated device blob can be used to provide drag and drop programming support for kits with
onboard debuggers
"""
# Python 3 compatibility for Python 2
from __future__ import print_function
# args, logging
import argparse
import logging
import os
import sys
from pymcuprog.deviceinfo.configgenerator import ConfigGenerator
def main(args, loglevel):
    """
    Main program: generate the device XML config and emit it to a file or
    to stdout, depending on whether -f/--filename was given.
    """
    logging.basicConfig(format="%(levelname)s: %(message)s", level=loglevel)

    # Enforce XML output when a target filename was supplied.
    if args.filename and os.path.splitext(args.filename)[1] != '.xml':
        print("Target filename (-f) must be of type .xml")
        sys.exit(-1)

    generator = ConfigGenerator()
    generator.load_device_model(args.device, args.packpath)
    generator.process_programming_functions()
    contents = generator.get_xml_string()

    if not args.filename:
        print("Config generator output:")
        print(contents)
    else:
        print("Writing to file '{0:s}'".format(args.filename))
        with open(args.filename, "w") as xmlfile:
            xmlfile.write(contents)
    print("Done")
# Command-line interface: positional device name plus optional pack path,
# output file and verbosity flag.
PARSER = argparse.ArgumentParser(description="Config generator")
# Device to program
PARSER.add_argument("device",
                    help="device to use")
# Pack path
PARSER.add_argument("-p", "--packpath",
                    type=str,
                    help="path to pack")
# Optional output file; must end in .xml (enforced inside main()).
PARSER.add_argument("-f", "--filename",
                    type=str,
                    help="file to write")
PARSER.add_argument("-v", "--verbose",
                    help="verbose output",
                    action="store_true")
ARGUMENTS = PARSER.parse_args()

# Setup logging: -v raises verbosity from WARNING to INFO.
if ARGUMENTS.verbose:
    LOGGING_LEVEL = logging.INFO
else:
    LOGGING_LEVEL = logging.WARNING

main(ARGUMENTS, LOGGING_LEVEL)
| StarcoderdataPython |
1715401 | import hydra
from torch.utils.data import random_split
import torchvision
import torch
import math
from upcycle import cuda
from gnosis.distillation.classification import reduce_ensemble_logits
import copy
from torch.utils.data import TensorDataset, DataLoader
import random
import os
from torchvision.datasets.folder import ImageFolder
from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator
def get_loaders(config):
    """Build train/test DataLoaders for an image-classification dataset.

    Handles tiny-imagenet's ImageFolder layout specially; everything else
    is instantiated via hydra from ``config.dataset.init``. Optionally
    shuffles a contiguous span of training labels, subsamples the training
    set, and carves out a validation split.

    Returns:
        (train_loader, test_loader, train_splits)
    """
    train_transform, test_transform = get_augmentation(config)
    if config.dataset.name == 'tiny_imagenet':
        train_dataset = ImageFolder(
            root=os.path.join(hydra.utils.get_original_cwd(), config.dataset.root_dir, 'train'),
            transform=train_transform
        )
        test_dataset = ImageFolder(
            root=os.path.join(hydra.utils.get_original_cwd(), config.dataset.root_dir, 'val'),
            transform=test_transform
        )
    else:
        config.dataset.init.root = os.path.join(hydra.utils.get_original_cwd(), config.dataset.init.root)
        train_dataset = hydra.utils.instantiate(config.dataset.init, train=True, transform=train_transform)
        test_dataset = hydra.utils.instantiate(config.dataset.init, train=False, transform=test_transform)
    if config.dataset.shuffle_train_targets.enabled:
        # Label-noise experiment: permute the labels of a random contiguous
        # span covering `ratio` of the training set. The seed is re-applied
        # before the shuffle to reproduce historical results exactly.
        random.seed(config.dataset.shuffle_train_targets.seed)
        num_shuffled = int(len(train_dataset) * config.dataset.shuffle_train_targets.ratio)
        shuffle_start = random.randint(0, len(train_dataset) - num_shuffled)
        target_copy = train_dataset.targets[shuffle_start:shuffle_start + num_shuffled]
        random.seed(config.dataset.shuffle_train_targets.seed)  # for backwards-compatibility
        random.shuffle(target_copy)
        train_dataset.targets[shuffle_start:shuffle_start + num_shuffled] = target_copy
    subsample_ratio = config.dataset.subsample.ratio
    if subsample_ratio < 1.0:
        train_splits = split_dataset(train_dataset, subsample_ratio,
                                     config.dataset.subsample.seed)
        train_dataset = train_splits[config.dataset.subsample.split]
    else:
        train_splits = [train_dataset]
    if config.trainer.eval_dataset == 'val':
        # Replace the test set with a held-out 20% of the training data.
        train_dataset, test_dataset = split_dataset(train_dataset, 0.8)
    train_loader = hydra.utils.instantiate(config.dataloader, dataset=train_dataset)
    test_loader = hydra.utils.instantiate(config.dataloader, dataset=test_dataset)
    return train_loader, test_loader, train_splits
def get_text_loaders(config):
    """Build train/test DataLoaders for a torchtext classification dataset.

    A vocabulary is built from the training split; tokenization, label
    encoding, truncation and padding happen inside ``collate_batch``. The
    training set may be subsampled via ``config.dataset.subsample``.

    Returns:
        (train_loader, test_loader, train_splits, vocab_size)
    """
    # Fixed: the tokenizer was previously created twice in a row.
    tokenizer = get_tokenizer('basic_english')
    train_iter = hydra.utils.instantiate(config.dataset.init, split='train')

    def yield_tokens(data_iter):
        for _, text in data_iter:
            yield tokenizer(text)

    # Vocabulary comes from the training split only; rare tokens map to <unk>.
    vocab = build_vocab_from_iterator(
        yield_tokens(train_iter), min_freq=config.dataset.min_freq, specials=["<unk>"])
    vocab.set_default_index(vocab["<unk>"])
    text_pipeline = lambda x: vocab(tokenizer(x))
    label_pipeline = lambda x: 0 if x == 'neg' else 1

    def collate_batch(batch):
        """Tokenize, truncate to max_len, pad, and prepend sequence lengths."""
        label_list, text_list, text_lens = [], [], []
        for (_label, _text) in batch:
            label_list.append(label_pipeline(_label))
            processed_text = torch.tensor(text_pipeline(_text), dtype=torch.int64)
            processed_text = processed_text[:config.dataset.max_len]
            text_list.append(processed_text)
            text_lens.append(processed_text.size(0))
        text_list = torch.nn.utils.rnn.pad_sequence(text_list)
        label_list = torch.tensor(label_list, dtype=torch.int64)
        text_lens = torch.tensor(text_lens, dtype=torch.int64)
        # Row 0 carries the original (pre-padding) lengths for the model.
        input_list = torch.cat([text_lens[None, :], text_list])
        return input_list, label_list

    train_dataset = list(hydra.utils.instantiate(config.dataset.init, split='train'))
    test_dataset = list(hydra.utils.instantiate(config.dataset.init, split='test'))
    subsample_ratio = config.dataset.subsample.ratio
    if subsample_ratio < 1.0:
        train_splits = split_dataset(train_dataset, subsample_ratio,
                                     config.dataset.subsample.seed)
        train_dataset = train_splits[config.dataset.subsample.split]
    else:
        train_splits = [train_dataset]
    train_loader = hydra.utils.instantiate(config.dataloader, dataset=train_dataset, collate_fn=collate_batch)
    test_loader = hydra.utils.instantiate(config.dataloader, dataset=test_dataset, collate_fn=collate_batch)
    return train_loader, test_loader, train_splits, len(vocab)
def split_dataset(dataset, ratio, seed=None):
    """Randomly split ``dataset`` into two parts sized ratio : (1 - ratio).

    Args:
        dataset: any sized dataset accepted by ``torch.utils.data.random_split``.
        ratio: fraction (0..1) of examples assigned to the first part.
        seed: optional RNG seed for a reproducible split.

    Returns:
        A list ``[first_subset, second_subset]``.
    """
    total = len(dataset)
    first = int(total * ratio)
    generator = torch.Generator()
    if seed is not None:
        generator.manual_seed(seed)
    return random_split(dataset, [first, total - first], generator)
def get_augmentation(config):
    """Assemble torchvision train/test transform pipelines from the config.

    Augmentation transforms (train only) are hydra-instantiated from
    ``config.augmentation.transforms_list``; both pipelines end with
    ToTensor plus the configured normalization ('zscore' or 'unitcube').

    Returns:
        (train_transform, test_transform)
    """
    assert 'augmentation' in config.keys()
    transforms_list = []
    if config.augmentation.transforms_list is None:
        pass
    elif len(config.augmentation.transforms_list) > 0:
        transforms_list = [hydra.utils.instantiate(config.augmentation[name])
                           for name in config.augmentation["transforms_list"]]
    # With p < 1, the whole augmentation list is applied stochastically.
    if 'random_apply' in config.augmentation.keys() and config.augmentation.random_apply.p < 1:
        transforms_list = [
            hydra.utils.instantiate(config.augmentation.random_apply, transforms=transforms_list)]
    normalize_transforms = [
        torchvision.transforms.ToTensor(),
    ]
    if config.augmentation.normalization == 'zscore':
        # mean subtract and scale to unit variance
        normalize_transforms.append(
            torchvision.transforms.Normalize(config.dataset.statistics.mean_statistics,
                                             config.dataset.statistics.std_statistics)
        )
    elif config.augmentation.normalization == 'unitcube':
        # rescale values to [-1, 1]
        min_vals = config.dataset.statistics.min
        max_vals = config.dataset.statistics.max
        offset = [0.5 * (min_val + max_val) for min_val, max_val in zip(min_vals, max_vals)]
        scale = [(max_val - min_val) / 2 for max_val, min_val in zip(max_vals, min_vals)]
        normalize_transforms.append(
            torchvision.transforms.Normalize(offset, scale)
        )
    train_transform = torchvision.transforms.Compose(transforms_list + normalize_transforms)
    test_transform = torchvision.transforms.Compose(normalize_transforms)
    return train_transform, test_transform
def make_real_teacher_data(train_dataset, teacher, batch_size):
    """Label every example of ``train_dataset`` with the teacher's logits.

    Iterates the dataset in order (no shuffling) so outputs align with the
    dataset's indexing.

    Returns:
        (inputs, targets, teacher_logits) concatenated CPU tensors.
    """
    train_loader = torch.utils.data.DataLoader(train_dataset, shuffle=False, batch_size=batch_size)
    inputs, targets, teacher_logits = [], [], []
    teacher.eval()
    for input_batch, target_batch in train_loader:
        input_batch = cuda.try_cuda(input_batch)
        with torch.no_grad():
            batch_logits = teacher(input_batch)  # [batch_size, num_teachers, ... ]
            # Collapse the ensemble dimension into one distribution per example.
            batch_logits = reduce_ensemble_logits(batch_logits)
        inputs.append(input_batch.cpu())
        targets.append(target_batch.cpu())
        teacher_logits.append(batch_logits.cpu())
    inputs = torch.cat(inputs, dim=0)
    targets = torch.cat(targets, dim=0)
    teacher_logits = torch.cat(teacher_logits, dim=0)
    return inputs, targets, teacher_logits
def make_synth_teacher_data(generator, teacher, dataset_size, batch_size):
    """Sample ``dataset_size`` synthetic inputs and label them with the teacher.

    Returns:
        (inputs, argmax_labels, logits) CPU tensors trimmed to exactly
        ``dataset_size`` rows, or None when ``dataset_size`` is 0.
    """
    if dataset_size == 0:
        return None
    num_rounds = math.ceil(dataset_size / batch_size)
    synth_inputs, teacher_labels, teacher_logits = [], [], []
    teacher.eval()
    generator.eval()
    for _ in range(num_rounds):
        with torch.no_grad():
            input_batch = generator.sample(batch_size)
            logit_batch = teacher(input_batch)
            # Collapse the ensemble dimension before taking the hard label.
            logit_batch = reduce_ensemble_logits(logit_batch)
            label_batch = logit_batch.argmax(dim=-1)
        synth_inputs.append(input_batch.cpu())
        teacher_logits.append(logit_batch.cpu())
        teacher_labels.append(label_batch.cpu())
    # The last round may overshoot; trim everything to exactly dataset_size.
    synth_inputs = torch.cat(synth_inputs, dim=0)[:dataset_size]
    synth_targets = torch.cat(teacher_labels, dim=0)[:dataset_size]
    synth_logits = torch.cat(teacher_logits, dim=0)[:dataset_size]
    return synth_inputs, synth_targets, synth_logits
def get_distill_loaders(config, train_loader, synth_data):
    """Split the configured batch size between real and synthetic loaders.

    The batch size is divided proportionally to the number of real vs.
    synthetic examples so one combined step sees a full batch overall.

    Returns:
        (real_loader, synth_loader); synth_loader is None when no
        synthetic data was provided.
    """
    total_batch = config.dataloader.batch_size
    num_real = len(train_loader.dataset)
    num_synth = 0 if synth_data is None else synth_data[0].size(0)
    real_ratio = num_real / (num_real + num_synth)
    real_batch = math.ceil(real_ratio * total_batch)
    real_loader = DataLoader(train_loader.dataset, shuffle=True, batch_size=real_batch)
    if num_synth == 0:
        return real_loader, None
    synth_loader = DataLoader(TensorDataset(*synth_data), shuffle=True,
                              batch_size=total_batch - real_batch)
    return real_loader, synth_loader
def get_logits(model, data_loader):
    """Run ``model`` over every batch of ``data_loader``; return CPU logits.

    Three-dimensional (ensemble) outputs are reduced to one distribution
    per example before collection.
    """
    model.eval()
    collected = []
    for minibatch in data_loader:
        inputs = cuda.try_cuda(minibatch[0])
        with torch.no_grad():
            batch_logits = model(inputs)
        if batch_logits.dim() == 3:
            batch_logits = reduce_ensemble_logits(batch_logits)
        collected.append(batch_logits.cpu())
    return torch.cat(collected, dim=0)
def save_logits(config, student, teacher, generator, logger):
    """Evaluate student and teacher on the distill and test sets and
    persist all four logit tensors through ``logger.save_obj``.

    The config is deep-copied so the augmentation/shuffling overrides used
    for deterministic evaluation don't leak back to the caller.
    """
    print('==== saving logits ====')
    config = copy.deepcopy(config)
    config.augmentation.transforms_list = None  # no data augmentation for evaluation
    config.dataloader.shuffle = False
    _, test_loader, train_splits = get_loaders(config)
    distill_splits = [train_splits[i] for i in config.distill_loader.splits]
    distill_loader = hydra.utils.instantiate(config.distill_loader, teacher=teacher,
                                             datasets=distill_splits, synth_sampler=generator)
    student_train = get_logits(student, distill_loader)
    logger.save_obj(student_train, 'student_train_logits.pkl')
    teacher_train = get_logits(teacher, distill_loader)
    logger.save_obj(teacher_train, 'teacher_train_logits.pkl')
    # Free the train-side tensors before computing the test-side ones.
    del student_train, teacher_train, distill_loader
    student_test = get_logits(student, test_loader)
    logger.save_obj(student_test, 'student_test_logits.pkl')
    teacher_test = get_logits(teacher, test_loader)
    logger.save_obj(teacher_test, 'teacher_test_logits.pkl')
    del student_test, teacher_test, test_loader
    # if synth_data is None:
    #     return None
    # synth_loader = DataLoader(TensorDataset(*synth_data), shuffle=False,
    #                           batch_size=config.dataloader.batch_size)
    # student_synth = get_logits(student, synth_loader)
    # logger.save_obj(student_synth, 'student_synth_logits.pkl')
    # teacher_synth = get_logits(teacher, synth_loader)
    # logger.save_obj(teacher_synth, 'teacher_synth_logits.pkl')
| StarcoderdataPython |
4835568 | from abc import ABCMeta, abstractmethod
import six
class Lakehouse(six.with_metaclass(ABCMeta)):  # pylint: disable=no-init
    """
    Abstract interface a lakehouse storage backend must implement.

    Subclasses provide the two halves of table I/O: ``hydrate`` (read) and
    ``materialize`` (write).
    """

    @abstractmethod
    def hydrate(self, context, table_type, table_metadata, table_handle):
        # Presumably loads a stored table into an in-memory value -- the
        # exact contract is defined by concrete subclasses; confirm there.
        pass

    @abstractmethod
    def materialize(self, context, table_type, table_metadata, value):
        # Presumably persists ``value`` as a table of ``table_type`` -- the
        # exact contract is defined by concrete subclasses; confirm there.
        pass
| StarcoderdataPython |
3297809 |
from IPython.display import display
import pandas
from Datascrap import I_date,I_frequency,end,start,I_wordtocount,I_sentpolarity,I_sentsubjectivity,I_score,I_type
# --------------------------------------------------------------------------------#
# Summary counts for the scraped data.
print("Total Posts,Comments & Replies = " + str(len(I_date)) + "\n")
print("There are - " + str(sum(I_frequency)) + " mentions of " + "| " + I_wordtocount + " |" + "\n")
print("Time taken to run =" + str(end - start) + "\n")
# --------------------------------------------------------------#
# Average polarity calculations(Overall)
# Zeros are treated as "no sentiment" and excluded from the average.
# NOTE(review): if every polarity is 0 this divides by zero -- confirm the
# scraper guarantees at least one non-zero entry.
actualvaluespol = (len(I_sentpolarity) - (I_sentpolarity.count(0)))
sumpolarity = sum(I_sentpolarity)
avgpolarity = sumpolarity / actualvaluespol
print('Average polarity = ' + str(avgpolarity) + "\n")
# Average subjectivity calculations(Overall)
actualvaluessub = (len(I_sentsubjectivity) - (I_sentsubjectivity.count(0)))
sumsubjectivity = sum(I_sentsubjectivity)
avgsubjectivty = sumsubjectivity / actualvaluessub
print('Average Subjectivity = ' + str(avgsubjectivty))
# --------------------------------------------------------------#
# all data
data = {'Dates': I_date, 'Frequency': I_frequency, 'Sentiment_Polarity': I_sentpolarity,
        'SentSubjectivity': I_sentsubjectivity, 'Score': I_score, 'Type': I_type}
table = pandas.DataFrame(data)
with pandas.option_context('display.max_rows', None, 'display.max_columns', None):
    display(table)
print(table)
# --------------------------------------------------------------#
# grouped data for hourly plots
I_hourlydate = []
for date in I_date:
    # I_hourlydate.append(str(date.year)+"."+ str(date.month)+"."+ str(date.day)+"-"+str(date.hour))
    # NOTE(review): components are concatenated without zero-padding, so
    # e.g. month=1/day=11 and month=11/day=1 can collide into the same
    # hourly key -- verify before trusting the grouped sums.
    newdate = (str(date.year) + str(date.month) + str(date.day) + str(date.hour))
    I_hourlydate.append(int(newdate))
groupeddata = {'Dates': I_hourlydate, 'Frequency': I_frequency, 'Sentiment_Polarity': I_sentpolarity,
               'SentSubjectivity': I_sentsubjectivity, 'Score': I_score}
tablegrouped = pandas.DataFrame(groupeddata)
grouptedtable = tablegrouped.groupby('Dates').sum()
with pandas.option_context('display.max_rows', None, 'display.max_columns', None):
    display(grouptedtable)
| StarcoderdataPython |
1612781 | # Generated by Django 3.2.7 on 2021-09-01 17:13
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema for the demo app (Django 3.2.7).

    Creates DemoModel, DemoModelField (one column per Django field type),
    DemoRelated and DemoModel2, wires the foreign keys between them, and
    adds four proxy models used to exercise different admin list filters.
    NOTE: generated migration -- keep hand edits limited to comments.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='DemoModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
            ],
        ),
        migrations.CreateModel(
            name='DemoModelField',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('char', models.CharField(max_length=255)),
                ('integer', models.IntegerField()),
                ('logic', models.BooleanField(default=False)),
                ('null_logic', models.NullBooleanField(default=None)),
                ('date', models.DateField()),
                ('datetime', models.DateTimeField()),
                ('time', models.TimeField()),
                ('decimal', models.DecimalField(decimal_places=3, max_digits=10)),
                ('email', models.EmailField(max_length=254)),
                ('float', models.FloatField()),
                ('bigint', models.BigIntegerField()),
                ('generic_ip', models.GenericIPAddressField()),
                ('url', models.URLField()),
                ('text', models.TextField()),
                ('unique', models.CharField(max_length=255, unique=True)),
                ('nullable', models.CharField(max_length=255, null=True)),
                ('blank', models.CharField(blank=True, max_length=255, null=True)),
                ('not_editable', models.CharField(blank=True, editable=False, max_length=255, null=True)),
                ('choices', models.IntegerField(choices=[(1, 'Choice 1'), (2, 'Choice 2'), (3, 'Choice 3')])),
            ],
        ),
        migrations.CreateModel(
            name='DemoRelated',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
            ],
        ),
        migrations.CreateModel(
            name='DemoModel2',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('demo_items', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='demoapp.demomodel', verbose_name='Demo Related')),
            ],
        ),
        migrations.AddField(
            model_name='demomodel',
            name='demo_related',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='related', to='demoapp.demorelated', verbose_name='Demo Related'),
        ),
        # Proxy models below share DemoModel's table; they only differ in
        # admin presentation (one per list-filter variant under test).
        migrations.CreateModel(
            name='DemoModel_IntersectionFieldListFilter',
            fields=[
            ],
            options={
                'verbose_name': 'IntersectionFieldListFilter',
                'proxy': True,
                'indexes': [],
                'constraints': [],
            },
            bases=('demoapp.demomodel',),
        ),
        migrations.CreateModel(
            name='DemoModel_RelatedFieldCheckBoxFilter',
            fields=[
            ],
            options={
                'verbose_name': 'RelatedFieldCheckBoxFilter',
                'proxy': True,
                'indexes': [],
                'constraints': [],
            },
            bases=('demoapp.demomodel',),
        ),
        migrations.CreateModel(
            name='DemoModel_RelatedFieldRadioFilter',
            fields=[
            ],
            options={
                'verbose_name': 'RelatedFieldRadioFilter',
                'proxy': True,
                'indexes': [],
                'constraints': [],
            },
            bases=('demoapp.demomodel',),
        ),
        migrations.CreateModel(
            name='DemoModel_UnionFieldListFilter',
            fields=[
            ],
            options={
                'verbose_name': 'UnionFieldListFilter',
                'proxy': True,
                'indexes': [],
                'constraints': [],
            },
            bases=('demoapp.demomodel',),
        ),
    ]
| StarcoderdataPython |
30772 | import os
import shutil
from modulefinder import ModuleFinder
def main():
    """Bundle index.py, notifier.py and their site-packages dependencies
    into notify-github-release.zip (e.g. for a serverless deployment)."""
    staging = "package_temp"
    if os.path.exists(staging):
        shutil.rmtree(staging)
    os.makedirs(staging)

    # Copy the entry-point scripts themselves.
    for script in ["index.py", "notifier.py"]:
        dst = os.path.join(staging, script)
        print("copy '%s' to '%s'" % (script, dst))
        shutil.copy(script, dst)

    print("analysing modules ...")
    finder = ModuleFinder()
    finder.run_script("index.py")

    copied_paths = set()
    for name, mod in finder.modules.items():
        if not (mod.__path__ and "site-packages" in mod.__path__[0]):
            continue
        path = mod.__path__[0]
        # Walk up to the top-level package directory inside site-packages.
        while os.path.basename(os.path.dirname(path)) != "site-packages":
            path = os.path.dirname(path)
        if path in copied_paths:
            continue
        dst = os.path.join(staging, os.path.basename(path))
        print("copy '%s' from '%s' to '%s'" % (name, path, dst))
        shutil.copytree(path, dst, ignore=shutil.ignore_patterns("__pycache__", "*.pyc"))
        copied_paths.add(path)

    archive = "notify-github-release"
    print("zipping %s to %s.zip ..." % (staging, archive))
    if os.path.exists(archive + ".zip"):
        os.remove(archive + ".zip")
    shutil.make_archive(archive, 'zip', staging)

    if os.path.exists(staging):
        shutil.rmtree(staging)
    print("done")
if __name__ == '__main__':
main()
| StarcoderdataPython |
154620 | from rest_framework import serializers
from .models import Movie
class MovieSerializer(serializers.ModelSerializer):
    """DRF serializer exposing the public fields of the Movie model."""

    class Meta:
        # Fields included in API input/output for a Movie.
        fields = (
            "name",
            "plot",
            "year",
            "director",
            "actors",
            "image",
            "ratings",
            "url"
        )
        model = Movie
| StarcoderdataPython |
1790366 | <reponame>unplugstudio/mezzanine-webinars
import os
import shutil
import sys
import tempfile
import django
from pathlib2 import Path
# Path to the temp mezzanine project folder
TMP_PATH = Path(tempfile.mkdtemp()) / "project_template"
# Injected at the bottom of local_settings.py
TEST_SETTINGS = """
# START INJECTED SETTINGS
INSTALLED_APPS = list(INSTALLED_APPS)
if "mezzanine.accounts" not in INSTALLED_APPS:
INSTALLED_APPS.append("mezzanine.accounts")
INSTALLED_APPS.append("mezzanine_seminars")
# Use the MD5 password hasher by default for quicker test runs.
PASSWORD_HASHERS = ('django.contrib.auth.hashers.MD5PasswordHasher',)
# END INJECTED SETTINGS
"""
# Injected at the bottom of urls.py
TEST_URLS = """
# START INJECTED URLCONFIG
urlpatterns = list(urlpatterns)
urlpatterns.insert(
0, url("^seminars/", include("mezzanine_seminars.urls", namespace="seminars"))
)
# END INJECTED URLCONFIG
"""
def after_django_setup():
    """
    Runs once per testing session AFTER Django has been set up.
    """
    # Imported lazily: these apps are only importable once django.setup()
    # has run inside pytest_configure().
    from ddf import teach
    from mezzanine_seminars.models import Seminar

    # When creating Seminars we don't want to create extra sites
    teach(Seminar, site=None, featured_image="")


def pytest_report_header(config):
    """
    Have pytest report the path of the project folder
    """
    return "mezzanine proj (tmp): {}".format(TMP_PATH)


def pytest_configure():
    """
    Hack the `project_template` dir into an actual project to test against.
    """
    from mezzanine.utils.importing import path_for_import

    # Copy Mezzanine's bundled project skeleton into the temp dir.
    template_path = Path(path_for_import("mezzanine")) / "project_template"
    shutil.copytree(str(template_path), str(TMP_PATH))
    proj_path = TMP_PATH / "project_name"

    # Settings: activate the template and append our test overrides.
    local_settings = (proj_path / "local_settings.py.template").read_text()
    (proj_path / "local_settings.py").write_text(local_settings + TEST_SETTINGS)

    # URLs: append the seminars urlconf.
    urls = (proj_path / "urls.py").read_text()
    (proj_path / "urls.py").write_text(urls + TEST_URLS)

    # Setup the environment for Django
    sys.path.insert(0, str(TMP_PATH))
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project_name.settings")
    django.setup()
    after_django_setup()


def pytest_unconfigure():
    """
    Remove the temporary folder
    """
    try:
        shutil.rmtree(str(TMP_PATH))
    except OSError:
        # Best-effort cleanup; a leftover temp dir is harmless.
        pass
| StarcoderdataPython |
4809094 | import tornado.ioloop
import tornado.web
from tornado.gen import coroutine
from tornado_swirl import api_routes
from tornado_swirl.swagger import Application, describe, restapi, schema, add_global_tag, add_security_scheme
from tornado_swirl.openapi import security
# Global OpenAPI metadata, plus response headers that make the served
# Swagger UI assets cacheable for five minutes.
describe(title='Test API', description='Just things to test',
         swagger_ui_handlers_headers=[
             ('Cache-Control', 'public'),
             ('Cache-Control', 'max-age=300')
         ])

# Tags and security schemes referenced from the handler docstrings below.
add_global_tag("internal", "Internal Use Only", "http://foo.com/tags")
add_security_scheme("test_api_key", security.HTTP('bearer', 'JWT'))
add_security_scheme("api_key", security.APIKey('X-API-KEY'))
# @restapi(url="/test")
# class MainHandler(tornado.web.RequestHandler):
# """Foo"""
# async def get(self):
# """Test summary
# Test description
# Query Params:
# param1 (integer) -- required. test
# minimum: 1 maximum: 200 exclusiveMaximum: true
# Response:
# x (enum[a,b,c]) -- Foomanchu
# """
# self.finish()
# @restapi("/test/(?P<emp_uid>\d+)/(?P<date>[\w-]+)")
# class TestHandler(tornado.web.RequestHandler):
# """Mother ship"""
# async def get(self, emp_uid, date):
# """Test get
# Hiho
# Cookie:
# x (string) -- some foo
# Path Params:
# emp_uid (int) -- test
# date (enum[a,b,c]) -- test
# 200 Response:
# test ([User]) -- Test data
# 201 Response:
# test (User) -- Test user
# Error Response:
# 400 -- Fudge
# """
# self.finish()
# @restapi('/item/(?P<itemid>\d+)')
# class ItemHandler(tornado.web.RequestHandler):
# def get(self, itemid):
# """Get Item data.
# Gets Item data from database.
# Path Parameter:
# itemid (integer) -- The item id
# """
# pass
# @restapi('/withrequestbody')
# class FooHandler(tornado.web.RequestHandler):
# def get(self, itemid):
# """Get Item data.
# Gets Item data from database.
# Request Body:
# itemid (integer) -- The item id
# """
# pass
# @restapi('/withrequestbody2')
# class FooHandler2(tornado.web.RequestHandler):
# def get(self, itemid):
# """Get Item data.
# Gets Item data from database.
# Request Body:
# file (file:text/csv) -- CSV file.
# """
# pass
# @restapi('/withrequestbody3')
# class FooHandler3(tornado.web.RequestHandler):
# def get(self, itemid):
# """Get Item data.
# Gets Item data from database.
# Request Body:
# file (file:text/csv) -- CSV file.
# name (string) -- required. Foo name.
# """
# pass
# NOTE: the docstrings on the handlers and @schema classes below are parsed
# by tornado-swirl to build the OpenAPI spec -- their wording and section
# layout are runtime-meaningful, so they are left exactly as written.
@restapi('/chunky')
class FooHandler4(tornado.web.RequestHandler):
    # Streams a large chunked JSON response; intentionally has no
    # docstring, so no spec entry is generated for it.
    @coroutine
    def get(self):
        self.set_header('Content-Type', 'application/json')
        self.write('{ "data": ')
        self.write('"')
        for i in range(1000):
            self.write('foobar')
            # flush() pushes each fragment to the client immediately.
            yield self.flush()
        self.write('" }')
        yield self.flush()


@restapi('/withrequestbody5')
class FooHandler5(tornado.web.RequestHandler):
    def get(self):
        """Get Item data.
        Gets Item data from database.
        Security:
            api_key --
            test_api_key --
        """
        self.finish()

    def post(self):
        """Get Item data.
        Gets Item data from database.
        HTTP Headers:
            Tt-I2ap-Id -- Uri.
            Tt-I2ap-Sec -- Some Hex token
        Request Body:
            user (object) -- required. User data.
        """
        pass


@restapi('/withrequestbody6')
class FooHandler6(tornado.web.RequestHandler):
    def post(self):
        """Create Admin
        Request Body:
            user (Admin) -- required. User data.
        """
        pass


@schema
class User(object):
    """User
    User def
    Properties:
        underscore_test -- Test
        m9_9 -- Test
        name (string) -- required. The name
        age (int) -- The age.
            minimum: 1 maximum: 100
    """
    pass


@schema
class Admin(User):
    """Admin is a User
    Properties:
        superpowers ([string]) -- list of superpowers.
    """
    # class Meta:
    #     examples = {
    #         "Ron": {
    #             "name": "Ronald",
    #             "age": 10,
    #             "superpowers": ["a", "b", "c"]
    #         },
    #         "Don": {
    #             "name": "McDonald",
    #             "age": 12,
    #             "superpowers": ["c", "d", "e"]
    #         }
    #     }

    class Meta:
        # Single example rendered in the generated docs.
        example = {
            "name": "Ronald",
            "age": 10,
            "superpowers": ["a", "b", "c"]
        }


@restapi('/path/to/api')
class MyHandler(tornado.web.RequestHandler):
    async def get(self):
        """This will be the API path summary.
        While the long description will be the API description.
        Query Parameters:
            date (date) -- Required. The target date.
            sort (enum[asc, desc, with-params]) -- Optional. Sort order.
            items (int) -- Optional. Number of items to display.
                minimum: 100 maximum: 200
        Returns:
            items ([string]) -- List of random strings.
        Error Responses:
            200 (Admin) -- Test Admin.
            400 (ErrorResponse) -- Bad Request.
            500 (ErrorResponse) -- Internal Server Error.
        Tags:
            internal api
        """
        self.finish()


@schema
class ErrorResponse(object):
    """Error response object.
    Properties:
        type (enum[job-errors, 2]) -- Job Errors
        code (int) -- Required. Error code.
        message (string) -- Error description.
            readOnly: true
        details (object) -- Object
            minProperties: 2
    """
    class Meta:
        example = {
            "code": 400,
            "message": "Some message",
            "details": {
                "foo": True,
                "bar": False
            }
        }


def make_app():
    # Routes come from the @restapi registry via api_routes();
    # autoreload only matters for local development.
    return Application(api_routes(), autoreload=True)


if __name__ == "__main__":
    print("Test app")
    app = make_app()
    app.debug = True
    app.listen(8001)
    tornado.ioloop.IOLoop.current().start()
| StarcoderdataPython |
1767996 | <reponame>Rohith04MVK/Neutron-Bot
import typing as t
from abc import abstractmethod
from collections import defaultdict
from contextlib import suppress
from dataclasses import field, make_dataclass
from importlib import import_module
import asyncpg
from loguru import logger
if t.TYPE_CHECKING:
from bot.core.bot import Bot
class Singleton(type):
    """
    Singleton metaclass (Singleton design pattern).

    A class using this metaclass gets exactly one instance: the first
    instantiation creates and caches it, and every later call returns
    the cached instance. Each class stores its own ``_instance``.
    """

    _instance = None

    def __call__(cls, *args, **kwargs):
        """Create the single instance on first call, then always return it."""
        # Compare against None explicitly: a plain truthiness test would
        # wrongly re-create the singleton whenever the cached instance is
        # falsy (e.g. it defines __bool__ or __len__ returning False/0).
        if cls._instance is None:
            cls._instance = super().__call__(*args, **kwargs)
        return cls._instance
class DBTable(metaclass=Singleton):
"""
This is a basic database table structure model.
This class automatically creates the initial database
tables accordingly to `columns` dict which is a mandantory
class parameter defined in the top-level class, it should
look like this:
columns = {
"column_name": "SQL creation syntax",
"example": "NUMERIC(40) UNIQUE NOT NULL"
...
}
After the table is populated, caching will be automatically
set up based on the `caching` dict which is an optional class
parameter defined in the top-level class, if this parameter isn't
defined, caching will be skipped. Example for caching:
caching = {
"key": "table_name", # This will be the key for the stored `cache` dict
# These will be the entries for the cache
"column_name": (python datatype, default_value),
"column_name2": python datatype # default_value is optional
}
There are also multiple methods which serves as an abstraction
layer for for executing raw SQL code.
There is also a special `reference` classmethod which will
return the running instance (from the singleton model).
"""
    def __init__(self, db: "Database", table_name: str):
        """
        Keep references to the shared database, its connection pool and
        timeout, and start with an empty cache for this table.
        """
        self.database = db
        self.table = table_name
        self.pool = self.database.pool
        self.timeout = self.database.timeout
        # Replaced by a populated defaultdict in `_make_cache` when the
        # subclass defines a `caching` dict.
        self.cache = {}

    @abstractmethod
    async def __async_init__(self) -> None:
        """
        This is asynchronous initialization function which
        will get automatically called by `Database` when
        the table is added. (Calling this method is handled
        by the `_init` function.)
        """
        raise NotImplementedError

    async def _init(self) -> None:
        """
        This method calls `_populate` and `_make_cache`
        to make all the db tables and create the table cache.
        After that, `__async_init__` method is called which
        refers to top-level async initialization, if this
        method isn't defined, nothing will happen.
        This also makes sure that `columns` dictionary is
        defined properly in the top-level class..
        """
        if not hasattr(self, "columns") or not isinstance(self.columns, dict):
            raise RuntimeError(f"Table {self.__class__} doesn't have a `columns` dict defined properly.")
        await self._populate()
        await self._make_cache()
        # Subclasses without the hook raise NotImplementedError from the
        # abstract default; that is deliberately swallowed here.
        with suppress(NotImplementedError):
            await self.__async_init__()
async def _populate(self) -> None:
"""
This method is used to create the initial table structure
and define it's structure and columns.
This method also calls `__async_init__` method on top level table
(if there is one).
"""
table_structure = ",\n".join(f"{column} {sql_details}" for column, sql_details in self.columns.items())
populate_command = f"CREATE TABLE IF NOT EXISTS {self.table} (\n{table_structure}\n)"
logger.trace(f"Populating {self.__class__}")
async with self.pool.acquire(timeout=self.timeout) as db:
await db.execute(populate_command)
async def _make_cache(self) -> None:
"""
Crate and populate basic caching model from top-level `self.caching`.
This function creates `self.cache_columns` which stores the cached columns
and their type together with `self.cache` which stores the actual cache.
"""
if not hasattr(self, "caching") or not isinstance(self.caching, dict):
logger.trace(f"Skipping defining cache for {self.__class__}, `caching` dict wasn't specified")
return
self.cache_columns = {}
cache_key_type, self._cache_key = self.caching.pop("key")
self.cache_columns[self._cache_key] = cache_key_type
# Create cache model
field_list = []
for column, specification in self.caching.items():
if isinstance(specification, tuple):
val = (column, specification[0], field(default=specification[1]))
_type = specification[0]
elif specification is None:
val = column
_type = None
else:
val = (column, specification)
_type = specification
field_list.append(val)
self.cache_columns[column] = _type
self._cache_model = make_dataclass("Entry", field_list)
# Create and populate the cache
self.cache = defaultdict(self._cache_model)
columns = list(self.columns.keys())
entries = await self.db_get(columns) # Get db entries to store
for entry in entries:
db_entry = {}
for col_name, record in zip(columns, entry):
# Convert to specified type
with suppress(IndexError, TypeError):
_type = self.cache_columns[col_name]
record = _type(record)
db_entry[col_name] = record
# Store the cache model into the cache
key = db_entry.pop(self._cache_key)
cache_entry = self._cache_model(**db_entry)
self.cache[key] = cache_entry
def cache_update(self, key: str, column: str, value: t.Any) -> None:
"""
Update the stored cache value for `update_key` on `primary_value` to given `update_value`.
"""
setattr(self.cache[key], column, value)
def cache_get(self, key: str, column: str) -> t.Any:
    """Return the cached value of `column` for the entry stored under `key`."""
    entry = self.cache[key]
    return getattr(entry, column)
@classmethod
def reference(cls) -> "DBTable":
    """
    Return the running (singleton) instance of the given class.

    Works on the single-instance model: instead of calling __init__ from
    the top-level class directly (which would require passing unused
    arguments), `reference` retrieves the already-created instance.

    Note that the returned object is an instance of the top-level class,
    but type checkers will see it as a `DBTable` because of the return
    annotation; statically declare the variable's type to circumvent this.
    """
    return cls._instance
async def db_execute(self, sql: str, sql_args: t.Optional[list] = None) -> None:
    """Acquire a connection from the pool and execute `sql` with `sql_args`.

    Abstraction layer so callers don't deal with the context manager
    themselves.
    """
    args = sql_args or []
    async with self.pool.acquire(timeout=self.timeout) as db:
        await db.execute(sql, *args)
async def db_fetchone(self, sql: str, sql_args: t.Optional[list] = None) -> asyncpg.Record:
    """Acquire a connection from the pool and fetch a single row for `sql`.

    Abstraction layer so callers don't deal with the context manager
    themselves.
    """
    args = sql_args or []
    async with self.pool.acquire(timeout=self.timeout) as db:
        return await db.fetchrow(sql, *args)
async def db_fetch(self, sql: str, sql_args: t.Optional[list] = None) -> t.List[asyncpg.Record]:
    """Acquire a connection from the pool and fetch all rows for `sql`.

    Abstraction layer so callers don't deal with the context manager
    themselves.
    """
    args = sql_args or []
    async with self.pool.acquire(timeout=self.timeout) as db:
        return await db.fetch(sql, *args)
async def db_get(
    self, columns: t.List[str], specification: t.Optional[str] = None, sql_args: t.Optional[list] = None
) -> t.Union[asyncpg.Record, t.List[asyncpg.Record]]:
    """
    Run a basic SELECT query on this table without writing raw SQL.

    Args:
        columns: column names to select.
        specification: optional WHERE clause body (without the WHERE keyword),
            may reference `sql_args` via $1, $2, ...
        sql_args: positional arguments for `specification`.

    Returns:
        A single record when exactly one column is requested, otherwise a
        list of records.
    """
    # NOTE(review): requesting a single *column* returns a single *row*
    # (fetchrow). This asymmetry looks intentional (callers rely on it),
    # so it is preserved -- confirm before changing.
    sql = f"SELECT {', '.join(columns)} FROM {self.table}"
    if specification:
        sql += f" WHERE {specification}"
    if len(columns) == 1:
        return await self.db_fetchone(sql, sql_args)
    return await self.db_fetch(sql, sql_args)
async def db_set(self, columns: t.List[str], values: t.List[str]) -> None:
    """Insert a single row into this table without writing raw SQL.

    `columns` and `values` are matched positionally; values are passed as
    query parameters ($1, $2, ...), never interpolated into the SQL text.
    """
    column_list = ", ".join(columns)
    placeholders = ", ".join(f"${i}" for i in range(1, len(values) + 1))
    sql = f"""
    INSERT INTO {self.table} ({column_list})
    VALUES ({placeholders})
    """
    await self.db_execute(sql, values)
async def db_upsert(self, columns: t.List[str], values: t.List[str], conflict_column: str) -> None:
    """
    Insert a row, or update it when `conflict_column` already holds the
    given value (upsert), without writing raw SQL.

    Args:
        columns: column names to insert/update.
        values: values matched positionally to `columns`, passed as
            query parameters ($1, $2, ...).
        conflict_column: the column whose uniqueness conflict triggers
            the UPDATE branch.
    """
    sql_columns = ", ".join(columns)
    sql_values = ", ".join(f"${n + 1}" for n in range(len(values)))
    # The SET list entries must be comma-separated. The previous bare
    # string concatenation produced invalid SQL such as "a=$1b=$2"
    # whenever more than one non-conflict column was updated.
    sql_update = ", ".join(
        f"{column}=${index + 1}"
        for index, column in enumerate(columns)
        if column != conflict_column
    )
    sql = f"""
    INSERT INTO {self.table} ({sql_columns})
    VALUES ({sql_values})
    ON CONFLICT ({conflict_column}) DO
    UPDATE SET {sql_update}
    """
    await self.db_execute(sql, values)
class Database(metaclass=Singleton):
    """
    Main connection class for the postgres database.

    Ensures easy connecting/disconnecting and loading of the top-level
    database table classes.
    """

    def __init__(self, db_parameters: dict, timeout: int = 5):
        """Validate connection parameters; doesn't connect yet (see `connect`)."""
        required_parameters = {"host", "database", "user", "password"}
        # Make sure db_parameters contains all required keys by checking
        # that the required set is a subset of the provided keys.
        if required_parameters > set(db_parameters.keys()):
            raise RuntimeError(
                "The `db_parameters` dict doesn't contain one or more"
                f" of the required parameters: {required_parameters}"
            )
        self.db_parameters = db_parameters
        # Honor the caller-supplied timeout (previously hard-coded to 5,
        # silently ignoring the argument).
        self.timeout = timeout
        self.tables = set()

    async def connect(self) -> bool:
        """
        Connect to the database using the `self.db_parameters`
        provided in the `__init__` method, storing the connection pool
        in `self.pool`. Returns False (and logs) on failure.
        """
        logger.debug("Connecting to the database")
        try:
            self.pool = await asyncpg.create_pool(**self.db_parameters)
        except asyncpg.exceptions.PostgresError:
            logger.error("Unable to connect to the database")
            return False
        return True

    async def disconnect(self) -> None:
        """Close the database pool connection."""
        logger.debug("Closing connection to the database")
        await self.pool.close()

    async def load_tables(self, tables: t.List[str], bot: "Bot") -> None:
        """
        Import every module named in `tables` and await its `load`
        coroutine, which initiates the top-level database table class.

        Note: a module without a `load` function aborts the whole loop
        (remaining tables are not loaded).
        """
        for table in tables:
            logger.trace(f"Adding {table} table")
            module = import_module(table)
            if not hasattr(module, "load"):
                logger.error(f"Unable to load table: {table} (this: {__name__} module: {module}), it doesn't have the async `load` function set up")
                return
            await module.load(bot, self)

    async def add_table(self, table: DBTable) -> None:
        """
        Add `table` into `self.tables` and run its `_init`.

        Already-added tables are skipped with a warning (and `_init`
        isn't called again).
        """
        if table in self.tables:
            logger.warning(f"Tried to add already added table ({table.__class__}), skipping.")
            return
        if not isinstance(table, DBTable):
            raise TypeError("`table` argument must be an instance of `DBTable`")
        self.tables.add(table)
        await table._init()

    async def remove_table(self, table: "DBTable") -> None:
        """
        Remove the table from `self.tables` and reset its singleton
        instance. Unknown tables are skipped with a warning.
        """
        if table not in self.tables:
            logger.warning(f"Tried to remove unknown table ({table.__class__})")
            # Bail out: proceeding would raise KeyError on set.remove below.
            return
        logger.trace(f"Removing {table.__class__}")
        self.tables.remove(table)
        table._instance = None
| StarcoderdataPython |
1761262 | # -*- coding: utf-8 -*-
"""
idfy_rest_client.models.company_info_difi_response
This file was automatically generated for Idfy by APIMATIC v2.0 ( https://apimatic.io )
"""
class CompanyInfoDifiResponse(object):
    """Implementation of the 'CompanyInfoDifiResponse' model.

    Basic company information returned by the Difi lookup.

    Attributes:
        org_nr (string): organisation number.
        org_name (string): organisation name.
        address (string): street address.
        postal_code (string): postal code.
        city (string): city.
        website (string): website URL.
        country (string): country.
        additional_properties (dict): any response fields not mapped above.
    """

    # Mapping from Model property names to API property names.
    _names = {
        "org_nr": 'OrgNr',
        "org_name": 'OrgName',
        "address": 'Address',
        "postal_code": 'PostalCode',
        "city": 'City',
        "website": 'Website',
        "country": 'Country'
    }

    def __init__(self,
                 org_nr=None,
                 org_name=None,
                 address=None,
                 postal_code=None,
                 city=None,
                 website=None,
                 country=None,
                 additional_properties=None):
        """Constructor for the CompanyInfoDifiResponse class.

        `additional_properties` defaults to a fresh dict per instance;
        the previous `={}` default was shared across all instances
        (mutable-default-argument bug).
        """
        # Initialize members of the class
        self.org_nr = org_nr
        self.org_name = org_name
        self.address = address
        self.postal_code = postal_code
        self.city = city
        self.website = website
        self.country = country
        # Add additional model properties to the instance
        self.additional_properties = additional_properties if additional_properties is not None else {}

    @classmethod
    def from_dictionary(cls,
                        dictionary):
        """Creates an instance of this model from a dictionary.

        Args:
            dictionary (dictionary): A dictionary representation of the object as
                obtained from the deserialization of the server's response. The keys
                MUST match property names in the API description.

        Returns:
            object: An instance of this structure class, or None when
            `dictionary` is None.

        Note:
            The input `dictionary` is mutated: every recognized API key is
            deleted, and the remainder becomes `additional_properties`.
        """
        if dictionary is None:
            return None

        # Extract variables from the dictionary
        org_nr = dictionary.get('OrgNr')
        org_name = dictionary.get('OrgName')
        address = dictionary.get('Address')
        postal_code = dictionary.get('PostalCode')
        city = dictionary.get('City')
        website = dictionary.get('Website')
        country = dictionary.get('Country')

        # Clean out expected properties from dictionary
        for key in cls._names.values():
            if key in dictionary:
                del dictionary[key]

        # Return an object of this model
        return cls(org_nr,
                   org_name,
                   address,
                   postal_code,
                   city,
                   website,
                   country,
                   dictionary)
| StarcoderdataPython |
3380843 | #!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from nose_parameterized import parameterized
from dino.db import IDatabase
from dino.db.rdbms.handler import DatabaseRdbms
from dino.db.redis import DatabaseRedis
__author__ = '<NAME> <<EMAIL>>'
class RedisHasInterfaceMethodsTest(unittest.TestCase):
    """Every method declared on IDatabase must be implemented by DatabaseRedis."""

    interface_methods = list(IDatabase.__dict__['_InterfaceClass__attrs'].keys())

    def setUp(self):
        members = DatabaseRedis.__dict__
        self.redis_methods = {
            name for name, attr in members.items()
            if callable(attr) and not name.startswith('_')
        }

    @parameterized.expand(interface_methods)
    def test_method_is_implemented(self, method):
        self.assertIn(method, self.redis_methods)
class RdbmsHasInterfaceMethodsTest(unittest.TestCase):
    """Every method declared on IDatabase must be implemented by DatabaseRdbms."""

    interface_methods = list(IDatabase.__dict__['_InterfaceClass__attrs'].keys())

    def setUp(self):
        members = DatabaseRdbms.__dict__
        self.rdbms_methods = {
            name for name, attr in members.items()
            if callable(attr) and not name.startswith('_')
        }

    @parameterized.expand(interface_methods)
    def test_method_is_implemented(self, method):
        self.assertIn(method, self.rdbms_methods)
class RdbmsHasOnlyInterfaceMethodsTest(unittest.TestCase):
    """Every public method on DatabaseRdbms must be declared on IDatabase."""

    rdbms_methods = {
        name for name, attr in DatabaseRdbms.__dict__.items()
        if callable(attr) and not name.startswith('_')
    }

    def setUp(self):
        self.interface_methods = list(IDatabase.__dict__['_InterfaceClass__attrs'].keys())

    @parameterized.expand(rdbms_methods)
    def test_method_is_in_interface(self, method):
        self.assertIn(method, self.interface_methods)
class RedisHasOnlyInterfaceMethodsTest(unittest.TestCase):
    """Every public method on DatabaseRedis must be declared on IDatabase."""

    redis_methods = {
        name for name, attr in DatabaseRedis.__dict__.items()
        if callable(attr) and not name.startswith('_')
    }

    def setUp(self):
        self.interface_methods = list(IDatabase.__dict__['_InterfaceClass__attrs'].keys())

    @parameterized.expand(redis_methods)
    def test_method_is_in_interface(self, method):
        self.assertIn(method, self.interface_methods)
| StarcoderdataPython |
1698156 | """
This is an implementation of Generative Adversarial Imiation Learning
See https://arxiv.org/abs/1606.03476
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from machina import loss_functional as lf
from machina import logger
from machina.algos import trpo, ppo_kl, ppo_clip
from machina.utils import get_device
def update_discrim(discrim, optim_discrim, agent_batch, expert_batch, ent_beta=0.001):
    """One gradient step on the GAIL discriminator.

    Averages the cross-entropy losses on an agent batch (label 0) and an
    expert batch (label 1), backpropagates, steps the optimizer, and
    returns the loss as a numpy value.
    """
    agent_loss = lf.cross_ent(
        discrim, agent_batch, expert_or_agent=0, ent_beta=ent_beta)
    expert_loss = lf.cross_ent(
        discrim, expert_batch, expert_or_agent=1, ent_beta=ent_beta)
    discrim_loss = (agent_loss + expert_loss) / 2
    optim_discrim.zero_grad()
    discrim_loss.backward()
    optim_discrim.step()
    return discrim_loss.detach().cpu().numpy()
def train(agent_traj, expert_traj, pol, vf, discrim,
          optim_vf, optim_discim,
          rl_type='trpo',
          pol_ent_beta=0, discrim_ent_beta=0,
          epoch=1,
          batch_size=64, discrim_batch_size=32,
          num_epi_per_seq=1, discrim_step=1,  # optimization hypers
          damping=0.1, max_kl=0.01, num_cg=10,  # trpo hypers
          optim_pol=None,
          clip_param=0.2, max_grad_norm=0.5, clip_vfunc=False, kl_beta=1, kl_targ=0.01,  # ppo hypers
          log_enable=True,
          ):
    """One GAIL training iteration.

    Updates the policy `pol` and value function `vf` on `agent_traj` with
    the chosen RL algorithm (`rl_type` in {'trpo', 'ppo_clip', 'ppo_kl'}),
    then updates the discriminator `discrim` on paired agent/expert batches.

    Returns:
        dict with keys PolLoss, VfLoss, DiscrimLoss (lists of per-batch
        losses), new_kl_beta and kl_mean (both 0 unless rl_type=='ppo_kl').
    """
    pol_losses = []
    vf_losses = []
    discrim_losses = []
    if log_enable:
        logger.log("Optimizing...")
    if rl_type == 'trpo':
        # TRPO policy step works on the full batch (or per-episode RNN seqs).
        iterator = agent_traj.full_batch(1) if not pol.rnn else agent_traj.iterate_rnn(
            batch_size=agent_traj.num_epi)
        for batch in iterator:
            pol_loss = trpo.update_pol(
                pol, batch, max_kl=max_kl, num_cg=num_cg, damping=damping, ent_beta=pol_ent_beta)
            pol_losses.append(pol_loss)
        # Value function is updated with minibatch SGD.
        iterator = agent_traj.iterate(batch_size, epoch) if not pol.rnn else agent_traj.iterate_rnn(batch_size=batch_size,
                                                                                                    num_epi_per_seq=num_epi_per_seq,
                                                                                                    epoch=epoch)
        for batch in iterator:
            vf_loss = trpo.update_vf(vf, optim_vf, batch)
            vf_losses.append(vf_loss)
        new_kl_beta = 0
        kl_mean = 0
    elif rl_type == 'ppo_clip':
        # Clipped-surrogate PPO: policy and vf share the minibatch loop.
        iterator = agent_traj.iterate(batch_size, epoch) if not pol.rnn else agent_traj.iterate_rnn(batch_size=batch_size,
                                                                                                    num_epi_per_seq=num_epi_per_seq,
                                                                                                    epoch=epoch)
        for batch in iterator:
            pol_loss = ppo_clip.update_pol(
                pol, optim_pol, batch, clip_param, pol_ent_beta, max_grad_norm)
            vf_loss = ppo_clip.update_vf(
                vf, optim_vf, batch, clip_param, clip_vfunc, max_grad_norm)
            pol_losses.append(pol_loss)
            vf_losses.append(vf_loss)
        new_kl_beta = 0
        kl_mean = 0
    elif rl_type == 'ppo_kl':
        # KL-penalized PPO: minibatch updates, then adapt the KL penalty.
        iterator = agent_traj.iterate(batch_size, epoch) if not pol.rnn else agent_traj.iterate_rnn(batch_size=batch_size,
                                                                                                    num_epi_per_seq=num_epi_per_seq,
                                                                                                    epoch=epoch)
        for batch in iterator:
            pol_loss = ppo_kl.update_pol(
                pol, optim_pol, batch, kl_beta, max_grad_norm, pol_ent_beta)
            vf_loss = ppo_kl.update_vf(vf, optim_vf, batch)
            pol_losses.append(pol_loss)
            vf_losses.append(vf_loss)
        # Measure mean KL between old and new policy on the full batch.
        iterator = agent_traj.full_batch(1) if not pol.rnn else agent_traj.iterate_rnn(
            batch_size=agent_traj.num_epi)
        batch = next(iterator)
        with torch.no_grad():
            pol.reset()
            if pol.rnn:
                _, _, pd_params = pol(batch['obs'], h_masks=batch['h_masks'])
            else:
                _, _, pd_params = pol(batch['obs'])
            # NOTE(review): `batch` is passed where old pd params are
            # expected -- presumably the batch dict carries them under the
            # keys kl_pq reads; confirm against machina's Pd API.
            kl_mean = torch.mean(
                pol.pd.kl_pq(
                    batch,
                    pd_params
                )
            ).item()
        # Standard adaptive-KL rule: grow/shrink the penalty by 1.5x.
        if kl_mean > 1.3 * kl_targ:
            new_kl_beta = 1.5 * kl_beta
        elif kl_mean < 0.7 * kl_targ:
            new_kl_beta = kl_beta / 1.5
        else:
            new_kl_beta = kl_beta
    else:
        raise ValueError('Only trpo, ppo_clip and ppo_kl are supported')
    # Discriminator update on paired agent/expert minibatches.
    agent_iterator = agent_traj.iterate_step(
        batch_size=discrim_batch_size, step=discrim_step)
    expert_iterator = expert_traj.iterate_step(
        batch_size=discrim_batch_size, step=discrim_step)
    for agent_batch, expert_batch in zip(agent_iterator, expert_iterator):
        discrim_loss = update_discrim(
            discrim, optim_discim, agent_batch, expert_batch, ent_beta=discrim_ent_beta)
        discrim_losses.append(discrim_loss)
    if log_enable:
        logger.log("Optimization finished!")
    return dict(PolLoss=pol_losses, VfLoss=vf_losses, DiscrimLoss=discrim_losses, new_kl_beta=new_kl_beta, kl_mean=kl_mean)
| StarcoderdataPython |
90114 | <reponame>ak-ustutt/GeCCo-public<gh_stars>0
from python_interface.gecco_interface import *
# Regression target: exercise ADD_UNITY and the SCALE_COPY modes
# (square, prc-thresh, scale, precond) on a dummy hermitian operator.
new_target('TEST_ADD_UNITY',True)

# Dummy one- and two-body operator blocks over particle (P), valence (V)
# and hole (H) spaces.
DEF_OP_FROM_OCC({
    LABEL:"DUMMY_1",
    DESCR:'P,P|PP,PP|V,V|VV,VV|H,H|HH,HH'
    })
SET_HERMITIAN({
    LABEL:"DUMMY_1",
    CA_SYMMETRY:+1})

# Two ME lists on the same operator: DUMMY_1 holds the input,
# DUMMY_2 receives the SCALE_COPY results.
DEF_ME_LIST({
    LIST:'ME_DUMMY_1',
    OPERATOR:'DUMMY_1',
    IRREP:1,
    '2MS':0,
    AB_SYM:+1,
    DIAG_TYPE:1,
    MAX_REC:3,
    MIN_REC:1,
    REC:2
    })
DEF_ME_LIST({
    LIST:'ME_DUMMY_2',
    OPERATOR:'DUMMY_1',
    IRREP:1,
    '2MS':0,
    AB_SYM:+1,
    DIAG_TYPE:1,
    MAX_REC:3,
    MIN_REC:1,
    REC:2
    })

# Initialize ME_DUMMY_1 to 0.5 * identity.
ADD_UNITY({
    LIST:'ME_DUMMY_1',
    FAC:0.5,
    INIT:True,
    MS_SYM_SIGN:1})

PRINT({STRING:"Mode square"})
SCALE_COPY({LIST_RES:'ME_DUMMY_2',
    LIST_INP:'ME_DUMMY_1',
    FAC:3,
    MODE:'square'
    })
PRINT_MEL({LIST:'ME_DUMMY_2'})

PRINT({STRING:"Mode prc-thresh"})
SCALE_COPY({LIST_RES:'ME_DUMMY_2',
    LIST_INP:'ME_DUMMY_1',
    FAC:0.8,
    MODE:'prc-thresh'
    })
PRINT_MEL({LIST:'ME_DUMMY_2'})

PRINT({STRING:"Mode scale"})
SCALE_COPY({LIST_RES:'ME_DUMMY_2',
    LIST_INP:'ME_DUMMY_1',
    FAC:2.0,
    MODE:'scale'
    })
PRINT_MEL({LIST:'ME_DUMMY_2'})

PRINT({STRING:"Mode precond"})
# Preparing "preconditioner": zero ME_DUMMY_2, then apply the threshold
# mode in-place before using it to precondition ME_DUMMY_1.
SCALE_COPY({LIST_RES:'ME_DUMMY_2',
    LIST_INP:'ME_DUMMY_2',
    FAC:0.0,
    MODE:'scale'
    })
SCALE_COPY({LIST_RES:'ME_DUMMY_2',
    LIST_INP:'ME_DUMMY_2',
    FAC:0.5,
    MODE:'prc-thresh'
    })
SCALE_COPY({LIST_RES:'ME_DUMMY_1',
    LIST_INP:'ME_DUMMY_2',
    FAC:1.0,
    MODE:'precond'
    })
PRINT_MEL({LIST:'ME_DUMMY_1'})
#
#DEF_OP_FROM_OCC({
#    LABEL:"DUMMY_2",
#    DESCR:'P,H|H,V|H,P'
#})
#DEF_ME_LIST({
#    LIST:'ME_DUMMY_2',
#    OPERATOR:'DUMMY_2',
#    IRREP:1,
#    '2MS':0,
#    AB_SYM:+1,
#    DIAG_TYPE:1,
#    # MAX_REC:3,
#    MIN_REC:1,
#    REC:2
#})
#
#ADD_UNITY({
#    # LIST:'ME_DUMMY_2',
#    FAC:1.0,
#    INIT:True,
#    MS_SYM_SIGN:-1
#})
#
#PRINT_MEL({LIST:'ME_DUMMY_2'})
| StarcoderdataPython |
1600563 | <reponame>ealogar/servicedirectory
'''
(c) Copyright 2013 Telefonica, I+D. Printed in Spain (Europe). All Rights
Reserved.
The copyright to the software program(s) is property of Telefonica I+D.
The program(s) may be used and or copied only with the express written
consent of Telefonica I+D or in accordance with the terms and conditions
stipulated in the agreement/contract under which the program(s) have
been supplied.
'''
import os
from setuptools import setup, find_packages
#README = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))

# Build the package list: prefix every package found under 'sd-api' and
# drop test packages. Built eagerly as a list -- on Python 3 `map`/`filter`
# return lazy iterators, so calling `.append()` on the filter object (as
# the previous code did) raised AttributeError.
_packages = [
    'sd-api.{0}'.format(pkg)
    for pkg in find_packages('sd-api')
    if pkg.find('.test') == -1
]
_packages.append('sd-api')

setup(
    name='service-directory',
    version='1.0.0',
    packages=_packages,
    package_data={'': ['static/rest_framework/js/*',
                       'static/rest_framework/css/*',
                       'static/rest_framework/img/*',
                       'schemas/*.json']},
    license='(C) Telefonica I+D',  # example license
    description='DNS for retrieving endpoints.',
    long_description='README',
    url='http://www.tid.es',
    author='<NAME>',
    author_email='<EMAIL>',
    classifiers=[
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',  # example license
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
    ],
)
| StarcoderdataPython |
60097 | <reponame>timothydmorton/CCL
from . import ccllib as lib
from .pyutils import check
import numpy as np
class Pk2D(object):
    """A power spectrum class holding the information needed to reconstruct an
    arbitrary function of wavenumber and scale factor.

    Args:
        pkfunc (:obj:`function`): a function returning a floating point
            number or numpy array with the signature `f(k,a)`, where k
            is a wavenumber (in units of Mpc^-1) and a is the scale
            factor. The function must able to take numpy arrays as `k`.
            The function must return the value(s) of the power spectrum
            (or its natural logarithm, depending on the value of
            `is_logp`. The power spectrum units should be compatible
            with those used by CCL (e.g. if you're passing a matter power
            spectrum, its units should be Mpc^3). If this argument is not
            `None`, this function will be sampled at the values of k and
            a used internally by CCL to store the linear and non-linear
            power spectra.
        a_arr (array): an array holding values of the scale factor
        lk_arr (array): an array holding values of the natural logarithm
            of the wavenumber (in units of Mpc^-1).
        pk_arr (array): a 2D array containing the values of the power
            spectrum at the values of the scale factor and the wavenumber
            held by `a_arr` and `lk_arr`. The shape of this array must be
            `[na,nk]`, where `na` is the size of `a_arr` and `nk` is the
            size of `lk_arr`. This array can be provided in a flattened
            form as long as the total size matches `nk*na`. The array can
            hold the values of the natural logarithm of the power
            spectrum, depending on the value of `is_logp`. If `pkfunc`
            is not None, then `a_arr`, `lk_arr` and `pk_arr` are ignored.
            However, either `pkfunc` or all of the last three array must
            be non-None. Note that, if you pass your own Pk array, you
            are responsible of making sure that it is sufficiently well
            sampled (i.e. the resolution of `a_arr` and `lk_arr` is high
            enough to sample the main features in the power spectrum).
            For reference, CCL will use bicubic interpolation to evaluate
            the power spectrum at any intermediate point in k and a.
        extrap_order_lok (int): extrapolation order to be used on k-values
            below the minimum of the splines (use 0, 1 or 2). Note that
            the extrapolation will be done in either log(P(k)) or P(k),
            depending on the value of `is_logp`.
        extrap_order_hik (int): extrapolation order to be used on k-values
            above the maximum of the splines (use 0, 1 or 2). Note that
            the extrapolation will be done in either log(P(k)) or P(k),
            depending on the value of `is_logp`.
        is_logp (boolean): if True, pkfunc/pkarr return/hold the natural
            logarithm of the power spectrum. Otherwise, the true value
            of the power spectrum is expected. Note that arrays will be
            interpolated in log space if `is_logp` is set to `True`.
        cosmo (:class:`~pyccl.core.Cosmology`): Cosmology object. The cosmology
            object is needed in order if `pkfunc` is not `None`. The object is
            used to determine the sampling rate in scale factor and
            wavenumber.
    """
    def __init__(self, pkfunc=None, a_arr=None, lk_arr=None, pk_arr=None,
                 is_logp=True, extrap_order_lok=1, extrap_order_hik=2,
                 cosmo=None):
        # `status` is threaded through the C library calls; `check` raises
        # on a non-zero value.
        status = 0
        if pkfunc is None:  # Initialize power spectrum from 2D array
            # Make sure input makes sense
            if (a_arr is None) or (lk_arr is None) or (pk_arr is None):
                raise ValueError("If you do not provide a function, "
                                 "you must provide arrays")
            pkflat = pk_arr.flatten()
            # Check dimensions make sense
            if (len(a_arr)*len(lk_arr) != len(pkflat)):
                raise ValueError("Size of input arrays is inconsistent")
        else:  # Initialize power spectrum from function
            # Check that the input function has the right signature
            # (must accept an array `k` and a scalar `a` as keywords).
            try:
                pkfunc(k=np.array([1E-2, 2E-2]), a=0.5)
            except Exception:
                raise ValueError("Can't use input function")
            if cosmo is None:
                raise ValueError("A cosmology is needed if initializing "
                                 "power spectrum from a function")
            # Set k and a sampling from CCL parameters
            nk = lib.get_pk_spline_nk(cosmo.cosmo)
            na = lib.get_pk_spline_na(cosmo.cosmo)
            a_arr, status = lib.get_pk_spline_a(cosmo.cosmo, na, status)
            check(status)
            lk_arr, status = lib.get_pk_spline_lk(cosmo.cosmo, nk, status)
            check(status)
            # Compute power spectrum on 2D grid, one scale factor at a time.
            pkflat = np.zeros([na, nk])
            for ia, a in enumerate(a_arr):
                pkflat[ia, :] = pkfunc(k=np.exp(lk_arr), a=a)
            pkflat = pkflat.flatten()
        # Hand the sampled grid to the C layer, which builds the spline.
        self.psp, status = lib.set_pk2d_new_from_arrays(lk_arr, a_arr, pkflat,
                                                        int(extrap_order_lok),
                                                        int(extrap_order_hik),
                                                        int(is_logp), status)
        check(status)
        # Flag used by __del__ to know whether there is a C object to free.
        self.has_psp = True

    def eval(self, k, a, cosmo):
        """Evaluate power spectrum.

        Args:
            k (float or array_like): wavenumber value(s) in units of Mpc^-1.
            a (float): value of the scale factor
            cosmo (:class:`~pyccl.core.Cosmology`): Cosmology object. The
                cosmology object is needed in order to evaluate the power
                spectrum outside the interpolation range in `a`. E.g. if you
                want to evaluate the power spectrum at a very small a, not
                covered by the arrays you passed when initializing this object,
                the power spectrum will be extrapolated from the earliest
                available value using the linear growth factor (for which a
                cosmology is needed).

        Returns:
            float or array_like: value(s) of the power spectrum.
        """
        # make sure we have growth factors for extrapolation
        cosmo.compute_growth()
        status = 0
        cospass = cosmo.cosmo
        # Normalize scalar input: the single-value C entry point expects
        # a float; arrays go through the multi-value entry point.
        if isinstance(k, int):
            k = float(k)
        if isinstance(k, float):
            f, status = lib.pk2d_eval_single(self.psp, np.log(k), a, cospass,
                                             status)
        else:
            k_use = np.atleast_1d(k)
            f, status = lib.pk2d_eval_multi(self.psp, np.log(k_use),
                                            a, cospass,
                                            k_use.size, status)
        check(status, cosmo)
        return f

    def __del__(self):
        """Free memory associated with this Pk2D structure.

        hasattr guards cover the case where __init__ raised before the
        attributes were set.
        """
        if hasattr(self, 'has_psp'):
            if self.has_psp and hasattr(self, 'psp'):
                lib.f2d_t_free(self.psp)
| StarcoderdataPython |
3389269 | <reponame>wintercircle/django-easy-select2
from django.contrib import admin
from django import forms
from easy_select2 import select2_modelform
from .models import Note, Category
class NoteAdmin(admin.ModelAdmin):
    # Render Note's relation/choice fields with Select2 widgets in the admin.
    form = select2_modelform(Note)


# Category uses the stock admin; Note gets the Select2-enabled form above.
admin.site.register(Category)
admin.site.register(Note, NoteAdmin)
| StarcoderdataPython |
3235791 | <filename>mindspore/common/parameter.py<gh_stars>1-10
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Parameter for cell."""
from copy import copy
import numpy as np
from .initializer import initializer
from .tensor import Tensor
from .._checkparam import _check_str_by_regular
from ..parallel._utils import _set_clone_info, _CloneInfo
__all__ = ['Parameter', 'ParameterTuple']
PARAMETER_NAME_DEFAULT = "Parameter"
PARAMETER_NAME_PREFIX_MAX_LEN = 1024
def _check_type(x):
    """Validate that *x* is a `Parameter`; raise ValueError otherwise."""
    if isinstance(x, Parameter):
        return True
    raise ValueError("Should be `Parameter` collection.")
class Parameter:
    """
    Parameter types of cell models.

    Note:
        Each parameter of Cell is represented by Parameter class.

    Args:
        default_input (Tensor): A parameter tensor.
        name (str): Name of the child parameter.
        requires_grad (bool): True if the parameter requires gradient. Default: True.
        layerwise_parallel (bool): A kind of model parallel mode. When layerwise_parallel is true in parallel mode,
            broadcast and gradients communication would not be applied on parameters. Default: False.
    """
    def __init__(self, default_input, name, requires_grad=True, layerwise_parallel=False):
        # All assignments below go through the property setters / validators.
        self.set_parameter_data(default_input)
        self.name = name
        self.requires_grad = requires_grad
        self.layerwise_parallel = layerwise_parallel
        self._is_init = False
        # Parallel-mode bookkeeping, copied onto clones (see `clone`).
        self.clone_info = _CloneInfo()

    def __repr__(self):
        format_str = 'Parameter (name={name})'
        return format_str.format(name=self._name)

    def __parameter__(self):
        """For parse check."""

    @property
    def name(self):
        """Get the name of the parameter."""
        return self._name

    @name.setter
    def name(self, name_):
        """
        Define a name for the parameter.

        Args:
            name_ (`str` or `None`): The name of the parameter. When the parameter is None or an empty string,
                the default value `PARAMETER_NAME_DEFAULT` is used.
        """
        if name_ is None:
            name_ = PARAMETER_NAME_DEFAULT
        elif isinstance(name_, str):
            name_ = name_.strip()
            if name_ == '':
                name_ = PARAMETER_NAME_DEFAULT
            if len(name_) > PARAMETER_NAME_PREFIX_MAX_LEN:
                raise ValueError("The length of the '{}' name should be less than {}.".
                                 format(name_, PARAMETER_NAME_PREFIX_MAX_LEN))
        else:
            raise ValueError("The type of the name should be `str` or `None`.")
        self._name = name_

    @property
    def is_init(self):
        """Get init status of the parameter."""
        return self._is_init

    @is_init.setter
    def is_init(self, is_init_):
        """
        Set init status of the parameter.

        Args:
            is_init_ (bool): The init status of the parameter.
        """
        self._is_init = is_init_

    def clone(self, prefix, init='same'):
        """
        Clone the parameter.

        Args:
            prefix (str): Namespace of parameter.
            init (Union[Tensor, str, Initializer, numbers.Number]): Initialize the shape of the parameter.
                Default: 'same'.

        Returns:
            Parameter, a new parameter.
        """
        _check_str_by_regular(prefix)
        # Shallow copy: the clone initially shares `default_input` with the
        # original unless `init` != 'same' replaces it below.
        x = copy(self)
        x.name = prefix + '.' + x.name
        x.is_init = False
        if init != 'same':
            # NOTE(review): `shape()`/`dtype()` are invoked as methods --
            # presumably the Tensor API used by this file; confirm against
            # the `Tensor` class before refactoring.
            shape = self.default_input.shape()
            dtype = self.default_input.dtype()
            x.default_input = initializer(init, shape=shape, dtype=dtype)
        # Give the clone its own clone_info and register the relationship.
        x.clone_info = copy(self.clone_info)
        _set_clone_info(self.clone_info, x.clone_info)
        return x

    @property
    def layerwise_parallel(self):
        return self._layerwise_parallel

    @layerwise_parallel.setter
    def layerwise_parallel(self, value=True):
        if not isinstance(value, bool):
            raise TypeError("`layerwise_parallel` parameter must be bool type")
        self._layerwise_parallel = value

    @property
    def requires_grad(self):
        """Return whether the parameter requires gradient."""
        return self._requires_grad

    @requires_grad.setter
    def requires_grad(self, value=True):
        if not isinstance(value, bool):
            raise TypeError("`requires_grad` parameter must be bool type")
        self._requires_grad = value

    @property
    def data(self):
        # Alias for the underlying tensor/number.
        return self.default_input

    def set_parameter_data(self, data):
        """Validate and store `data` as the parameter value."""
        # bool is excluded explicitly because it is a subclass of int.
        if isinstance(data, (Tensor, list, int, float,
                             np.float16, np.float32, np.int32, np.int16, np.ndarray)) and not isinstance(data, bool):
            if isinstance(data, Tensor):
                # make a copy of Tensor to init the parameter
                data = Tensor(data.asnumpy().copy())
            self.default_input = data
        else:
            raise ValueError("Parameter data must be tensor or number.")
class ParameterTuple(tuple):
    """
    Class for storing tuple of parameters.

    Note:
        Used to store the parameters of the network into the parameter tuple collection.
    """
    def __new__(cls, iterable):
        """Create instance object of ParameterTuple.

        Every element is validated by `_check_type`, which raises for
        non-Parameter entries.
        """
        checked = (x for x in iterable if _check_type(x))
        return tuple.__new__(ParameterTuple, checked)

    def clone(self, prefix, init='same'):
        """
        Clone each contained parameter into a new ParameterTuple.

        Args:
            prefix (str): Namespace of parameter.
            init (str): Initialize the shape of the parameter. Default: 'same'.

        Returns:
            Tuple, the new Parameter tuple.
        """
        _check_str_by_regular(prefix)
        return ParameterTuple(x.clone(prefix, init) for x in self)

    def __parameter_tuple__(self):
        """For parse check."""
| StarcoderdataPython |
4842707 | # -*- coding: utf-8 -*-
"""Console script for dmriprep."""
import sys
import click
from . import run
from . import io
import os
@click.command()
@click.option('--participant-label', help="The label(s) of the participant(s) that should be "
                                          "analyzed. The label corresponds to "
                                          "sub-<participant_label> from the BIDS spec (so it does "
                                          "not include 'sub-'). If this parameter is not provided "
                                          "all subjects will be analyzed. Multiple participants "
                                          "can be specified with a space separated list.",
              default=None
              )
@click.argument('bids_dir',
                )
@click.argument('output_dir',
                )
@click.argument('analysis_level',
                type=click.Choice(['participant', 'group']),
                default='participant')
def main(participant_label, bids_dir, output_dir, analysis_level="participant"):
    """
    BIDS_DIR: The directory with the input dataset formatted according to the BIDS standard.

    OUTPUT_DIR: The directory where the output files should be stored.
    If you are running a group level analysis, this folder
    should be prepopulated with the results of
    the participant level analysis.

    ANALYSIS_LEVEL: Level of the analysis that will be performed. Multiple
    participant level analyses can be run independently
    (in parallel).
    """
    # Equality, not identity: `is not 'participant'` compared object
    # identity of string literals, which is implementation-dependent.
    if analysis_level != 'participant':
        raise NotImplementedError('The only valid analysis level for dmriprep is participant at the moment.')
    inputs = io.get_bids_files(participant_label, bids_dir)
    for subject_inputs in inputs:
        run.run_dmriprep_pe(**subject_inputs,
                            working_dir=os.path.join(output_dir, 'scratch'),
                            out_dir=output_dir)
    return 0
# Script entry point: propagate main()'s return value as the exit code.
if __name__ == "__main__":
    sys.exit(main())  # pragma: no cover
| StarcoderdataPython |
1675434 | <filename>homeassistant/components/yale_smart_alarm/entity.py
"""Base class for yale_smart_alarm entity."""
from homeassistant.const import CONF_NAME, CONF_USERNAME
from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC
from homeassistant.helpers.entity import DeviceInfo, Entity
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DOMAIN, MANUFACTURER, MODEL
from .coordinator import YaleDataUpdateCoordinator
class YaleEntity(CoordinatorEntity[YaleDataUpdateCoordinator], Entity):
    """Base implementation for Yale device."""

    def __init__(self, coordinator: YaleDataUpdateCoordinator, data: dict) -> None:
        """Initialize an Yale device."""
        super().__init__(coordinator)
        address = data["address"]
        self._attr_name: str = data["name"]
        # The device address doubles as the unique entity id.
        self._attr_unique_id: str = address
        self._attr_device_info: DeviceInfo = DeviceInfo(
            identifiers={(DOMAIN, address)},
            manufacturer=MANUFACTURER,
            model=MODEL,
            name=self._attr_name,
            via_device=(DOMAIN, self.coordinator.entry.data[CONF_USERNAME]),
        )
class YaleAlarmEntity(CoordinatorEntity[YaleDataUpdateCoordinator], Entity):
    """Base implementation for Yale Alarm device."""

    def __init__(self, coordinator: YaleDataUpdateCoordinator) -> None:
        """Initialize an Yale device."""
        super().__init__(coordinator)
        info = coordinator.data["panel_info"]
        # The alarm panel is identified by the account username.
        self._attr_device_info = DeviceInfo(
            connections={(CONNECTION_NETWORK_MAC, info["mac"])},
            identifiers={(DOMAIN, coordinator.entry.data[CONF_USERNAME])},
            manufacturer=MANUFACTURER,
            model=MODEL,
            name=coordinator.entry.data[CONF_NAME],
            sw_version=info["version"],
        )
| StarcoderdataPython |
1649113 | <gh_stars>0
#!/usr/bin/env python3
"""
@author: <NAME>
@email: <EMAIL>
* CLASS MODULE *
Contains the core classes needed for the Water.py main file:
- Particle
- Molecule (inherits the particle class)
- Force (inherits the molecule class)
- IntegratorNH (inherits the particle class)
Latest update: July 12th 2021
"""
import numpy as np
from numba import njit
from numba import int32, float64
from numba import types, typed, typeof, deferred_type
from numba.experimental import jitclass
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
"""" PHYSICAL CONSTANTS """""""""""""""""""""""""""""""""""
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
KB = 1.38E-23
enot = 8.854E-12
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
"""" CLASS: PARTICLE """""""""""""""""""""""""""""""""""""""
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
specparticle = [
('ID', int32),
('mID', int32),
('type', int32),
('pos', float64[:]),
('vel', float64[:]),
('force', float64[:]),
('m', float64),
('charge', float64),
]
@jitclass(specparticle)
class Particle:
    """Particle class containing positions, velocities, mass, particle-type"""
    def __init__(self,position, velocity, mass, type, charge = 0, id = 0):
        self.ID = id
        self.mID = 0 #molecule ID (if particle is put into molecule)
        self.type = type
        self.pos = position
        self.vel = velocity
        # NOTE(review): forces are initialized to (1, 1, 1), not zero --
        # Force.initialize_forces() resets them before accumulation, but
        # confirm the nonzero init is intentional (numba typing workaround?).
        self.force = np.zeros(3)+1
        self.m = mass
        self.charge = charge
    def kinetic_energy(self):
        """Return the squared speed |v|^2 of the particle.

        NOTE(review): despite the name this omits the 0.5*m factor --
        confirm callers expect |v|^2 (IntegratorNH computes 0.5*m*v^2 itself).
        """
        kinetic = self.vel[0]**2 + self.vel[1]**2 + self.vel[2]**2
        return kinetic
    def norm(self):
        """Return the Euclidean distance of the particle from the origin."""
        return np.linalg.norm(self.pos)
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
"""" CLASS: WATER MOLECULE """""""""""""""""""""""""""""""""
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
specmolecule = [
('ID', int32),
('oxygen', Particle.class_type.instance_type),
('hydrogen1', Particle.class_type.instance_type),
('hydrogen2', Particle.class_type.instance_type),
('angle', float64),
('bond_len', float64),
]
@jitclass(specmolecule)
class Water_Molecule:
    """Water Molecule class containing two hydrogen and one oxygen"""
    def __init__(self, id):
        self.ID = id
        self.angle = 0      # last computed H-O-H angle (radians), see get_angle()
        self.bond_len = 0   # cached bond length slot (not updated by get_bond_len)
    def load_atoms(self, oxygen, hydrogen1, hydrogen2):
        """Loads the molecule with the input atoms. Atoms must be
        already fully defined (no further manipulation needed)"""
        self.hydrogen1 = hydrogen1
        self.hydrogen2 = hydrogen2
        self.oxygen = oxygen
    def distribute_atoms(self, rzero, azero):
        """Distributes the hydrogen atoms spatially with respect
        to the central oxygen atom.

        NOTE(review): hydrogens are placed on a sphere of radius rzero around
        the ORIGIN, which assumes the oxygen sits at (0,0,0) -- confirm the
        caller positions the oxygen accordingly. azero is in radians here.
        """
        #set same position
        self.hydrogen1.pos[0] = np.random.uniform(-1.,1.)
        self.hydrogen1.pos[1] = np.random.uniform(-1.,1.)
        self.hydrogen1.pos[2] = np.random.uniform(-1.,1.)
        #rotate second hydrogen position with respect to oxygen of azero degrees
        sign = np.random.choice(np.array([-1,1]))
        c = np.cos(sign*azero)
        s = np.sin(sign*azero)
        # Elementary rotation matrices about x, y and z.
        Rx = np.array(((1,0,0),(0,c,-s),(0,s,c)), dtype = float64)
        Ry = np.array(((c,0,s),(0,1,0),(-s,0,c)), dtype = float64)
        Rz = np.array(((c,-s,0),(s,c,0),(0,0,1)), dtype = float64)
        # Pick a random rotation axis so molecule orientations are isotropic.
        axis = np.random.choice(np.array((0,1,2)))
        if axis == 0: self.hydrogen2.pos = Rx @ self.hydrogen1.pos
        elif axis == 1: self.hydrogen2.pos = Ry @ self.hydrogen1.pos
        else: self.hydrogen2.pos = Rz @ self.hydrogen1.pos
        #normalize positions so that the bond length is correct
        self.hydrogen1.pos *= rzero/np.linalg.norm(self.hydrogen1.pos)
        self.hydrogen2.pos *= rzero/np.linalg.norm(self.hydrogen2.pos)
    def get_bond_len(self):
        """Return the mean of the two O-H bond lengths."""
        rijx = self.oxygen.pos[0] - self.hydrogen1.pos[0]
        rijy = self.oxygen.pos[1] - self.hydrogen1.pos[1]
        rijz = self.oxygen.pos[2] - self.hydrogen1.pos[2]
        rij = np.sqrt(rijx*rijx + rijy*rijy + rijz*rijz)
        rijx = self.oxygen.pos[0] - self.hydrogen2.pos[0]
        rijy = self.oxygen.pos[1] - self.hydrogen2.pos[1]
        rijz = self.oxygen.pos[2] - self.hydrogen2.pos[2]
        rij += np.sqrt(rijx*rijx + rijy*rijy + rijz*rijz)
        return rij/2
    def get_distance_OH(self, id):
        """Return the vector from the oxygen to hydrogen `id` (1 or 2)."""
        if id == 1: dist = self.hydrogen1.pos - self.oxygen.pos
        else: dist = self.hydrogen2.pos - self.oxygen.pos
        return dist
    def get_angle(self):
        """Return (and cache in self.angle) the angle between the two
        hydrogen position vectors, clamping the dot product to [-1, 1]
        against floating-point round-off before arccos.

        NOTE(review): the angle is measured at the ORIGIN (positions are
        normalized directly), not at the oxygen position -- correct only
        while the oxygen sits at (0,0,0); confirm.
        """
        v1 = self.hydrogen1.pos/np.linalg.norm(self.hydrogen1.pos)
        v2 = self.hydrogen2.pos/np.linalg.norm(self.hydrogen2.pos)
        dot = np.dot(v1, v2)
        if dot > 1.0:
            dot = 1.0
        elif dot < -1.0:
            dot = -1.0
        else:
            dot = dot
        self.angle = np.arccos(dot)
        return self.angle
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
"""" CLASS: FORCES """""""""""""""""""""""""""""""""""""""""
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
specforce = [
('kbond', float64),
('kangle', float64),
('rnot', float64),
('anot', float64),
('sigma', float64),
('rcut', float64),
('epsilon', float64),
('molecules', types.ListType(Water_Molecule.class_type.instance_type)),
('N', int32)
]
@jitclass(specforce)
class Force:
    """Force evaluator: bonded (harmonic bond + angle) and non-bonded
    (Lennard-Jones between oxygens) contributions for the molecule list."""
    def __init__(self, kbond, kangle, rnot, anot, sigma, epsilon, rcut):
        self.kbond = kbond        # bond spring constant
        self.kangle = kangle      # angle spring constant
        self.rnot = rnot          # equilibrium O-H bond length
        self.anot = np.radians(anot)  # equilibrium H-O-H angle (input in degrees)
        self.sigma = sigma        # LJ length parameter
        self.rcut = rcut          # LJ cutoff distance
        self.epsilon = epsilon    # LJ well depth
        self.N = 0                # number of molecules (set in add_molecules)
    def add_molecules(self, molecules):
        """Attach the typed molecule list and record its length."""
        self.molecules = molecules
        self.N = len(self.molecules)
    def initialize_forces(self):
        """Zero all per-atom force accumulators before a new evaluation."""
        for i in range(self.N):
            self.molecules[i].oxygen.force = np.zeros(3)
            self.molecules[i].hydrogen1.force = np.zeros(3)
            self.molecules[i].hydrogen2.force = np.zeros(3)
    def intra_molecular(self):
        """Accumulate bonded (bond + angle) forces within each molecule."""
        _bond_force(self.molecules, self.N, self.kbond, self.rnot)
        # Currently not working properly -> uncomment to see the effect on the bond len.
        _angle_force(self.molecules, self.N, self.kangle, self.anot)
    def inter_molecular(self):
        """Accumulate non-bonded LJ forces between oxygen atoms."""
        _inter_force(self.molecules, self.N, self.sigma, self.rcut, self.epsilon)
@njit()
def _bond_force(molecules, n_molecules, kbond, rnot):
    """Computes the bond contribution to the intra-molecular force.

    Harmonic bond potential U = 0.5*k*(d - r0)^2 between the oxygen and each
    hydrogen.  The force on a hydrogen is F_H = -k*(d - r0)*r_hat, with r_hat
    pointing from the oxygen toward that hydrogen (get_distance_OH returns
    H.pos - O.pos); the oxygen receives the equal-and-opposite reaction.
    """
    for i in range(n_molecules):
        #force between O and H1
        r = molecules[i].get_distance_OH(1)
        dist = np.linalg.norm(r)
        # BUGFIX: the restoring force must oppose the displacement from r0.
        # The previous positive sign pushed a stretched bond further apart
        # (and compressed a short bond further), making bonds unstable; the
        # negative sign matches the convention used in _angle_force.
        f = -kbond*(dist - rnot)*r/dist
        molecules[i].oxygen.force -= f
        molecules[i].hydrogen1.force += f
        #force between O and H2
        r = molecules[i].get_distance_OH(2)
        dist = np.linalg.norm(r)
        f = -kbond*(dist - rnot)*r/dist
        molecules[i].oxygen.force -= f
        molecules[i].hydrogen2.force += f
@njit()
def _angle_force(molecules, n_molecules, kangle, anot):
    """Computes the angle contribution to the intra-molecular force.

    Harmonic angle potential U = 0.5*k*(theta - theta0)^2; force directions
    are built perpendicular to each O-H vector within the H-O-H plane.

    NOTE(review): flagged "not working properly" at the call site in
    Force.intra_molecular. Like Water_Molecule.get_angle, it treats the
    hydrogen POSITIONS as the bond vectors, i.e. assumes the oxygen is at
    the origin -- confirm before relying on it.
    """
    for i in range(n_molecules):
        # define unit vectors
        if(np.linalg.norm(molecules[i].hydrogen1.pos) <= 0): print("error")
        if(np.linalg.norm(molecules[i].hydrogen2.pos) <= 0): print("error")
        # direction of the forces: h x (h1 x h2) lies in the H-O-H plane,
        # perpendicular to the corresponding O-H vector.
        d1 = np.cross(molecules[i].hydrogen1.pos,np.cross(molecules[i].hydrogen1.pos,molecules[i].hydrogen2.pos))
        d2 = np.cross(-molecules[i].hydrogen2.pos, np.cross(molecules[i].hydrogen1.pos,molecules[i].hydrogen2.pos))
        d1 = d1/np.linalg.norm(d1)
        d2 = d2/np.linalg.norm(d2)
        # define dtheta/dr (magnitude of the angular gradient, 1/|r|)
        dtheta1 = 1/np.linalg.norm(molecules[i].hydrogen1.pos)
        dtheta2 = 1/np.linalg.norm(molecules[i].hydrogen2.pos)
        # get angle between vectors
        angle = molecules[i].get_angle()
        # get forces: restoring torque proportional to (angle - anot)
        f1 = - kangle*(angle - anot)*dtheta1*d1
        f2 = - kangle*(angle - anot)*dtheta2*d2
        molecules[i].hydrogen1.force += f1
        molecules[i].hydrogen2.force += f2
        # Oxygen takes the reaction so total force on the molecule is zero.
        molecules[i].oxygen.force -= f1 + f2
        #molecules[i].oxygen.force += f*dv1 - f*dv2
        #molecules[i].oxygen.force += f*dv1/np.linalg.norm(molecules[i].hydrogen1.pos) + f*dv2/np.linalg.norm(molecules[i].hydrogen2.pos)
@njit()
def _inter_force(molecules, n_molecules, sigma, rcut, e):
    """Computes the inter-molecular force between oxygen atoms, using a Lennard Jones
    12-6 potential and as well as a electrostatic Coulombic potential.

    Note on exponents: `r` below holds the SQUARED distance d^2, so
    r**7 = d^14 and r**4 = d^8, giving the correct LJ force components
    F_x = 48*e*(sigma^12/d^14 - 0.5*sigma^6/d^8)*rx.  The cutoff test is
    likewise done in squared units (r < rcut^2), avoiding a sqrt per pair.
    """
    for i in range(n_molecules-1):
        for j in range(i+1, n_molecules):
            rx = molecules[i].oxygen.pos[0] - molecules[j].oxygen.pos[0]
            ry = molecules[i].oxygen.pos[1] - molecules[j].oxygen.pos[1]
            rz = molecules[i].oxygen.pos[2] - molecules[j].oxygen.pos[2]
            r = rx*rx + ry*ry + rz*rz
            if(r < rcut*rcut):
                fx = 48 * e * (sigma**12 * rx / r**7 - 0.5 * sigma**6 * rx / r**4)
                fy = 48 * e * (sigma**12 * ry / r**7 - 0.5 * sigma**6 * ry / r**4)
                fz = 48 * e * (sigma**12 * rz / r**7 - 0.5 * sigma**6 * rz / r**4)
                # Newton's third law: apply equal and opposite forces.
                molecules[i].oxygen.force[0] += fx
                molecules[j].oxygen.force[0] -= fx
                molecules[i].oxygen.force[1] += fy
                molecules[j].oxygen.force[1] -= fy
                molecules[i].oxygen.force[2] += fz
                molecules[j].oxygen.force[2] -= fz
    # Coulomb interaction missing
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
"""" CLASS: INTEGRATOR """""""""""""""""""""""""""""""""""
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
specintegrator = [
('dt', float64), # a simple scalar field
('n', int32),
('m', int32),
('T', float64),
('Q', float64),
('xi', float64),
('lns', float64),
('system_particles', types.ListType(Particle.class_type.instance_type)), # an array field
]
@jitclass(specintegrator)
class IntegratorNH:
    """Nose-Hoover thermostatted integrator.

    half_step drifts positions and half-updates velocities (implicit in the
    friction xi), then refreshes xi; full_step completes the velocity update.
    """
    def __init__(self, timestep, temp):
        self.dt = timestep
        self.n = 0       # particle count, set in add_particles
        self.m = 50      # thermostat relaxation multiplier (in units of dt)
        self.T = temp    # target temperature
        self.Q = 0       # thermostat inertia, derived in add_particles
        self.xi = 0      # thermostat friction coefficient
        self.lns = 0     # integral of xi*dt (log of the scaling variable s)
    def add_particles(self, particles):
        """Attach the particle list and size the thermostat inertia Q."""
        self.system_particles = particles
        self.n = len(self.system_particles)
        self.Q = (3*self.n + 1) * KB * self.dt * self.T * self.m * self.m
    def half_step(self):
        """Implicit half velocity update, position drift, and xi update."""
        # BUGFIX: the kinetic-energy accumulator must cover ALL particles.
        # It was previously reset to zero at the top of the loop body, so the
        # thermostat force G only ever saw the LAST particle's kinetic energy
        # and the thermostat target 3*N*kB*T was never comparable to it.
        kinetic = 0
        for i in range(self.n):
            # update half velocity
            self.system_particles[i].vel[0] = (self.system_particles[i].vel[0] + 0.5 * self.system_particles[i].force[0] * self.dt / self.system_particles[i].m) / (1 + self.xi * self.dt/2)
            self.system_particles[i].vel[1] = (self.system_particles[i].vel[1] + 0.5 * self.system_particles[i].force[1] * self.dt / self.system_particles[i].m) / (1 + self.xi * self.dt/2)
            self.system_particles[i].vel[2] = (self.system_particles[i].vel[2] + 0.5 * self.system_particles[i].force[2] * self.dt / self.system_particles[i].m) / (1 + self.xi * self.dt/2)
            # update position
            self.system_particles[i].pos[0] += self.system_particles[i].vel[0] * self.dt
            self.system_particles[i].pos[1] += self.system_particles[i].vel[1] * self.dt
            self.system_particles[i].pos[2] += self.system_particles[i].vel[2] * self.dt
            # accumulate kinetic energy 0.5*m*v^2
            kinetic += 0.5 * self.system_particles[i].m * (self.system_particles[i].vel[0]**2 + self.system_particles[i].vel[1]**2 + self.system_particles[i].vel[2]**2)
        # G factor: thermostat acceleration from the deviation of 2K from 3*N*kB*T
        G = (2*kinetic - 3*self.n*KB*self.T)/self.Q
        self.lns += self.xi * self.dt + 0.5 * G * self.dt * self.dt
        self.xi += G*self.dt
    def full_step(self):
        """Complete the velocity update with the refreshed friction xi."""
        for i in range(self.n):
            # updates velocities to full step
            self.system_particles[i].vel[0] += 0.5*self.dt*(self.system_particles[i].force[0]/self.system_particles[i].m - self.xi*self.system_particles[i].vel[0])
            self.system_particles[i].vel[1] += 0.5*self.dt*(self.system_particles[i].force[1]/self.system_particles[i].m - self.xi*self.system_particles[i].vel[1])
            self.system_particles[i].vel[2] += 0.5*self.dt*(self.system_particles[i].force[2]/self.system_particles[i].m - self.xi*self.system_particles[i].vel[2])
| StarcoderdataPython |
class Solution:
    """
    https://leetcode.com/problems/majority-element/

    Given an array of size n, find the majority element. The majority
    element is the element that appears more than floor(n/2) times.

    You may assume that the array is non-empty and the majority element
    always exists in the array.
    """
    @staticmethod
    def majorityElement(nums):
        """Return the majority element of *nums* via Boyer-Moore voting.

        :type nums: List[int]
        :rtype: int

        O(n) time, O(1) space. Correctness relies on the problem guarantee
        that a majority element exists: whenever the counter hits zero the
        candidate is replaced, and the true majority element (with more
        than n/2 votes) always survives to the end.

        (Two slower commented-out alternatives -- Counter.most_common and a
        manual frequency dict -- were removed as dead code.)
        """
        count = majority = 0
        for num in nums:
            if count == 0:
                majority = num
            if majority == num:
                count += 1
            else:
                count -= 1
        return majority
| StarcoderdataPython |
185194 | <gh_stars>10-100
import json
import os
import subprocess
import sys
import glob
import time
import webbrowser
import serial.tools.list_ports
import socket
import netifaces
from random import randrange
print("WUTUP!!!")
def cls():
    """Clear the terminal screen: 'cls' on Windows (os.name == 'nt'), 'clear' elsewhere."""
    command = 'cls' if os.name == 'nt' else 'clear'
    os.system(command)
def Diff(li1, li2):
    """Return the symmetric difference of two lists: the elements present in
    exactly one of *li1* / *li2* (duplicates collapsed, order unspecified --
    same contract as the original set-based implementation, now expressed
    directly with the ^ set operator instead of two subtractions and three
    redundant list() wrappers)."""
    return list(set(li1) ^ set(li2))
def printMessage(message):
    """Print *message* framed between two banner lines of '@' characters."""
    banner = "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@"
    print(banner)
    print(message)
    print(banner)
    print()
# def install(package):
# print("Installing " + package)
# # printMessage("Installing " + package)
# subprocess.check_call([sys.executable, "-m", "pip", "install", package])
# # print()
def list_serial_options(isPort):
    """Enumerate connected serial ports.

    Returns device paths (e.g. '/dev/ttyUSB0') when *isPort* is truthy,
    otherwise human-readable port descriptions.
    """
    ports = serial.tools.list_ports.comports()
    if isPort:
        return [p.device for p in ports]
    return [str(p) for p in ports]
def writeWifiJSON(ssid, password, host):
    """Persist Wi-Fi credentials and target host as JSON to 'wifi.json' in
    the current working directory.

    Improvement: the file is now opened with a `with` block so it is closed
    even if the write raises (the original relied on an explicit close()),
    and json.dump streams directly to the file handle.
    """
    wifiJSON = {
        "ssid": ssid,
        "password": password,
        "host": host,
    }
    with open("wifi.json", "w") as f:
        json.dump(wifiJSON, f)
def loading(loadingMessage, seconds):
    """Render a crude textual progress bar for *seconds* seconds.

    Each of the 4 ticks per second reprints the banner message, grows the
    '#' bar by one character, sleeps 250 ms and clears the screen.
    """
    progress = ""
    for _ in range(seconds * 4):
        printMessage(loadingMessage)
        progress += "#"
        print(progress)
        time.sleep(0.25)
        cls()
def Subnet2CIDR(argument):
    """Convert a dotted-quad subnet mask (e.g. '255.255.255.0') to CIDR
    notation (e.g. '/24').

    Returns 'Invalid Subnet Mask' when *argument* is not a valid contiguous
    IPv4 netmask, matching the original function's fallback value.

    BUGFIX: the original hard-coded lookup table was corrupted -- the keys
    for /3 and /1 contained placeholder host addresses ('192.168.127.12',
    '172.16.31.10') instead of the masks 224.0.0.0 and 128.0.0.0, and
    '255.255.254.0' was mislabeled '/22' (it has 23 set bits, i.e. /23).
    Computing the prefix length from the mask removes the whole error class.
    """
    import ipaddress  # local import keeps this fix self-contained
    try:
        # ip_network validates that the mask is a proper contiguous netmask
        # and exposes its prefix length directly.
        return "/%d" % ipaddress.ip_network("0.0.0.0/" + argument).prefixlen
    except ValueError:
        return "Invalid Subnet Mask"
def CreatePyKilnShortcut(host, ip):
    """Drop a Windows-style .url internet shortcut on the user's Desktop that
    opens PyKiln for *ip*; *host* overrides the default pykiln.com target."""
    if sys.platform == 'darwin':
        desktop = os.path.join(os.path.expanduser('~'), 'Desktop')
    else:
        desktop = os.path.join(os.environ['USERPROFILE'], 'Desktop')
    # Random suffix avoids clobbering an existing shortcut for the same ip.
    shortcutName = "PyKiln-" + ip + "-(" + str(randrange(99)) + ").url"
    shortcutURL = "http://pykiln.com/" if host == "" else "http://" + host
    contents = "[InternetShortcut]\n" + "URL=" + shortcutURL + "?ip=" + ip + "\n" + "\n"
    with open(os.path.join(desktop, shortcutName), "w") as f:
        f.write(contents)
def OpenBrowser(ipURL):
    """Open *ipURL* in the default browser: the `open` command on macOS,
    the stdlib webbrowser module everywhere else."""
    if sys.platform == 'darwin':  # in case of OS X
        subprocess.Popen(['open', ipURL])
        return
    webbrowser.open(ipURL, new=0, autoraise=True)
def GetIP():
myIP = socket.gethostbyname(socket.gethostname())
interface_list = netifaces.interfaces()
# Get addresses, netmask, etc. information
address_entries = (netifaces.ifaddresses(iface) for iface in interface_list)
address_entries2 = (netifaces.ifaddresses(iface) for iface in interface_list)
# Only pay attention to ipv4 address types
ipv4_address_entries = (address[netifaces.AF_INET] for address in address_entries if netifaces.AF_INET in address)
ipv4_address_entries2 = (address2[netifaces.AF_INET] for address2 in address_entries2 if netifaces.AF_INET in address2)
# Since multiple addresses can be associated, only look at the first ip address
ipv4_addresses = [address[0]['addr'] for address in ipv4_address_entries]
dns = [address2[0]['netmask'] for address2 in ipv4_address_entries2]
print(ipv4_addresses)
print(dns)
ipCIDRList = []
ipList = []
for checkIP, checkDNS in zip(ipv4_addresses, dns):
print("{}, {}".format(checkIP, checkDNS))
if checkIP != "127.0.0.1":
ipCIDRList.append(checkIP + Subnet2CIDR(checkDNS))
ipCIDRString = ','.join(map(str,ipCIDRList))
print(ipCIDRString)
OpenBrowser("http://pykiln.com/?devices=" + ipCIDRString)
CreatePyKilnShortcut("", "192.168.50.45") | StarcoderdataPython |
3211548 | <filename>simulation_site/simulation/models.py
from django.db import models
from django.core.urlresolvers import reverse
from django.core.validators import MaxValueValidator, MinValueValidator
# Create your models here.
class Resource(models.Model):
    """A named resource whose capacity providers hold (see Provider_Resource)."""
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=60)
    def __unicode__(self): # Python 3: def __str__(self):
        return self.name
class CostFunction(models.Model):
    """Catalog entry for a cost-function implementation (looked up by
    class_name) with a continuous or discrete range."""
    CONTINOUS = 'C'  # NOTE(review): misspelling kept -- referenced as-is below
    DISCRETE = 'D'
    RANGE_CHOICES = (
        (CONTINOUS, 'Continous'),
        (DISCRETE, 'Discrete'),
    )
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=60)
    class_name = models.CharField(max_length=60)
    range_function = models.CharField(max_length=2, choices=RANGE_CHOICES, default=CONTINOUS)
    def __unicode__(self): # Python 3: def __str__(self):
        return self.name
class ContinuousCostFunction(models.Model):
    """One named parameter/value pair of a continuous CostFunction."""
    id = models.AutoField(primary_key=True)
    costfunction = models.ForeignKey('CostFunction')
    parameter = models.CharField(max_length=60)
    value = models.FloatField(default=0)
class ProbabilityDistribution(models.Model):
    """Catalog entry for a probability distribution implementation (class_name)
    over a continuous or discrete domain."""
    CONTINOUS = 'C'
    DISCRETE = 'D'
    DOMAIN_CHOICES = (
        (CONTINOUS, 'Continous'),
        (DISCRETE, 'Discrete'),
    )
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=60)
    class_name = models.CharField(max_length=60)
    domain = models.CharField(max_length=2, choices=DOMAIN_CHOICES, default=CONTINOUS)
    def get_absolute_url(self):
        return reverse('probabilities-view', kwargs={'pk': self.id})
    # NOTE(review): this class defines __str__ while every sibling defines
    # __unicode__ (py2-era codebase) -- confirm which convention is intended.
    def __str__(self): # Python 3: def __str__(self):
        return self.name
class DiscreteProbabilityDistribution(models.Model):
    """One (value, probability) point of a discrete ProbabilityDistribution;
    probability is constrained to [0, 1]."""
    id = models.AutoField(primary_key=True)
    probability_id = models.ForeignKey('ProbabilityDistribution')
    value = models.FloatField(default=0)
    label = models.CharField(max_length=60, blank=True)
    probability = models.FloatField(default=0,
                                    validators=[MaxValueValidator(1),
                                                MinValueValidator(0)])
class ContinuousProbabilityDistribution(models.Model):
    """One named parameter/value pair of a continuous ProbabilityDistribution."""
    id = models.AutoField(primary_key=True)
    probability_id = models.ForeignKey('ProbabilityDistribution')
    parameter = models.CharField(max_length=60)
    value = models.FloatField(default=0)
class Unit(models.Model):
    """Measurement unit (name + short symbol) for decision variables."""
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=60)
    symbol =models.CharField(max_length=3)
    def __unicode__(self): # Python 3: def __str__(self):
        return self.name
class DecisionVariable(models.Model):
    """A variable a provider optimizes (quality or price), bounded to
    [min_value, max_value], tied to a resource, unit, two probability
    distributions (sensitivity and value) and a cost function."""
    MAXIMIZE = 'M'
    MINIMIZE = 'L'  # NOTE(review): 'L' rather than the expected 'N'/'I' -- stored code, do not change
    OPT_CHOICES = (
        (MAXIMIZE, 'Maximize'),
        (MINIMIZE, 'Minimize'),
    )
    QUALITY = 'Q'
    PRICE = 'P'
    MOD_CHOICES = (
        (QUALITY, 'Quality'),
        (PRICE, 'Price'),
    )
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=60)
    optimization = models.CharField(max_length=2, choices=OPT_CHOICES, default=MAXIMIZE)
    min_value = models.FloatField(default=0)
    max_value = models.FloatField(default=0)
    modeling = models.CharField(max_length=2, choices=MOD_CHOICES, default=QUALITY)
    resource = models.ForeignKey('Resource')
    unit = models.ForeignKey('Unit')
    sensitivity_distribution = models.ForeignKey('ProbabilityDistribution', related_name='sensitivity')
    value_distribution = models.ForeignKey('ProbabilityDistribution', related_name='value')
    cost_function = models.ForeignKey('CostFunction', related_name='cost')
    def __unicode__(self): # Python 3: def __str__(self):
        return self.name
class Service(models.Model):
    """A marketed service: demand/traffic-converter file references plus the
    set of decision variables it is characterized by."""
    FILE = 'F'
    DATABASE = 'D'
    CONVERTER_CHOICES = (
        (DATABASE, 'Database'),
        (FILE, 'File'),
    )
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=60)
    file_name_demand = models.CharField(max_length=100, verbose_name= 'Demand file')
    converter_origin = models.CharField(max_length=1, choices=CONVERTER_CHOICES, default=DATABASE)
    file_name_converter = models.CharField(max_length=100, verbose_name= 'Traffic converter')
    decision_variables = models.ManyToManyField(DecisionVariable, through='Service_DecisionVariable')
    def __unicode__(self): # Python 3: def __str__(self):
        return self.name
class Service_DecisionVariable(models.Model):
    """Through table linking a Service to one of its DecisionVariables."""
    id_service = models.ForeignKey(Service)
    id_decision_variable = models.ForeignKey(DecisionVariable)
    def __unicode__(self): # Python 3: def __str__(self):
        return str(self.id_decision_variable)
class Service_Relationship(models.Model):
    """Directed mapping from one service's decision variable to another's,
    with the aggregation function applied when values are combined."""
    MAX_AGGREGATION = 'M'
    MIN_AGGREGATION = 'N'
    SUM_AGGREGATION = 'S'
    NON_AGGREGATION = 'X'
    AGGREGATION_FUNC_CHOICES = (
        (MAX_AGGREGATION, 'Max Aggregation'),
        (MIN_AGGREGATION, 'Min Aggregation'),
        (SUM_AGGREGATION, 'Sum Aggregation'),
        (NON_AGGREGATION, 'Non Aggregation'),
    )
    id = models.AutoField(primary_key=True)
    service_from = models.ForeignKey(Service, related_name='service_from')
    decision_variable_from = models.ForeignKey(DecisionVariable, related_name='decision_variable_from')
    service_to = models.ForeignKey(Service, related_name='service_to')
    decision_variable_to = models.ForeignKey(DecisionVariable, related_name='decision_variable_to')
    aggregation = models.CharField(max_length=1,
                                   choices=AGGREGATION_FUNC_CHOICES,
                                   default=SUM_AGGREGATION)
    def __unicode__(self): # Python 3: def __str__(self):
        return '(' + self.service_from.name + ',' +self.decision_variable_from.name + ')' + ' TO ' + '(' + self.service_to.name + ',' + self.decision_variable_to.name + ')'
class Provider(models.Model):
    """A market participant offering a Service, with behavioural parameters
    (market/monopolist position, adaptation factor), a simulated start
    timestamp split into calendar fields, and marketplace addresses."""
    ACTIVE = 'A'
    INACTIVE = 'I'
    PROV_STAT_CHOICES = (
        (ACTIVE, 'Active'),
        (INACTIVE, 'Inactive'),
    )
    BULK = 'G'
    BID_BY_BID = 'B'
    PROV_CAPC_CHOICES = (
        (BULK, 'Bulk Controlled'),
        (BID_BY_BID, 'Bid Controlled'),
    )
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=60)
    # Positions/factors are fractions in [0, 1].
    market_position = models.FloatField(default=0,
                                        blank=False,
                                        validators=[MaxValueValidator(1),
                                                    MinValueValidator(0)])
    adaptation_factor = models.FloatField(default=0,
                                          blank=False,
                                          validators=[MaxValueValidator(1),
                                                      MinValueValidator(0)])
    status = models.CharField(max_length=1,
                              choices=PROV_STAT_CHOICES,
                              default=ACTIVE)
    num_ancestors = models.IntegerField(default=1,
                                        blank=False,
                                        validators=[MinValueValidator(1),
                                                    MaxValueValidator(10)]
                                        )
    debug = models.BooleanField(default = False)
    service = models.ForeignKey(Service)
    monopolist_position = models.FloatField(default=0,
                                            blank=False,
                                            validators=[MaxValueValidator(1),
                                                        MinValueValidator(0)])
    seed = models.BooleanField(default = False)
    # NOTE(review): year/month/day default to 0 while their MinValueValidator
    # is 1 -- defaults would fail full_clean(); confirm whether intentional.
    year = models.IntegerField(default=0,
                               blank=False,
                               validators=[MinValueValidator(1),
                                           MaxValueValidator(9999)]
                               )
    month = models.IntegerField(default=0,
                                blank=False,
                                validators=[MinValueValidator(1),
                                            MaxValueValidator(12)
                                            ]
                                )
    day = models.IntegerField(default=0,
                              blank=False,
                              validators=[MinValueValidator(1),
                                          MaxValueValidator(31) ]
                              )
    hour = models.IntegerField(default=0,
                               blank=False,
                               validators=[MinValueValidator(0),
                                           MaxValueValidator(24) ]
                               )
    minute = models.IntegerField(default=0,
                                 blank=False,
                                 validators=[MinValueValidator(0),
                                             MaxValueValidator(59) ]
                                 )
    second = models.IntegerField(default=0,
                                 blank=False,
                                 validators=[MinValueValidator(0),
                                             MaxValueValidator(59) ]
                                 )
    microsecond = models.IntegerField(default=0,
                                      blank=False,
                                      validators=[MinValueValidator(0),
                                                  MaxValueValidator(999999) ]
                                      )
    class_name = models.CharField(max_length=60)
    start_from_period = models.IntegerField(default=0,
                                            blank=False,
                                            validators=[MinValueValidator(1),
                                                        MaxValueValidator(9999) ]
                                            )
    buying_marketplace_address = models.CharField(max_length=45)
    selling_marketplace_address = models.CharField(max_length=45)
    capacity_controlled_at = models.CharField(max_length=1,
                                              choices=PROV_CAPC_CHOICES,
                                              default=BULK)
    purchase_service = models.ForeignKey(Service, related_name='purchase_service', blank=True, null=True)
    def __unicode__(self): # Python 3: def __str__(self):
        return self.name
class Provider_Resource(models.Model):
    """Capacity and unit cost a Provider holds of a Resource for a Service."""
    provider = models.ForeignKey(Provider)
    resource = models.ForeignKey(Resource)
    capacity = models.FloatField(default=0)
    cost = models.FloatField(default=0)
    service = models.ForeignKey(Service)
class offeringData(models.Model):
    """A plottable offering datum: either a decision variable reference or a
    calculated field defined by a function expression."""
    DECISION_VARIABLES = 'D'
    CALCULATED_FIELD = 'C'
    OFF_CHOICES = (
        (DECISION_VARIABLES, 'Decision Variable'),
        (CALCULATED_FIELD, 'Calculated Field'),
    )
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=60)
    type = models.CharField(max_length=1, choices=OFF_CHOICES, default=DECISION_VARIABLES)
    # Exactly one of the two below is expected to be set, depending on type.
    decision_variable = models.ForeignKey(DecisionVariable, blank=True, null=True)
    function = models.CharField(max_length=100, blank=True, null=True)
    def __unicode__(self): # Python 3: def __str__(self):
        return self.name
class Graphic(models.Model):
    """A named chart definition; its axes live in Axis_Graphic rows."""
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=60)
    description = models.TextField(blank=True)
    def __unicode__(self): # Python 3: def __str__(self):
        return self.name
class Axis_Graphic(models.Model):
    """Axis/column bindings for a Graphic: required x/y offering data plus
    optional label, color and up to four detail columns."""
    id = models.AutoField(primary_key=True)
    graphic = models.ForeignKey(Graphic)
    x_axis = models.ForeignKey(offeringData, related_name='x_axis')
    y_axis = models.ForeignKey(offeringData, related_name='y_axis')
    detail = models.BooleanField(default = True)
    label = models.ForeignKey(offeringData, related_name='label', blank=True, null=True)
    color = models.ForeignKey(offeringData, related_name='color', blank=True, null=True)
    column1 = models.ForeignKey(offeringData, related_name='column1', blank=True, null=True)
    column2 = models.ForeignKey(offeringData, related_name='column2', blank=True, null=True)
    column3 = models.ForeignKey(offeringData, related_name='column3', blank=True, null=True)
    column4 = models.ForeignKey(offeringData, related_name='column4', blank=True, null=True)
class Provider_Graphic(models.Model):
    """Associates a Graphic with a provider implementation class (class_name)."""
    id = models.AutoField(primary_key=True)
    graphic = models.ForeignKey(Graphic)
    class_name = models.CharField(max_length=60)
class Presenter(models.Model):
    """A named presenter grouping one or more Graphics (via Presenter_Graphic)."""
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=60)
    def __unicode__(self): # Python 3: def __str__(self):
        return self.name
class Presenter_Graphic(models.Model):
    """Join table linking a Presenter to one of its Graphics."""
    presenter = models.ForeignKey(Presenter)
    graphic = models.ForeignKey(Graphic)
class Consumer(models.Model):
    """Simulated consumer population: run count, RNG seed flag and the
    simulated start timestamp split into calendar fields."""
    id = models.AutoField(primary_key=True)
    # NOTE(review): field name 'observartions' is misspelled but is part of
    # the DB schema -- renaming would require a migration.
    observartions = models.TextField(blank=True)
    number_execute = models.IntegerField(default=1,
                                         blank=False,
                                         validators=[MinValueValidator(1),
                                                     MaxValueValidator(9999)]
                                         )
    seed = models.BooleanField(default = False)
    # NOTE(review): year/month/day default to 0 while MinValueValidator is 1
    # (same pattern as Provider) -- defaults would fail full_clean(); confirm.
    year = models.IntegerField(default=0,
                               blank=False,
                               validators=[MinValueValidator(1),
                                           MaxValueValidator(9999)]
                               )
    month = models.IntegerField(default=0,
                                blank=False,
                                validators=[MinValueValidator(1),
                                            MaxValueValidator(12)
                                            ]
                                )
    day = models.IntegerField(default=0,
                              blank=False,
                              validators=[MinValueValidator(1),
                                          MaxValueValidator(31) ]
                              )
    hour = models.IntegerField(default=0,
                               blank=False,
                               validators=[MinValueValidator(0),
                                           MaxValueValidator(24) ]
                               )
    minute = models.IntegerField(default=0,
                                 blank=False,
                                 validators=[MinValueValidator(0),
                                             MaxValueValidator(59) ]
                                 )
    second = models.IntegerField(default=0,
                                 blank=False,
                                 validators=[MinValueValidator(0),
                                             MaxValueValidator(59) ]
                                 )
    microsecond = models.IntegerField(default=0,
                                      blank=False,
                                      validators=[MinValueValidator(0),
                                                  MaxValueValidator(999999) ]
                                      )
class ConsumerService(models.Model):
    """Demand parameters (average, variance, market potential) of a Consumer
    for one Service; `execute` marks whether it runs in the simulation."""
    id = models.AutoField(primary_key=True)
    consumer = models.ForeignKey(Consumer)
    service = models.ForeignKey(Service)
    average = models.FloatField(default=0)
    variance = models.FloatField(default=0)
    market_potential = models.FloatField(default=0)
    execute = models.BooleanField(default = False)
class ExecutionGroup(models.Model):
    """Named grouping of simulation runs (see ExecutionConfiguration)."""
    ACTIVE = 'A'
    INACTIVE = 'I'
    PROV_STAT_CHOICES = (
        (ACTIVE, 'Active'),
        (INACTIVE, 'Inactive'),
    )
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=60)
    status = models.CharField(max_length=1,
                              choices=PROV_STAT_CHOICES,
                              default=ACTIVE)
    description = models.TextField(blank=True)
    def __unicode__(self): # Python 3: def __str__(self):
        return self.name
class ExecutionConfiguration(models.Model):
    """One run configuration inside an ExecutionGroup: number of consumers
    and simulation periods."""
    ACTIVE = 'A'
    INACTIVE = 'I'
    PROV_STAT_CHOICES = (
        (ACTIVE, 'Active'),
        (INACTIVE, 'Inactive'),
    )
    id = models.AutoField(primary_key=True)
    status = models.CharField(max_length=1,
                              choices=PROV_STAT_CHOICES,
                              default=ACTIVE)
    description = models.TextField(blank=True)
    execution_group = models.ForeignKey(ExecutionGroup)
    number_consumers = models.IntegerField(default=1, blank=False)
    number_periods = models.IntegerField(default=1, blank=False)
    def __unicode__(self): # Python 3: def __str__(self):
        return self.execution_group.name + ' ' + str(self.id)
class ExecutionConfigurationProviders(models.Model):
    """Join table: the Providers participating in an ExecutionConfiguration."""
    id = models.AutoField(primary_key=True)
    execution_configuration = models.ForeignKey(ExecutionConfiguration)
    provider = models.ForeignKey(Provider)
class GeneralParameters(models.Model):
    """Global simulation tuning knobs (bid periods, Pareto-front exchange,
    initial offers, market-share window)."""
    id = models.AutoField(primary_key=True)
    bid_periods = models.IntegerField(default=10, blank=False)
    pareto_fronts_to_exchange = models.IntegerField(default=3,
                                                    blank=False,
                                                    validators=[MinValueValidator(1)]
                                                    )
    initial_offer_number = models.IntegerField(default=1,
                                               blank=False,
                                               validators=[MinValueValidator(1), MaxValueValidator(10)]
                                               )
    num_periods_market_share = models.IntegerField(default=3,
                                                   blank=False,
                                                   validators=[MinValueValidator(1), MaxValueValidator(10)]
                                                   )
3301437 | """This file and its contents are licensed under the Apache License 2.0. Please see the included NOTICE for copyright information and LICENSE for a copy of the license.
"""
# Generated by Django 3.1.4 on 2021-03-03 07:31
import core.utils.common
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
def rename_disabled_to_off0006(apps, schema_editor):
    """Data-migration step: rename the 'Disabled' member role to 'Off'."""
    OrganizationMember = apps.get_model('organizations', 'OrganizationMember')
    OrganizationMember.objects.filter(role="Disabled").update(role="Off")
    # NOTE(review): the AlterField below is constructed and immediately
    # discarded -- inside a data-migration function it has no effect at
    # runtime. The schema change presumably belongs in Migration.operations;
    # confirm intent before removing.
    migrations.AlterField(
        model_name='organizationmember',
        name='role',
        field=models.CharField(
            choices=[('Administrator', 'Administrator'), ('Manager', 'Manager'), ('Coordinator', 'Coordinator'),
                     ('Collaborator', 'Collaborator'), ('Not Activated', 'Not Activated'), ('Off', 'Off')],
            default='Not Activated', help_text='Organization membership role', max_length=100),
    )
def rename_disabled_to_off0007(apps, schema_editor):
    """Data-migration step: rename the 'Off' member role to 'Deactivated'."""
    OrganizationMember = apps.get_model('organizations', 'OrganizationMember')
    OrganizationMember.objects.filter(role="Off").update(role="Deactivated")
    # NOTE(review): as in rename_disabled_to_off0006, this AlterField object
    # is built and discarded without effect -- confirm intent.
    migrations.AlterField(
        model_name='organizationmember',
        name='role',
        field=models.CharField(
            choices=[('Administrator', 'Administrator'), ('Manager', 'Manager'), ('Coordinator', 'Coordinator'),
                     ('Collaborator', 'Collaborator'), ('Not Activated', 'Not Activated'),
                     ('Deactivated', 'Deactivated')],
            default='Not Activated', help_text='Organization membership role', max_length=100),
    )
class Migration(migrations.Migration):
    """Squashed migration replacing organizations 0001-0008.

    Creates Organization and OrganizationMember, then replays the later
    field changes and the two role-rename data migrations.  Do not edit
    once applied -- Django records this file by name and content order.
    """
    replaces = [('organizations', '0001_initial'), ('organizations', '0002_organization_token'), ('organizations', '0003_auto_20200418_0202'), ('organizations', '0004_auto_20200501_1751'), ('organizations', '0005_auto_20200811_2313'), ('organizations', '0006_auto_20200923_1423'), ('organizations', '0007_auto_20200923_2200'), ('organizations', '0008_auto_20201005_1552')]
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Organization',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=1000, verbose_name='organization title')),
                ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='created at')),
                ('updated_at', models.DateTimeField(auto_now=True, verbose_name='updated at')),
                ('created_by', models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='organization', to=settings.AUTH_USER_MODEL, verbose_name='created_by')),
            ],
            options={
                'db_table': 'organization',
            },
        ),
        migrations.CreateModel(
            name='OrganizationMember',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('role', models.CharField(choices=[('Administrator', 'Administrator'), ('Manager', 'Manager'), ('Coordinator', 'Coordinator'), ('Collaborator', 'Collaborator'), ('Not Activated', 'Not Activated'), ('Disabled', 'Disabled')], default='Not Activated', max_length=100)),
                ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='created at')),
                ('updated_at', models.DateTimeField(auto_now=True, verbose_name='updated at')),
                ('organization', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='organizations.organization')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='om_through', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.AddField(
            model_name='organization',
            name='users',
            field=models.ManyToManyField(related_name='organizations', through='organizations.OrganizationMember', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='organization',
            name='token',
            field=models.CharField(blank=True, default=core.utils.common.create_hash, max_length=256, null=True, unique=True, verbose_name='token'),
        ),
        migrations.AlterField(
            model_name='organizationmember',
            name='organization',
            field=models.ForeignKey(help_text='Organization ID', on_delete=django.db.models.deletion.CASCADE, to='organizations.organization'),
        ),
        migrations.AlterField(
            model_name='organizationmember',
            name='role',
            field=models.CharField(choices=[('Administrator', 'Administrator'), ('Manager', 'Manager'), ('Coordinator', 'Coordinator'), ('Collaborator', 'Collaborator'), ('Not Activated', 'Not Activated'), ('Disabled', 'Disabled')], default='Not Activated', help_text='Organization membership role', max_length=100),
        ),
        migrations.AlterField(
            model_name='organizationmember',
            name='user',
            field=models.ForeignKey(help_text='User ID', on_delete=django.db.models.deletion.CASCADE, related_name='om_through', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='organization',
            name='default_role',
            field=models.CharField(default='Not Activated', help_text='Default membership role for invited users', max_length=100),
        ),
        # Data migrations: Disabled -> Off -> Deactivated (see helpers above).
        migrations.RunPython(
            code=rename_disabled_to_off0006,
        ),
        migrations.RunPython(
            code=rename_disabled_to_off0007,
        ),
        migrations.AlterField(
            model_name='organizationmember',
            name='role',
            field=models.CharField(choices=[('Administrator', 'Administrator'), ('Manager', 'Manager'), ('Coordinator', 'Coordinator'), ('Collaborator', 'Collaborator'), ('Not Activated', 'Not Activated'), ('Deactivated', 'Deactivated')], default='Not Activated', help_text='Organization membership role', max_length=100),
        ),
    ]
| StarcoderdataPython |
36522 | import tensorflow as tf
from KENN2.layers.residual.KnowledgeEnhancer import KnowledgeEnhancer
class Kenn(tf.keras.layers.Layer):
    # Keras layer that post-processes predicate pre-activations with a
    # KnowledgeEnhancer residual term computed from logical clauses.
    def __init__(self, predicates, clauses, activation=lambda x: x, initial_clause_weight=0.5, save_training_data=False, **kwargs):
        """Initialize the knowledge base.
        :param predicates: a list of predicates names
        :param clauses: a list of constraints. Each constraint is a string on the form:
        clause_weight:clause
        The clause_weight should be either a real number (in such a case this value is fixed) or an underscore
        (in this case the weight will be a tensorflow variable and learned during training).
        The clause must be represented as a list of literals separated by commas (that represent disjunctions).
        Negation must be specified by adding the letter 'n' before the predicate name.
        An example:
        _:nDog,Animal
        :param activation: final activation applied to the enhanced pre-activations
        :param initial_clause_weight: starting value for learnable clause weights
        :param save_training_data: if True, call() also returns per-clause deltas
        """
        super(Kenn, self).__init__(**kwargs)
        self.predicates = predicates
        self.clauses = clauses
        self.activation = activation
        self.initial_clause_weight = initial_clause_weight
        self.save_training_data = save_training_data
        # Created lazily in build() so Keras can track its variables.
        self.knowledge_enhancer = None
    def build(self, input_shape):
        """Build the layer
        :param input_shape: the input shape
        """
        self.knowledge_enhancer = KnowledgeEnhancer(
            self.predicates, self.clauses, self.initial_clause_weight, self.save_training_data)
        super(Kenn, self).build(input_shape)
    def call(self, inputs, **kwargs):
        """Improve the satisfaction level of a set of clauses.
        :param inputs: the tensor containing predicates' pre-activation values for many entities
        :return: final preactivations; when save_training_data is True, a
        (activations, deltas_list) tuple instead -- callers must handle both shapes.
        """
        if self.save_training_data:
            deltas, deltas_list = self.knowledge_enhancer(inputs)
            return self.activation(inputs + deltas), deltas_list
        else:
            deltas = self.knowledge_enhancer(inputs)
            return self.activation(inputs + deltas)
    def get_config(self):
        # NOTE(review): save_training_data is not serialized here, so a layer
        # restored via from_config() always gets the default (False) -- confirm
        # this is intended.
        config = super(Kenn, self).get_config()
        config.update({'predicates': self.predicates})
        config.update({'clauses': self.clauses})
        config.update({'activation': self.activation})
        config.update({'initial_clause_weight': self.initial_clause_weight})
        # config['output_size'] = # say self. _output_size if you store the argument in __init__
        return config
| StarcoderdataPython |
1679126 | # Distributed under the MIT License.
# See LICENSE.txt for details.
import numpy as np
from Elasticity.ConstitutiveRelations.IsotropicHomogeneous import (
youngs_modulus, poisson_ratio)
def displacement(x, length, height, bending_moment, bulk_modulus,
                 shear_modulus):
    """Analytic displacement field (u_x, u_y) for pure bending of a 2D beam."""
    E = youngs_modulus(bulk_modulus, shear_modulus)
    nu = poisson_ratio(bulk_modulus, shear_modulus)
    # Common curvature prefactor 12 M / (E h^3).
    k = 12. * bending_moment / (E * height**3)
    u_x = -k * x[0] * x[1]
    u_y = k / 2. * (x[0]**2 + nu * x[1]**2 - length**2 / 4.)
    return np.array([u_x, u_y])
def strain(x, length, height, bending_moment, bulk_modulus, shear_modulus):
    """Analytic 2x2 strain tensor for pure bending of a 2D beam."""
    E = youngs_modulus(bulk_modulus, shear_modulus)
    nu = poisson_ratio(bulk_modulus, shear_modulus)
    k = 12. * bending_moment / (E * height**3)
    # Pure bending: only the diagonal (normal) strain components are non-zero.
    return np.diag([-k * x[1], nu * k * x[1]])
def minus_stress(x, length, height, bending_moment, bulk_modulus,
                 shear_modulus):
    """Negative stress tensor for pure bending: only sigma_xx is non-zero."""
    sigma_xx = 12. * bending_moment / height**3 * x[1]
    return -np.array([[sigma_xx, 0], [0, 0]])
def potential_energy_density(x, length, height, bending_moment, bulk_modulus,
                             shear_modulus):
    """Elastic energy density 0.5 * strain_ij * (-stress)_ij at point x."""
    args = (x, length, height, bending_moment, bulk_modulus, shear_modulus)
    eps = strain(*args)
    neg_sigma = minus_stress(*args)
    return 0.5 * np.einsum('ij,ij', eps, neg_sigma)
def source(x):
    """Body-force source term: identically zero for the pure-bending solution."""
    return np.full(x.shape, 0.)
| StarcoderdataPython |
1620475 | <filename>cogs/misc.py<gh_stars>0
import discord
from discord.ext import commands
import random
import typing
from .utils import ids
from .utils.lists import weapons, adjectives
from .utils.helper import split_to_shorter_parts
class MiscCog(commands.Cog, name="Misc"):
    # Grab-bag of utility commands: ping, role self-service, color roles and
    # a deterministic "whoami" joke command.
    def __init__(self, bot):
        self.bot = bot
    # The three coroutines below are commands.check predicates.  They are
    # deliberately defined without `self`: discord.py calls them with only the
    # Context, and the decorators below reference them from the class body.
    async def is_in_sendou_server(ctx):
        # Allowed inside the Sendou guild, or anywhere for the bot owner.
        return (
            ctx.message.guild and ctx.message.guild.id == ids.SENDOU_SERVER_ID
        ) or ctx.message.author.id == ids.OWNER_ID
    # NOTE(review): is_in_plusone is not referenced by any visible command.
    async def is_in_plusone(ctx):
        return (
            ctx.message.guild and ctx.message.guild.id == ids.PLUSONE_SERVER_ID
        ) or ctx.message.author.id == ids.OWNER_ID
    async def can_create_color_roles(ctx):
        # Color roles are a perk: "Plus 1" members on the +1 server, or
        # subscribers/staff/boosters on the Sendou server.
        if not ctx.message.guild:
            return False
        if ctx.message.guild.id == ids.PLUSONE_SERVER_ID:
            for role in ctx.message.author.roles:
                if role.name == "Plus 1":
                    return True
            return False
        if ctx.message.guild.id == ids.SENDOU_SERVER_ID:
            for role in ctx.message.author.roles:
                if role.name in ["Twitch Subscriber", "Staff", "Nitro Booster"]:
                    return True
            return False
        return False
    @commands.command()
    async def ping(self, ctx):
        """
        Bot's latency. Normally below 200ms.
        """
        ping = round(self.bot.latency * 1000)
        await ctx.send(f"My ping is {ping}ms")
    @commands.command(name="give")
    @commands.check(is_in_sendou_server)
    async def give_or_remove_role(
        self, ctx, role: typing.Optional[discord.Role] = None
    ):
        """
        Gives or takes away a role (case sensitive).
        Use !give to view all the roles that are available.
        """
        # NOTE(review): user-facing messages mention `.give` while the
        # docstring says `!give` -- confirm the actual command prefix.
        roles_available = ["Tournament", "Content", "Jury"]
        if not role:
            roles_string = "\n".join(roles_available)
            return await ctx.send(f"Roles available:\n{roles_string}")
        if role.name not in roles_available:
            return await ctx.send(
                "That role isn't available. Use `.give` to get a list of all the available roles"
            )
        # Toggle behaviour: remove the role if the member already has it.
        if role in ctx.message.author.roles:
            await ctx.message.author.remove_roles(role)
            return await ctx.send(
                f"{role.name} succesfully removed from {ctx.message.author.name}"
            )
        await ctx.message.author.add_roles(role)
        await ctx.send(f"{role.name} succesfully added to {ctx.message.author.name}")
    @commands.command(name="color")
    @commands.check(can_create_color_roles)
    async def give_or_edit_color_role(
        self, ctx, color: typing.Optional[discord.Color] = None, *role_name
    ):
        """
        Gives or modifies a color role.
        Example usage: !color #6A7E25 my cool role name
        """
        if not color:
            return await ctx.send(
                "Valid color not provided. Example usage: `.color #6A7E25`"
            )
        if len(role_name) == 0:
            name = ctx.message.author.name
        else:
            name = " ".join(role_name)
        if len(name) > 50:
            return await ctx.send(
                f"Max character count for the role to be given is 50. The name for the role you gave was {len(name)} characters long"
            )
        # Color roles are marked with a trailing "!": if the member already has
        # one, edit it in place instead of creating a second role.
        for role in ctx.message.author.roles:
            if "!" in role.name:
                await role.edit(name=f"{name}!", color=color)
                return await ctx.send(f"Edited the role, {ctx.message.author.name}!")
        created_role = await ctx.message.guild.create_role(name=f"{name}!", color=color)
        await ctx.message.author.add_roles(created_role)
        await ctx.send(f"Enjoy your new color, {ctx.message.author.name}!")
    @commands.command(name="whoami")
    async def tell_them_how_it_is(self, ctx):
        """
        Learn something about yourself.
        """
        # Seeding with the Discord user id before each choice makes the answer
        # deterministic per user across invocations.
        did = ctx.message.author.id
        random.seed(did)
        adjective = random.choice(adjectives)
        random.seed(did)
        weapon = random.choice(weapons)
        await ctx.send(
            f"{adjective.capitalize()} {weapon} main. That is who you are, {ctx.message.author.name}"
        )
def setup(bot):
    # discord.py extension entry point: register this cog on the bot.
    bot.add_cog(MiscCog(bot))
| StarcoderdataPython |
3329775 | <reponame>khchine5/vilma
# -*- coding: UTF-8 -*-
# Copyright 2017 <NAME>
# License: BSD (see file COPYING for details)
"""
Base Django settings for Lino Vilma applications.
"""
from __future__ import print_function
from __future__ import unicode_literals
from lino.projects.std.settings import *
from lino.api.ad import _
from lino_vilma import SETUP_INFO
class Site(Site):
    """Lino Vilma site configuration: metadata, installed apps and quicklinks."""
    verbose_name = "Lino Vilma"
    version = SETUP_INFO['version']
    url = "http://vilma.lino-framework.org/"
    demo_fixtures = ['std', 'demo', 'demo2', 'checkdata']
    # 'linotickets',
    # 'tractickets', 'luc']
    # project_model = 'tickets.Project'
    # project_model = 'deploy.Milestone'
    textfield_format = 'html'
    user_types_module = 'lino_vilma.lib.vilma.user_types'
    workflows_module = 'lino_vilma.lib.vilma.workflows'
    obj2text_template = "**{0}**"
    default_build_method = 'appyodt'
    # experimental use of rest_framework:
    # root_urlconf = 'lino_book.projects.team.urls'
    # migration_class = 'lino_vilma.lib.vilma.migrate.Migrator'
    auto_configure_logger_names = "atelier django lino lino_xl lino_noi"
    def get_installed_apps(self):
        # Declares the plugin set for this application; order matters for
        # Lino's plugin resolution.
        yield super(Site, self).get_installed_apps()
        # yield 'lino.modlib.extjs'
        # yield 'lino.modlib.bootstrap3'
        # yield 'lino.modlib.gfks'
        # yield 'lino.modlib.system'
        # yield 'lino.modlib.users'
        yield 'lino_vilma.lib.contacts'
        # yield 'lino_xl.lib.online.users'
        yield 'lino_xl.lib.cal'
        yield 'lino_xl.lib.extensible'
        yield 'lino_xl.lib.addresses'
        yield 'lino_xl.lib.phones'
        yield 'lino_noi.lib.courses'
        # yield 'lino_noi.lib.products'
        yield 'lino_xl.lib.topics'
        # yield 'lino_xl.lib.votes'
        # yield 'lino_noi.lib.tickets'
        yield 'lino_xl.lib.skills'
        # yield 'lino_xl.lib.deploy'
        # yield 'lino_noi.lib.working'
        yield 'lino_xl.lib.lists'
        yield 'lino_xl.lib.blogs'
        # yield 'lino.modlib.changes'
        # yield 'lino.modlib.notify'
        # yield 'lino.modlib.uploads'
        # yield 'lino_xl.lib.outbox'
        yield 'lino_xl.lib.excerpts'
        yield 'lino.modlib.export_excel'
        # yield 'lino.modlib.tinymce'
        yield 'lino.modlib.checkdata'
        # yield 'lino.modlib.smtpd'
        yield 'lino.modlib.weasyprint'
        # yield 'lino_xl.lib.appypod'
        # yield 'lino.modlib.wkhtmltopdf'
        yield 'lino.modlib.comments'
        yield 'lino.modlib.dashboard'
        # yield 'lino.modlib.awesomeuploader'
        # yield 'lino_noi.lib.noi'
        yield 'lino_vilma.lib.vilma'
        # yield 'lino_xl.lib.inbox'
        # yield 'lino_xl.lib.mailbox'
        # yield 'lino_xl.lib.meetings'
    def setup_plugins(self):
        # NOTE(review): currently only delegates to the parent; every
        # customization below is commented out.
        super(Site, self).setup_plugins()
        # self.plugins.comments.configure(
        #     commentable_model='tickets.Ticket')
        # self.plugins.skills.configure(
        #     demander_model='contacts.Person')
        # self.plugins.addresses.configure(
        #     partner_model='contacts.Person')
        # self.plugins.skills.configure(
        #     demander_model='tickets.Ticket')
        # self.plugins.tickets.configure(
        #     site_model='cal.Room',
        #     milestone_model='courses.Course')
    def get_default_required(self, **kw):
        # overrides the default behaviour which would add
        # `auth=True`. In Lino Noi everybody can see everything.
        return kw
    def setup_quicklinks(self, user, tb):
        # Toolbar quicklinks: contact tables plus a "My settings" action for
        # the signed-in user.
        super(Site, self).setup_quicklinks(user, tb)
        tb.add_action(self.models.contacts.Persons)
        tb.add_action(self.models.contacts.Companies)
        # tb.add_action(self.modules.deploy.MyMilestones)
        # tb.add_action(self.models.tickets.MyTickets)
        # tb.add_action(self.models.tickets.TicketsToTriage)
        # tb.add_action(self.models.tickets.TicketsToTalk)
        # tb.add_action(self.modules.tickets.TicketsToDo)
        a = self.models.users.MySettings.default_action
        tb.add_instance_action(
            user, action=a, label=_("My settings"))
        # handler = self.action_call(None, a, dict(record_id=user.pk))
        # handler = "function(){%s}" % handler
        # mysettings = dict(text=_("My settings"),
        #                   handler=js_code(handler))
    # def get_dashboard_items(self, user):
    #     """Defines the story to be displayed on the admin main page.
    #     """
    #     for i in super(Site, self).get_dashboard_items(user):
    #         yield i
    #     yield self.models.lists.Lists
# the following line should not be active in a checked-in version
#~ DATABASES['default']['NAME'] = ':memory:'
# Store datetimes timezone-aware; the effective zone is UTC (the last
# assignment wins over the commented-out alternatives).
USE_TZ = True
# TIME_ZONE = 'Europe/Brussels'
# TIME_ZONE = 'Europe/Tallinn'
TIME_ZONE = 'UTC'
| StarcoderdataPython |
3282098 | ''' Game of Life '''
class Game:
    """Conway's Game of Life on a bounded (non-wrapping) rectangular grid.

    The full history is kept: ``states[0]`` is the seed and every call to
    ``run_one_step`` appends the next generation.  Cells are 1 (alive) or
    0 (dead).
    """
    def __init__(self):
        self.states = list()

    def new(self, seed):
        """Start a new game from *seed*, a list of rows of 0/1 values."""
        self.irange = len(seed)      # canvas rows
        self.jrange = len(seed[0])   # canvas columns
        self.states.append(seed)     # first step is the seed itself

    def run_one_step(self):
        """Compute the next generation from the latest state and append it."""
        state = self.states[-1]
        new_state = [
            [self.cell_next_state(state, i, j) for j in range(self.jrange)]
            for i in range(self.irange)
        ]
        self.states.append(new_state)

    def run(self, steps=1):
        """Advance the game by *steps* generations."""
        for _ in range(steps):
            self.run_one_step()

    def count_live_neighbours(self, state, i, j):
        """Count live cells among the (up to 8) neighbours of cell (i, j).

        Bug fix: the previous implementation indexed neighbours inside
        try/except blocks, but Python's negative indices wrap around, so
        cells on the top/left edges wrongly counted cells on the
        bottom/right edges as neighbours (while the opposite edges raised
        IndexError and were skipped).  Explicit bounds checks make the
        grid truly bounded on all four sides.
        """
        rows = len(state)
        cols = len(state[0])
        count = 0
        for di in (-1, 0, 1):
            for dj in (-1, 0, 1):
                if di == 0 and dj == 0:
                    continue  # a cell is not its own neighbour
                ni = i + di
                nj = j + dj
                if 0 <= ni < rows and 0 <= nj < cols and state[ni][nj] == 1:
                    count += 1
        return count

    def cell_next_state(self, state, i, j):
        """Apply Conway's rules to cell (i, j) and return its next state."""
        next_cell_state = state[i][j]  # default: unchanged
        count = self.count_live_neighbours(state, i, j)
        if state[i][j] == 1:
            # Survival with 2 or 3 neighbours; otherwise under/over-population.
            next_cell_state = 1 if count in (2, 3) else 0
        elif state[i][j] == 0 and count == 3:
            # Reproduction: exactly three live neighbours revive a dead cell.
            next_cell_state = 1
        return next_cell_state

    def print(self, step='all'):
        """Pretty-print stored states ('all', or the first *step* states).

        Note: the method name shadows the builtin only as an attribute; the
        builtin ``print`` is still what is called inside.
        """
        import numpy as np
        if step == 'all':
            for matrix in self.states:
                print(np.matrix(matrix))
                print('\n')
        else:
            for step_number in range(step):
                print(np.matrix(self.states[step_number]))
                print('\n')

    def show(self, step=0):
        """Display a single stored state as a plot (delegates to GoL.graphics)."""
        from GoL.graphics import show
        show(self.states[step], name="step:" + str(step))

    def save(self, file_name='sample'):
        """Write all states to <file_name>.txt in the 'step:N' + CSV-row format."""
        file_name += '.txt'
        with open(file_name, 'w') as f:
            for step in range(len(self.states)):
                f.write('step:' + str(step) + '\n')
                for i in range(self.irange):
                    line = ''
                    for j in range(self.jrange):
                        line += str(self.states[step][i][j])
                        line += ','
                    line += '\n'
                    f.write(line)

    def load(self, file_name='sample'):
        """Load states from <file_name>.txt (the format written by save())."""
        file_name += '.txt'
        with open(file_name, 'r') as f:
            content = f.read()
        states = list()
        # Each block starts with 'step:N'; the leading split entry is empty.
        for step_content in content.split('step:')[1:]:
            # Drop the 'N' header line and the trailing empty line.
            row_lines = step_content.split('\n')[1:-1]
            state = list()
            for row_content in row_lines:
                # Rows end with a trailing comma, hence the [:-1].
                state.append([int(item) for item in row_content.split(',')[:-1]])
            states.append(state)
        self.states = states
        seed = self.states[0]
        self.irange = len(seed)      # canvas rows
        self.jrange = len(seed[0])   # canvas columns

    def animate(self, name='sample', format='gif'):
        """Render the whole history as an animation (delegates to GoL.graphics)."""
        from GoL.graphics import animate
        animate(self.states, name, format)
if __name__ == "__main__":
    # Demo: load previously saved states and render them as a GIF.
    from GoL import seeds
    #seed = seeds.library['blinker']
    # NOTE(review): `seed` is generated but unused in the current
    # configuration -- the demo loads saved states instead of seeding.
    seed = seeds.random(50, 50)
    G = Game()
    G.load()
    #G.new(seed)
    #G.run(50)
    #G.run()
    #G.print(2)
    #G.show(2)
    #G.show(-1)
    #G.save()
    G.animate(format='gif')
| StarcoderdataPython |
1798478 | # coding=utf-8
class Config:
    """Static configuration: input/output paths and stock-screening thresholds."""
    INPUT_DIR = "input"
    # Stock id -> name mapping files for the Shanghai / Shenzhen exchanges
    # and the "open" list.
    STOCK_ID_NAME_MAP_SHA = "input/common/stock_id_name_map/sha"
    STOCK_ID_NAME_MAP_SZ = "input/common/stock_id_name_map/sz"
    STOCK_ID_NAME_MAP_OPEN = "input/common/stock_id_name_map/open"
    CURRENT_HOLDED_PATH = "input/holded"
    STOCKS_PATH = "input/stocks"
    SELECTED_FILE = "selected"
    SELECTED_PATH = "input/selected/selected"
    # Selector for which stock universe to process.
    OPTION_HOLDED = 1
    OPTION_SELECTED = 2
    OUTPUT = "output/"
    OUTPUT_PRICELIST = "output/pricelist"
    # Number of years examined when checking for continuous growth.
    CONTINUE_GROW_YEARS = 6
    # Maximum loss (as a fraction) still tolerated within the continuous-growth
    # window.
    ALLOW_MAX_LOSS = -0.10
    ADDINGS = {"profit2", "average profit2", "in report", "adjacent 365"}
    def __init__(self):
        pass
| StarcoderdataPython |
3314132 | <reponame>YeffyCodeGit/LoginManager<filename>main.py<gh_stars>1-10
import re
import hashlib
def add(name, password):
    """Append one "name:password" record to users.txt."""
    record = f'{name}:{password}\n'
    with open('users.txt', 'a') as user_file:
        user_file.write(record)
def view():
    """Print every record stored in users.txt, framed by banner lines."""
    with open('users.txt', 'r') as user_file:
        print('-------------------- USERS --------------------')
        for record in user_file:
            fields = record.split(':')
            print(f"Username: {fields[0]}, Password: {fields[1].rstrip()}")
        print('-----------------------------------------------')
def purge():
    """Delete every stored user by truncating users.txt."""
    # Opening in 'w' mode truncates; nothing needs to be written.
    with open('users.txt', 'w'):
        pass
def login():
    """Prompt for credentials and report whether they match a stored user."""
    name = input("Username: ")
    password = input("Password: ")
    hashed_pass = hashlib.sha256(password.encode()).hexdigest()
    with open('users.txt', 'r') as user_file:
        records = user_file.readlines()
    matched = False
    for record in records:
        fields = record.split(':')
        if fields[0] == name and fields[1].rstrip() == hashed_pass:
            matched = True
            break
    if matched:
        print(f"\n\nWelcome {name}!\n\n")
    else:
        print("\n\nInvalid username or password.\n\n")
def main():
    """Interactive menu loop for the login manager.

    Fixes over the previous version:
    * Option 4 (purge) was unreachable: the guard rejected every option > 3
      even though the menu offers 0-4.  The guard now accepts 0-4.
    * Non-numeric input crashed with ValueError; it is now reported as an
      invalid option instead.
    """
    # NOTE(review): the master password is hard-coded in plain text; fine for
    # a toy project, but a real application should store a salted hash.
    master_pass = input("Enter master password: ")
    if master_pass != "password":
        print("Incorrect password")
        exit()
    while True:
        print("Please select an option: ")
        print("0) Quit")
        print("1) Add new user ")
        print("2) View all users ")
        print("3) Login to user ")
        print("4) Purge user database ")
        try:
            option = int(input("> "))
        except ValueError:
            print("\n\nInvalid option, try again. Use 0 to quit.\n\n")
            continue
        if option < 0 or option > 4:
            print("\n\nInvalid option, try again. Use 0 to quit.\n\n")
            continue
        elif option == 0:
            print("Exiting...")
            exit()
        elif option == 1:
            name = input("Username: ")
            password = input("Password: ")
            hashed_pass = hashlib.sha256(password.encode()).hexdigest()
            print(f'Adding user {name}:{hashed_pass}')
            add(name, hashed_pass)
        elif option == 2:
            view()
        elif option == 3:
            login()
        elif option == 4:
            purge()


if __name__ == "__main__":
    main()
| StarcoderdataPython |
1677156 | import json
from datetime import datetime
from instagram_private_api import (Client, ClientError, ClientLoginError, ClientCookieExpiredError,
ClientLoginRequiredError, __version__ as client_version)
import sys
import os.path
from utils.utils import *
def onlogin_callback(api, new_settings_file):
    """Persist the client's auth settings to *new_settings_file* as JSON."""
    with open(new_settings_file, 'w') as settings_out:
        json.dump(api.settings, settings_out, default=to_json)
    print('SAVED: {0!s}'.format(new_settings_file))
def login(username, password, path_cookie):
    """Log in to Instagram, reusing cached auth settings when available.

    Tries, in order: a fresh login when *path_cookie* does not exist yet;
    a login reusing the cached settings; and, on expired/required-login
    errors, a fresh login with the cached device_id.  Unrecoverable client
    errors terminate the process via sys.exit().  Returns the authenticated
    Client instance.
    """
    print('Client version: {0!s}'.format(client_version))
    device_id = None
    try:
        if not os.path.isfile(path_cookie):
            # No cached settings yet: full login, persisting settings on success.
            # NOTE(review): the on_login callback always saves to the file
            # "insta_cookie", not to *path_cookie* -- confirm this is intended.
            api = Client(username, password, on_login=lambda x: onlogin_callback(x, "insta_cookie"))
        else:
            with open(path_cookie) as file_data:
                cached_settings = json.load(file_data, object_hook=from_json)
            print('Reusing settings: {0!s}'.format(path_cookie))
            device_id = cached_settings.get('device_id')
            # reuse auth settings
            api = Client(username, password, settings=cached_settings)
    except (ClientCookieExpiredError, ClientLoginRequiredError) as e:
        # Cached session no longer valid: log in again, keeping the device_id
        # so Instagram sees the same device.
        print('ClientCookieExpiredError/ClientLoginRequiredError: {0!s}'.format(e))
        api = Client(username, password, device_id=device_id, on_login=lambda x: onlogin_callback(x, "insta_cookie"))
    except ClientLoginError as e:
        print('ClientLoginError {0!s}'.format(e))
        sys.exit()
    except ClientError as e:
        print('ClientError {0!s} (Code: {1:d}, Response: {2!s})'.format(e.msg, e.code, e.error_response))
        sys.exit()
    except Exception as e:
        print('Unexpected Exception: {0!s}'.format(e))
        sys.exit()
    # Show when login expires
    cookie_expiry = api.cookie_jar.auth_expires
    print('Cookie Expiry: {0!s}'.format(datetime.fromtimestamp(cookie_expiry).strftime('%Y-%m-%dT%H:%M:%SZ')))
    print("\n")
    return api
| StarcoderdataPython |
160027 | <gh_stars>10-100
#
# Beaglebone GPIO output pin driver
#
# Author: <NAME>
# Copyright (c) 2015, Semcon Sweden AB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted
# provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and
# the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Semcon Sweden AB nor the names of its contributors may be used to endorse or
# promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
# For Python 2.7 and up (incl. 3.x)
#
# Paths of the legacy sysfs GPIO interface on Linux.
FILE_FOR_GPIO_EXPORT = '/sys/class/gpio/export'
TEMPLATE_FOR_GPIO_PIN_DIRECTION_FILE = '/sys/class/gpio/gpio{}/direction'
TEMPLATE_FOR_GPIO_PIN_VALUE_FILE = '/sys/class/gpio/gpio{}/value'
# Values written to the sysfs files.
DIRECTION_OUT = 'out'
GPIO_STATE_ON = '1'
GPIO_STATE_OFF = '0'
MODE_FILE_WRITE = 'w'
import errno
class Outputpin(object):
    """A single GPIO output pin on a Beaglebone, driven via sysfs.

    Requires root permissions.  On construction the pin is exported to
    userspace (an already-exported pin is tolerated), configured as a
    digital output and driven low.

    Attributes:
     * state (bool): writing True drives the pin high, False drives it low.
    """

    def __init__(self, GPIO_number):
        self._state = False
        self._GPIO_number = GPIO_number
        # Export the pin to Linux userspace; EBUSY means it already is.
        try:
            with open(FILE_FOR_GPIO_EXPORT, MODE_FILE_WRITE) as export_file:
                export_file.write(str(self._GPIO_number))
        except IOError as err:
            if err.errno != errno.EBUSY:
                raise
        # Configure the pin as a digital output.
        direction_path = TEMPLATE_FOR_GPIO_PIN_DIRECTION_FILE.format(self._GPIO_number)
        with open(direction_path, MODE_FILE_WRITE) as direction_file:
            direction_file.write(DIRECTION_OUT)
        # Drive the pin low initially (also writes the value file).
        self.state = False

    @property
    def state(self):
        return self._state

    @state.setter
    def state(self, value):
        self._state = True if value else False
        pin_text = GPIO_STATE_ON if value else GPIO_STATE_OFF
        # Push the new level through the file-like driver interface.
        value_path = TEMPLATE_FOR_GPIO_PIN_VALUE_FILE.format(self._GPIO_number)
        with open(value_path, MODE_FILE_WRITE) as value_file:
            value_file.write(pin_text)
f.write(filetext)
########################################################
# Testing the module #
# #
# Connect a LED from port8 pin3 to ground via 470 Ohm. #
# The voltage is 3.3 V on the pin. #
# The output is gpmc_ad6 = GPIO1_6 #
# This is GPIO 38 (1*32 + 6) #
# #
########################################################
if __name__ == '__main__':
    # Manual hardware smoke test: toggle GPIO 38 (port 8 pin 3) high/low twice
    # with one-second pauses.  Requires root and real Beaglebone hardware.
    import time
    pin = Outputpin(38)
    pin.state = True
    time.sleep(1)
    pin.state = False
    time.sleep(1)
    pin.state = True
    time.sleep(1)
    pin.state = False
| StarcoderdataPython |
199441 | <gh_stars>0
"""
Test turning a bag into other forms.
"""
import simplejson
from tiddlyweb.serializer import Serializer
from tiddlyweb.model.bag import Bag
from tiddlyweb.config import config
from fixtures import bagfour, tiddler_collection, reset_textstore
# NOTE(review): Python 2 test module (uses `xrange` below); runs only under
# a Python 2 interpreter.
def setup_module(module):
    # Fresh text store and a shared text serializer for the whole module.
    reset_textstore()
    module.serializer = Serializer('text')
def test_generate_json():
    # Bag -> JSON: policy and description round out into the JSON document.
    serializer = Serializer('json')
    bagfour.desc = 'a tasty little bag'
    bagfour.policy.manage = ['NONE']
    serializer.object = bagfour
    string = serializer.to_string()
    json = simplejson.loads(string)
    assert json['policy']['manage'] == ['NONE']
    assert json['desc'] == 'a tasty little bag'
def test_generated_string():
    # Text serialization lists every tiddler title.
    string = ''.join(serializer.list_tiddlers(tiddler_collection))
    assert 'TiddlerOne' in string
    assert 'TiddlerTwo' in string
    assert 'TiddlerThree' in string
def test_generated_string_with_revbag():
    # With revisions enabled each title carries a ':<revision>' suffix.
    tiddler_collection.is_revisions = True
    string = ''.join(serializer.list_tiddlers(tiddler_collection))
    # XXX: this 1 or 0 thing is the result of the fixture data not being
    # safe across multiple test runs. The test is really just for
    # the presence of the ':'
    assert 'TiddlerOne:1' in string or 'TiddlerOne:0' in string
    assert 'TiddlerTwo:1' in string or 'TiddlerTwo:0' in string
    assert 'TiddlerThree:1' in string or 'TiddlerThree:0' in string
    tiddler_collection.is_revisions = False
def test_generated_html():
    # HTML serialization links each tiddler under /bags/<bag>/tiddlers/.
    html_serializer = Serializer('html')
    string = html_serializer.list_tiddlers(tiddler_collection)
    assert '<li><a href="/bags/bagfour/tiddlers/TiddlerOne">TiddlerOne</a></li>' in string
def test_generated_html_with_prefix():
    # A configured server_prefix must be prepended to every generated link.
    new_config = config.copy()
    new_config['server_prefix'] = '/salacious'
    environ = {'tiddlyweb.config': new_config}
    html_serializer = Serializer('html', environ)
    string = html_serializer.list_tiddlers(tiddler_collection)
    assert '<li><a href="/salacious/bags/bagfour/tiddlers/TiddlerOne">TiddlerOne</a></li>' in string
def test_generated_html_with_revbag():
    # Revision listings link to /revisions/<n> per tiddler.
    html_serializer = Serializer('html')
    tiddler_collection.is_revisions = True
    string = html_serializer.list_tiddlers(tiddler_collection)
    assert ('<li><a href="/bags/bagfour/tiddlers/TiddlerTwo/revisions/1">TiddlerTwo:1</a></li>'
            in string or
            '<li><a href="/bags/bagfour/tiddlers/TiddlerTwo/revisions/0">TiddlerTwo:0</a></li>'
            in string)
    tiddler_collection.is_revisions = False
def test_json_to_bag():
    # JSON -> Bag: deserialization restores policy and description.
    serializer = Serializer('json')
    json_string = simplejson.dumps(dict(policy=dict(read=['user1'], manage=['NONE']), desc='simply the best'))
    newbag = Bag('bagho')
    serializer.object = newbag
    serializer.from_string(json_string)
    assert newbag.name == 'bagho'
    assert newbag.policy.read == ['user1']
    assert newbag.policy.manage == ['NONE']
    assert newbag.desc == 'simply the best'
def test_text_list():
    # Text listing of multiple bags contains every bag name.
    serializer = Serializer('text')
    bags = [Bag('bag' + str(name)) for name in xrange(2)]
    string = ''.join(serializer.list_bags(bags))
    assert 'bag0' in string
    assert 'bag1' in string
def test_html_list():
    # HTML listing links each bag under bags/<name>.
    serializer = Serializer('html')
    bags = [Bag('bag' + str(name)) for name in xrange(2)]
    string = ''.join(serializer.list_bags(bags))
    assert 'href="bags/bag0' in string
    assert 'href="bags/bag1' in string
| StarcoderdataPython |
1701686 | from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class DataQualityOperator(BaseOperator):
    """Airflow operator that runs post-load data-quality checks on Redshift.

    For every table name in ``tables_check`` the operator issues a
    ``SELECT COUNT(*)`` and fails the task (raises ValueError) if the query
    returns no rows or the table is empty.
    """

    ui_color = '#89DA59'

    @apply_defaults
    def __init__(self,
                 # Iterable of table names to validate.  The old default of ""
                 # was a string: iterating a non-empty string would have
                 # checked one *character* at a time, so an empty tuple is the
                 # safer (and behaviorally equivalent) default.
                 tables_check=(),
                 redshift_conn_id="",
                 *args, **kwargs):
        super(DataQualityOperator, self).__init__(*args, **kwargs)
        self.tables_check = tables_check
        self.redshift_conn_id = redshift_conn_id

    def execute(self, context):
        """Check each configured table, raising ValueError on the first failure."""
        redshift_hook = PostgresHook(self.redshift_conn_id)
        for table in self.tables_check:
            self.log.info(f"Checking table {table}")
            records = redshift_hook.get_records(f"SELECT COUNT(*) FROM {table}")
            # No rows (or an empty first row) means the count query itself failed.
            if not records or not records[0]:
                raise ValueError(f"Data quality check not successful. {table} returned no results")
            num_records = records[0][0]
            if num_records < 1:
                raise ValueError(f"Data quality check not successful. {table} contained 0 rows")
            self.log.info(f"Data quality on table {table} check successful")
| StarcoderdataPython |
1611646 | from PIL import ImageColor
# RGB triples matching the first entries of the ColorBrewer "Dark2"
# qualitative palette -- presumably used as background/accent colors by
# callers of get_font_color below; TODO confirm usage.
PURPLE = 117, 112, 179
ORANGE = 217, 95, 2
GREEN = 27, 158, 119
def get_font_color(background_color):
    """Choose "black" or "white" for text shown on *background_color*.

    Accepts an RGB or RGBA tuple, or any color string understood by
    ``PIL.ImageColor.getrgb`` (e.g. "#7570b3" or "rgb(27, 158, 119)").
    An alpha channel, if present, is ignored.

    The decision uses the ITU-R BT.601 perceived-luminance weights:
    backgrounds brighter than 50% luminance get black text, darker ones white.
    """
    if not isinstance(background_color, tuple):
        background_color = ImageColor.getrgb(background_color)
    # Generalization: the old code unpacked exactly three values and crashed
    # on RGBA input (which getrgb can also return for "rgba(...)" strings).
    r, g, b = background_color[:3]
    # calculate perceptive luminance, normalized to [0, 1]
    luminance = (0.299 * r + 0.587 * g + 0.114 * b) / 255
    # bright color -> black font
    if luminance > 0.5:
        return "black"
    # dark color -> white font
    else:
        return "white"
| StarcoderdataPython |
91831 | <reponame>titibike/PynamoDB
"""
An example using Amazon's Thread example for motivation
http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/SampleTablesAndData.html
"""
import logging
from pynamodb.models import Model
from pynamodb.attributes import (
    ListAttribute, NumberAttribute, UnicodeAttribute, UnicodeSetAttribute,
    UTCDateTimeAttribute
)
from datetime import datetime
# Verbose client-side logging: putting pynamodb's logger at DEBUG makes every
# request/response visible, which suits this walk-through script.
logging.basicConfig()
log = logging.getLogger("pynamodb")
log.setLevel(logging.DEBUG)
log.propagate = True
class Thread(Model):
    """PynamoDB model for Amazon's "Thread" forum example table.

    Hash key: forum_name; range key: subject.  The Meta.host points at a
    local DynamoDB instance (dynamodb-local) rather than AWS.
    """
    class Meta:
        read_capacity_units = 1
        write_capacity_units = 1
        table_name = "Thread"
        host = "http://localhost:8000"
    forum_name = UnicodeAttribute(hash_key=True)
    subject = UnicodeAttribute(range_key=True)
    views = NumberAttribute(default=0)
    replies = NumberAttribute(default=0)
    answered = NumberAttribute(default=0)
    tags = UnicodeSetAttribute()
    last_post_datetime = UTCDateTimeAttribute(null=True)
    # Bug fix: ListAttribute was used here without being imported, which made
    # the whole module fail with NameError at import time; ListAttribute is
    # now included in the pynamodb.attributes import block.
    notes = ListAttribute(default=list)
# Delete the table (disabled; uncomment to reset local state).
# print(Thread.delete_table())
# Create the table if it does not exist yet; wait=True blocks until ACTIVE.
if not Thread.exists():
    Thread.create_table(wait=True)
# Create a thread
thread_item = Thread(
    'Some Forum',
    'Some Subject',
    tags=['foo', 'bar'],
    last_post_datetime=datetime.now()
)
# Fetching a missing item raises Thread.DoesNotExist:
# try:
#     Thread.get('does not', 'exist')
# except Thread.DoesNotExist:
#     pass
# Save the thread
thread_item.save()
# Batch write operation: buffers the puts and flushes them in chunks.
with Thread.batch_write() as batch:
    threads = []
    for x in range(100):
        thread = Thread('forum-{0}'.format(x), 'subject-{0}'.format(x))
        thread.tags = ['tag1', 'tag2']
        thread.last_post_datetime = datetime.now()
        threads.append(thread)
    for thread in threads:
        batch.save(thread)
# Get table count
print(Thread.count())
# Count restricted to a single hash key
print(Thread.count('forum-1'))
# Batch get of all 100 (hash, range) key pairs written above
item_keys = [('forum-{0}'.format(x), 'subject-{0}'.format(x)) for x in range(100)]
for item in Thread.batch_get(item_keys):
    print(item)
# Scan
for item in Thread.scan():
    print(item)
# Query
for item in Thread.query('forum-1', Thread.subject.startswith('subject')):
    print(item)
print("-"*80)
# A model that uses aliased attribute names
class AliasedModel(Model):
    """Same Thread data, stored under abbreviated DynamoDB attribute names.

    ``attr_name`` maps each Python attribute to a short name in the table
    (e.g. ``forum_name`` is persisted as ``fn``), keeping item payloads
    small while the Python API stays readable.
    """
    class Meta:
        table_name = "AliasedModel"
        host = "http://localhost:8000"
    forum_name = UnicodeAttribute(hash_key=True, attr_name='fn')
    subject = UnicodeAttribute(range_key=True, attr_name='s')
    views = NumberAttribute(default=0, attr_name='v')
    replies = NumberAttribute(default=0, attr_name='rp')
    answered = NumberAttribute(default=0, attr_name='an')
    tags = UnicodeSetAttribute(attr_name='t')
    last_post_datetime = UTCDateTimeAttribute(attr_name='lp')
# Create the aliased table if needed (capacity passed here instead of Meta).
if not AliasedModel.exists():
    AliasedModel.create_table(read_capacity_units=1, write_capacity_units=1, wait=True)
# Create a thread
thread_item = AliasedModel(
    'Some Forum',
    'Some Subject',
    tags=['foo', 'bar'],
    last_post_datetime=datetime.now()
)
# Save the thread
thread_item.save()
# Batch write operation
with AliasedModel.batch_write() as batch:
    threads = []
    for x in range(100):
        thread = AliasedModel('forum-{0}'.format(x), 'subject-{0}'.format(x))
        thread.tags = ['tag1', 'tag2']
        thread.last_post_datetime = datetime.now()
        threads.append(thread)
    for thread in threads:
        batch.save(thread)
# Batch get
item_keys = [('forum-{0}'.format(x), 'subject-{0}'.format(x)) for x in range(100)]
for item in AliasedModel.batch_get(item_keys):
    print("Batch get item: {0}".format(item))
# Scan
for item in AliasedModel.scan():
    print("Scanned item: {0}".format(item))
# Query (filters use the Python attribute names, not the short aliases)
for item in AliasedModel.query('forum-1', AliasedModel.subject.startswith('subject')):
    print("Query using aliased attribute: {0}".format(item))
# Query with filters
for item in Thread.query('forum-1', (Thread.views == 0) | (Thread.replies == 0)):
    print("Query result: {0}".format(item))
# Scan with filters
for item in Thread.scan(Thread.subject.startswith('subject') & (Thread.views == 0)):
    print("Scanned item: {0} {1}".format(item.subject, item.views))
# Scan with a "null" filter: only items missing last_post_datetime
for item in Thread.scan(Thread.subject.startswith('subject') & Thread.last_post_datetime.does_not_exist()):
    print("Scanned item: {0} {1}".format(item.subject, item.views))
# Conditionally save an item
thread_item = Thread(
    'Some Forum',
    'Some Subject',
    tags=['foo', 'bar'],
    last_post_datetime=datetime.now()
)
# DynamoDB will only save the item if forum_name exists
print(thread_item.save(Thread.forum_name.exists()))
# DynamoDB will update the item, by adding 1 to the views attribute,
# if the forum_name attribute equals 'Some Forum' or the subject attribute exists
print(thread_item.update(
    actions=[
        Thread.views.add(1)
    ],
    condition=(
        (Thread.forum_name == 'Some Forum') | Thread.subject.exists()
    )
))
# DynamoDB will atomically update the attributes `replies` (increase value by 1),
# and `last_post_datetime` (set value to the current datetime)
print(thread_item.update(actions=[
    Thread.replies.add(1),
    Thread.last_post_datetime.set(datetime.now()),
]))
# DynamoDB will delete the item, only if the views attribute is equal to one.
# The conditional delete is expected to fail when views != 1, so the error
# is swallowed deliberately — but catch Exception rather than a bare except,
# which would also hide KeyboardInterrupt/SystemExit.
try:
    print(thread_item.delete(Thread.views == 1))
except Exception:
    pass
# Remove an item's attribute
print(thread_item.update(actions=[
    Thread.tags.remove()
]))
# Append to the list attribute (set() wraps append() to build the action).
print(thread_item.update(actions=[
    Thread.notes.set(
        Thread.notes.append(["new note"])
    )
]))
# Backup/restore example
# Print the size of the table (DynamoDB updates ItemCount lazily).
print("Table size: {}".format(Thread.describe_table().get('ItemCount')))
# Dump the entire table to a file
Thread.dump('thread.json')
# Optionally Delete all table items
# Commented out for safety
# for item in Thread.scan():
#     item.delete()
print("Table size: {}".format(Thread.describe_table().get('ItemCount')))
# Restore table from a file
Thread.load('thread.json')
print("Table size: {}".format(Thread.describe_table().get('ItemCount')))
# Dump the entire table to a string
serialized = Thread.dumps()
# Load the entire table from a string
Thread.loads(serialized)
| StarcoderdataPython |
from ConfigParser import ConfigParser
import os
def get_user_pass(cred_profile, fpath=None):
    """Return the (user, password) pair stored under *cred_profile*.

    When *fpath* is falsy, the ``api_creds.cfg`` file that lives next to
    this module is read instead.
    """
    if not fpath:
        here = os.path.abspath(os.path.dirname(__file__))
        fpath = os.path.join(here, 'api_creds.cfg')
    parser = ConfigParser()
    parser.read(fpath)
    return parser.get(cred_profile, 'user'), parser.get(cred_profile, 'password')
def get_token(cred_profile, fpath=None):
    """Return the API token stored under *cred_profile* in the creds file.

    When *fpath* is falsy, the ``api_creds.cfg`` file next to this module
    is used.
    """
    if not fpath:
        here = os.path.abspath(os.path.dirname(__file__))
        fpath = os.path.join(here, 'api_creds.cfg')
    parser = ConfigParser()
    parser.read(fpath)
    return parser.get(cred_profile, 'token')
| StarcoderdataPython |
# shoptimizer_api/optimizers_builtin/condition_optimizer.py
# coding=utf-8
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""A module for Shoptimizer API that fixes invalid condition values.
Reference: https://support.google.com/merchants/answer/6324469
If the condition field is specified as "new", but other fields in the
product imply that the condition is otherwise, this optimizer will reset
the condition value to "used".
"""
import logging
from typing import Any, Dict, List, Set
from flask import current_app
from optimizers_abstract import base_optimizer
_NEW = 'new'
_USED = 'used'


class ConditionOptimizer(base_optimizer.BaseOptimizer):
  """An optimizer that fixes invalidly-set condition fields.

  If a product's condition is "new" but its title or description contains
  tokens indicating a used product, the condition is reset to "used".
  Token lists come from the per-language condition optimizer config.
  """

  _OPTIMIZER_PARAMETER = 'condition-optimizer'
  _condition_config = None

  def _optimize(self, product_batch: Dict[str, Any], language: str,
                country: str, currency: str) -> int:
    """Runs the optimization.

    Fixes invalid condition values.
    See above for the definition of an invalid condition value.

    Args:
      product_batch: A batch of product data.
      language: The language to use for this optimizer.
      country: The country to use for this optimizer.
      currency: The currency to use for this optimizer.

    Returns:
      The number of products affected by this optimization.
    """
    num_of_products_optimized = 0
    self._condition_config = current_app.config.get('CONFIGS', {}).get(
        f'condition_optimizer_config_{language}', {})
    # The base token set is identical for every product in the batch, so
    # build (and log) it once instead of once per product. Use .get so a
    # sparse config does not raise KeyError.
    base_used_tokens = set(
        token.lower()
        for token in self._condition_config.get('used_tokens', []))
    logging.info('Used tokens were %s', base_used_tokens)
    for entry in product_batch['entries']:
      product = entry['product']
      google_product_category = product.get('googleProductCategory', '')
      if self._is_google_product_category_excluded(google_product_category):
        logging.info(
            'Product ID: %s With Category %s was flagged for exclusion '
            ' of the condition check', product.get('offerId', ''),
            google_product_category)
        continue
      if product.get('condition', '') == _NEW:
        # Start from a copy: category-specific tokens must not leak into
        # the checks for subsequent products.
        used_tokens = set(base_used_tokens)
        # Category format must follow the official spec to be converted to
        # a list. Ref: https://support.google.com/merchants/answer/6324436?hl=en.
        product_categories = google_product_category.split(' > ')
        if isinstance(product_categories, list) and product_categories:
          lowest_level_category = product_categories[-1]
          category_specific_tokens = self._get_tokens_for_category(
              lowest_level_category)
          if category_specific_tokens:
            used_tokens.update(
                token.lower() for token in category_specific_tokens)
        # Search for used tokens in both title and description and reset
        # the condition to used if any were detected.
        product_title = product.get('title', '')
        product_description = product.get('description', '')
        if self._field_contains_used_tokens(
            product_title, used_tokens) or self._field_contains_used_tokens(
                product_description, used_tokens):
          product['condition'] = _USED
          logging.info('Modified item %s: Setting new product to used.',
                       product.get('offerId', ''))
          num_of_products_optimized += 1
          base_optimizer.set_optimization_tracking(product,
                                                   base_optimizer.SANITIZED)
    return num_of_products_optimized

  def _is_google_product_category_excluded(
      self, google_product_category: str) -> bool:
    """Checks if the provided category was found in the exclusions config dict.

    Args:
      google_product_category: A string representing the product category.

    Returns:
      True if the given category was found in the condition config's list of
      categories to exclude from being optimized for condition due to those
      categories being at higher risk of containing false-positives.
    """
    excluded_categories = self._condition_config.get(
        'excluded_product_categories', [])
    # Match from the beginning of the string so an entire category family
    # can be excluded, while avoiding unrelated substring matches when only
    # a sub-category was specified.
    return any(
        google_product_category.startswith(category_to_exclude)
        for category_to_exclude in excluded_categories)

  def _field_contains_used_tokens(self, field_text: str,
                                  used_tokens: Set[str]) -> bool:
    """Checks if the provided field contains any terms in the given set.

    Args:
      field_text: A string representing the value of a product field.
      used_tokens: A set representing used condition indicators
        (expected lowercase).

    Returns:
      True if any term was found in the target product field, otherwise False.
    """
    return any(token in field_text.lower() for token in used_tokens)

  def _get_tokens_for_category(self, product_category: str) -> List[str]:
    """Gets the configured used-tokens for the provided category, if any.

    Args:
      product_category: The product's lowest-level category.

    Returns:
      A list of the tokens of the matching category, or an empty list.
    """
    # .get with a default keeps a config without 'target_product_categories'
    # from raising KeyError (consistent with the other config accesses).
    category_mappings = self._condition_config.get(
        'target_product_categories', {})
    return category_mappings.get(product_category, [])
| StarcoderdataPython |
""" Scripts for analyzing the results.
"""
import numpy as np
import pickle
from DataProcessor import DataProcessor
from utils_analysis import (
hc_analysis, plot_3d_B, differential_pathway, component_portion,
classify_patients, component_func, plot_phylo, plot_patients_F)
__author__ = "<NAME>"

# Load data
data_proc = DataProcessor()
df_gene = data_proc.load_top_gene_data(top_k=3000)
df_modu, len_kegg = data_proc.load_modu_data()
# Read the pickled ICA decomposition; use a context manager so the file
# handle is closed promptly (the bare pickle.load(open(...)) leaked it).
with open("data/ica/BCF.pkl", "rb") as bcf_file:
    BCF = pickle.load(bcf_file)
B, C, F = BCF["B"], BCF["C"], BCF["F"]
CF = np.dot(C, F)
# Drop three pathways from the module table and the matching rows
# (indices 5, 24, 25) from the matrices.
# NOTE(review): assumes rows 5/24/25 of B/C/CF correspond exactly to the
# three dropped pathway labels — confirm against load_modu_data() ordering.
df_modu = df_modu.drop(labels=["Pathways in cancer", "Glioma", "Breast cancer"], axis=0)
len_kegg -= 3
B = np.delete(B, [5, 24, 25], axis=0)
C = np.delete(C, [5, 24, 25], axis=0)
CF = np.delete(CF, [5, 24, 25], axis=0)
# Hierarchical clustering.
x = df_modu.values
hc_analysis(x, df_modu.columns, feature="Pathway")
x = df_gene.values
# NOTE(review): gene values are clustered but labeled with df_modu.columns;
# presumably both frames share the same sample columns — verify.
hc_analysis(x, df_modu.columns, feature="Genes")
# Differentially expressed pathways.
differential_pathway(df_modu, len_kegg, pval_threshold=1.0)
# Plot the PCA of bulk data
plot_3d_B(B, data_name="Pathway")
# Portions of components
comp_p = component_portion(F, plot_mode=True)
# Classify components of patients
list_patterns = classify_patients(F, threshold_0=2.5e-2)
plot_patients_F(F, threshold_0=2.5e-2)
# Functions of components
component_func(B, C, F, list(df_modu.index), len_kegg, threshold=0.05)
# Phylogeny of components: one plot per detected patient pattern.
for pattern in list_patterns:
    plot_phylo(C, F, list(df_modu.index), len_kegg, comp_p, pattern, threshold=0.05)
| StarcoderdataPython |
# Mesh/System/Entity/Concrete/Table.py
import numpy as np
from Mesh.System.Entity.Concrete import Concrete
from Mesh.System.SpaceFactor import MatterType
class Table(Concrete):
    """A wooden table: a solid top slab resting on four corner legs."""

    identifier = 'tables'
    default_dimension = (5, 4, 4)
    default_orientation = (0, 1, 0)

    def __init__(self, uuid, dimension_x=(1, 1, 1), selected_functions=()):
        super().__init__(uuid=uuid, dimension_x=dimension_x, matter_type=MatterType.WOOD,
                         selected_functions=selected_functions)

    def get_default_shape(self):
        """Return a voxel array with the table top and four legs filled in."""
        width, depth, height = type(self).default_dimension
        material = self.matter_type.value
        shape = np.zeros((width, depth, height))
        # Top slab fills the entire highest z-layer.
        shape[:, :, height - 1] = material
        # One single-voxel leg at each corner, from the floor up to the slab.
        for leg_x in (0, width - 1):
            for leg_y in (0, depth - 1):
                shape[leg_x, leg_y, :height - 1] = material
        return shape

    def validate_functions(self, selected_functions):
        # Any combination of functions is accepted for a table.
        return True

    def define_functions(self, selected_functions):
        # Tables expose no active functions.
        pass
| StarcoderdataPython |
# MINLP written by GAMS Convert at 04/21/18 13:54:16
#
# Equation counts
# Total E G L N X C B
# 202 152 0 50 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 384 144 240 0 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 1513 1402 111 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
model = m = ConcreteModel()
m.x2 = Var(within=Reals,bounds=(0.1,3.4),initialize=0.1)
m.x3 = Var(within=Reals,bounds=(0.1,3.4),initialize=0.1)
m.x4 = Var(within=Reals,bounds=(0.1,3.4),initialize=0.1)
m.x5 = Var(within=Reals,bounds=(0.1,3.4),initialize=0.1)
m.x6 = Var(within=Reals,bounds=(0.1,3.4),initialize=0.1)
m.x7 = Var(within=Reals,bounds=(0.1,3.4),initialize=0.1)
m.x8 = Var(within=Reals,bounds=(0.1,3.4),initialize=0.1)
m.x9 = Var(within=Reals,bounds=(0.1,3.4),initialize=0.1)
m.x10 = Var(within=Reals,bounds=(0.1,3.4),initialize=0.1)
m.x11 = Var(within=Reals,bounds=(0.1,3.4),initialize=0.1)
m.x12 = Var(within=Reals,bounds=(0.1,3.4),initialize=0.1)
m.x13 = Var(within=Reals,bounds=(0.1,3.4),initialize=0.1)
m.x14 = Var(within=Reals,bounds=(0.1,3.4),initialize=0.1)
m.x15 = Var(within=Reals,bounds=(0.1,3.4),initialize=0.1)
m.x16 = Var(within=Reals,bounds=(0.1,3.4),initialize=0.1)
m.x17 = Var(within=Reals,bounds=(0.1,3.4),initialize=0.1)
m.x18 = Var(within=Reals,bounds=(0.1,3.4),initialize=0.1)
m.x19 = Var(within=Reals,bounds=(0.1,3.4),initialize=0.1)
m.x20 = Var(within=Reals,bounds=(0.1,3.4),initialize=0.1)
m.x21 = Var(within=Reals,bounds=(0.1,3.4),initialize=0.1)
m.x22 = Var(within=Reals,bounds=(0.1,3.4),initialize=0.1)
m.x23 = Var(within=Reals,bounds=(0.1,3.4),initialize=0.1)
m.x24 = Var(within=Reals,bounds=(0.1,3.4),initialize=0.1)
m.x25 = Var(within=Reals,bounds=(0.1,3.4),initialize=0.1)
m.x26 = Var(within=Reals,bounds=(0.1,3.4),initialize=0.1)
m.x27 = Var(within=Reals,bounds=(None,None),initialize=13.3333333333333)
m.x28 = Var(within=Reals,bounds=(None,None),initialize=7.66261028176921)
m.x29 = Var(within=Reals,bounds=(None,None),initialize=7.66261028176921)
m.x30 = Var(within=Reals,bounds=(None,None),initialize=7.66261028176921)
m.x31 = Var(within=Reals,bounds=(None,None),initialize=7.66261028176921)
m.x32 = Var(within=Reals,bounds=(None,None),initialize=9.36329177569045)
m.x33 = Var(within=Reals,bounds=(None,None),initialize=9.36329177569045)
m.x34 = Var(within=Reals,bounds=(None,None),initialize=9.36329177569045)
m.x35 = Var(within=Reals,bounds=(None,None),initialize=9.36329177569045)
m.x36 = Var(within=Reals,bounds=(None,None),initialize=13.3333333333333)
m.x37 = Var(within=Reals,bounds=(None,None),initialize=13.3333333333333)
m.x38 = Var(within=Reals,bounds=(None,None),initialize=13.3333333333333)
m.x39 = Var(within=Reals,bounds=(None,None),initialize=13.3333333333333)
m.x40 = Var(within=Reals,bounds=(None,None),initialize=5.52052447473883)
m.x41 = Var(within=Reals,bounds=(None,None),initialize=5.52052447473883)
m.x42 = Var(within=Reals,bounds=(None,None),initialize=5.52052447473883)
m.x43 = Var(within=Reals,bounds=(None,None),initialize=5.52052447473883)
m.x44 = Var(within=Reals,bounds=(None,None),initialize=5.52052447473883)
m.x45 = Var(within=Reals,bounds=(None,None),initialize=5.52052447473883)
m.x46 = Var(within=Reals,bounds=(None,None),initialize=5.52052447473883)
m.x47 = Var(within=Reals,bounds=(None,None),initialize=5.52052447473883)
m.x48 = Var(within=Reals,bounds=(None,None),initialize=7.49268649265355)
m.x49 = Var(within=Reals,bounds=(None,None),initialize=7.49268649265355)
m.x50 = Var(within=Reals,bounds=(None,None),initialize=7.49268649265355)
m.x51 = Var(within=Reals,bounds=(None,None),initialize=7.49268649265355)
m.x52 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x53 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x54 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x55 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x56 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x57 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x58 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x59 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x60 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x61 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x62 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x63 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x64 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x65 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x66 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x67 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x68 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x69 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x70 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x71 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x72 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x73 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x74 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x75 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x76 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x77 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x78 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x79 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x80 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x81 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x82 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x83 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x84 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x85 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x86 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x87 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x88 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x89 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x90 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x91 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x92 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x93 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x94 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x95 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x96 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x97 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x98 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x99 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x100 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x101 = Var(within=Reals,bounds=(None,None),initialize=4)
m.x102 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x103 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x104 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x105 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x106 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x107 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x108 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x109 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x110 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x111 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x112 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x113 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x114 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x115 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x116 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x117 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x118 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x119 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x120 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x121 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x122 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x123 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x124 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x125 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x126 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x127 = Var(within=Reals,bounds=(-0.35,0.35),initialize=0)
m.x128 = Var(within=Reals,bounds=(-0.35,0.35),initialize=0)
m.x129 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x130 = Var(within=Reals,bounds=(-0.35,0.35),initialize=0)
m.x131 = Var(within=Reals,bounds=(-0.35,0.35),initialize=0)
m.x132 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x133 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x134 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x135 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x136 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x137 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x138 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x139 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x140 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x141 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x142 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x143 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x144 = Var(within=Reals,bounds=(None,None),initialize=0)
m.b145 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b146 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b147 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b148 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b149 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b150 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b151 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b152 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b153 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b154 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b155 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b156 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b157 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b158 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b159 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b160 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b161 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b162 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b163 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b164 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b165 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b166 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b167 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b168 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b169 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b170 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b171 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b172 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b173 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b174 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b175 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b176 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b177 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b178 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b179 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b180 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b181 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b182 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b183 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b184 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b185 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b186 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b187 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b188 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b189 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b190 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b191 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b192 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b193 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b194 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b195 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b196 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b197 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b198 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b199 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b200 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b201 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b202 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b203 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b204 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b205 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b206 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b207 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b208 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b209 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b210 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b211 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b212 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b213 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b214 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b215 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b216 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b217 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b218 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b219 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b220 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b221 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b222 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b223 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b224 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b225 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b226 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b227 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b228 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b229 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b230 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b231 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b232 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b233 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b234 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b235 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b236 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b237 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b238 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b239 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b240 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b241 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b242 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b243 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b244 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b245 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b246 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b247 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b248 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b249 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b250 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b251 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b252 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b253 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b254 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b255 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b256 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b257 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b258 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b259 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b260 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b261 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b262 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b263 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b264 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b265 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b266 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b267 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b268 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b269 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b270 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b271 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b272 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b273 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b274 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b275 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b276 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b277 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b278 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b279 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b280 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b281 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b282 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b283 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b284 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b285 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b286 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b287 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b288 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b289 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b290 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b291 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b292 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b293 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b294 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b295 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b296 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b297 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b298 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b299 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b300 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b301 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b302 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b303 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b304 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b305 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b306 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b307 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b308 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b309 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b310 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b311 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b312 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b313 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b314 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b315 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b316 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b317 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b318 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b319 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b320 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b321 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b322 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b323 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b324 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b325 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b326 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b327 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b328 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b329 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b330 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b331 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b332 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b333 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b334 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b335 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b336 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b337 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b338 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b339 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b340 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b341 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b342 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b343 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b344 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b345 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b346 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b347 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b348 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b349 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b350 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b351 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b352 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b353 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b354 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b355 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b356 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b357 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b358 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b359 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b360 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b361 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b362 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b363 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b364 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b365 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b366 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b367 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b368 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b369 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b370 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b371 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b372 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b373 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b374 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b375 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b376 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b377 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b378 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b379 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b380 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b381 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b382 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b383 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b384 = Var(within=Binary,bounds=(0,1),initialize=0)
m.obj = Objective(expr= 7.5*m.x2 + 13.0503831361382*m.x3 + 13.0503831361382*m.x4 + 13.0503831361382*m.x5
+ 13.0503831361382*m.x6 + 10.6800046816469*m.x7 + 10.6800046816469*m.x8 + 10.6800046816469*m.x9
+ 10.6800046816469*m.x10 + 7.5*m.x11 + 7.5*m.x12 + 7.5*m.x13 + 7.5*m.x14
+ 18.1142209327368*m.x15 + 18.1142209327368*m.x16 + 18.1142209327368*m.x17
+ 18.1142209327368*m.x18 + 18.1142209327368*m.x19 + 18.1142209327368*m.x20
+ 18.1142209327368*m.x21 + 18.1142209327368*m.x22 + 13.3463478150391*m.x23
+ 13.3463478150391*m.x24 + 13.3463478150391*m.x25 + 13.3463478150391*m.x26, sense=minimize)
m.c2 = Constraint(expr= - 133.333333333333*m.x2 + m.x27 == 0)
m.c3 = Constraint(expr= - 76.6261028176921*m.x3 + m.x28 == 0)
m.c4 = Constraint(expr= - 76.6261028176921*m.x4 + m.x29 == 0)
m.c5 = Constraint(expr= - 76.6261028176921*m.x5 + m.x30 == 0)
m.c6 = Constraint(expr= - 76.6261028176921*m.x6 + m.x31 == 0)
m.c7 = Constraint(expr= - 93.6329177569045*m.x7 + m.x32 == 0)
m.c8 = Constraint(expr= - 93.6329177569045*m.x8 + m.x33 == 0)
m.c9 = Constraint(expr= - 93.6329177569045*m.x9 + m.x34 == 0)
m.c10 = Constraint(expr= - 93.6329177569045*m.x10 + m.x35 == 0)
m.c11 = Constraint(expr= - 133.333333333333*m.x11 + m.x36 == 0)
m.c12 = Constraint(expr= - 133.333333333333*m.x12 + m.x37 == 0)
m.c13 = Constraint(expr= - 133.333333333333*m.x13 + m.x38 == 0)
m.c14 = Constraint(expr= - 133.333333333333*m.x14 + m.x39 == 0)
m.c15 = Constraint(expr= - 55.2052447473883*m.x15 + m.x40 == 0)
m.c16 = Constraint(expr= - 55.2052447473883*m.x16 + m.x41 == 0)
m.c17 = Constraint(expr= - 55.2052447473883*m.x17 + m.x42 == 0)
m.c18 = Constraint(expr= - 55.2052447473883*m.x18 + m.x43 == 0)
m.c19 = Constraint(expr= - 55.2052447473883*m.x19 + m.x44 == 0)
m.c20 = Constraint(expr= - 55.2052447473883*m.x20 + m.x45 == 0)
m.c21 = Constraint(expr= - 55.2052447473883*m.x21 + m.x46 == 0)
m.c22 = Constraint(expr= - 55.2052447473883*m.x22 + m.x47 == 0)
m.c23 = Constraint(expr= - 74.9268649265355*m.x23 + m.x48 == 0)
m.c24 = Constraint(expr= - 74.9268649265355*m.x24 + m.x49 == 0)
m.c25 = Constraint(expr= - 74.9268649265355*m.x25 + m.x50 == 0)
m.c26 = Constraint(expr= - 74.9268649265355*m.x26 + m.x51 == 0)
m.c27 = Constraint(expr= - 40*m.x2 + m.x52 == 0)
m.c28 = Constraint(expr= - 40*m.x3 + m.x54 == 0)
m.c29 = Constraint(expr= - 40*m.x4 + m.x56 == 0)
m.c30 = Constraint(expr= - 40*m.x5 + m.x58 == 0)
m.c31 = Constraint(expr= - 40*m.x6 + m.x60 == 0)
m.c32 = Constraint(expr= - 40*m.x7 + m.x62 == 0)
m.c33 = Constraint(expr= - 40*m.x8 + m.x64 == 0)
m.c34 = Constraint(expr= - 40*m.x9 + m.x66 == 0)
m.c35 = Constraint(expr= - 40*m.x10 + m.x68 == 0)
m.c36 = Constraint(expr= - 40*m.x11 + m.x70 == 0)
m.c37 = Constraint(expr= - 40*m.x12 + m.x72 == 0)
m.c38 = Constraint(expr= - 40*m.x13 + m.x74 == 0)
m.c39 = Constraint(expr= - 40*m.x14 + m.x76 == 0)
m.c40 = Constraint(expr= - 40*m.x15 + m.x78 == 0)
m.c41 = Constraint(expr= - 40*m.x16 + m.x80 == 0)
m.c42 = Constraint(expr= - 40*m.x17 + m.x82 == 0)
m.c43 = Constraint(expr= - 40*m.x18 + m.x84 == 0)
m.c44 = Constraint(expr= - 40*m.x19 + m.x86 == 0)
m.c45 = Constraint(expr= - 40*m.x20 + m.x88 == 0)
m.c46 = Constraint(expr= - 40*m.x21 + m.x90 == 0)
m.c47 = Constraint(expr= - 40*m.x22 + m.x92 == 0)
m.c48 = Constraint(expr= - 40*m.x23 + m.x94 == 0)
m.c49 = Constraint(expr= - 40*m.x24 + m.x96 == 0)
m.c50 = Constraint(expr= - 40*m.x25 + m.x98 == 0)
m.c51 = Constraint(expr= - 40*m.x26 + m.x100 == 0)
m.c52 = Constraint(expr= - 40*m.x2 + m.x53 == 0)
m.c53 = Constraint(expr= - 40*m.x3 + m.x55 == 0)
m.c54 = Constraint(expr= - 40*m.x4 + m.x57 == 0)
m.c55 = Constraint(expr= - 40*m.x5 + m.x59 == 0)
m.c56 = Constraint(expr= - 40*m.x6 + m.x61 == 0)
m.c57 = Constraint(expr= - 40*m.x7 + m.x63 == 0)
m.c58 = Constraint(expr= - 40*m.x8 + m.x65 == 0)
m.c59 = Constraint(expr= - 40*m.x9 + m.x67 == 0)
m.c60 = Constraint(expr= - 40*m.x10 + m.x69 == 0)
m.c61 = Constraint(expr= - 40*m.x11 + m.x71 == 0)
m.c62 = Constraint(expr= - 40*m.x12 + m.x73 == 0)
m.c63 = Constraint(expr= - 40*m.x13 + m.x75 == 0)
m.c64 = Constraint(expr= - 40*m.x14 + m.x77 == 0)
m.c65 = Constraint(expr= - 40*m.x15 + m.x79 == 0)
m.c66 = Constraint(expr= - 40*m.x16 + m.x81 == 0)
m.c67 = Constraint(expr= - 40*m.x17 + m.x83 == 0)
m.c68 = Constraint(expr= - 40*m.x18 + m.x85 == 0)
m.c69 = Constraint(expr= - 40*m.x19 + m.x87 == 0)
m.c70 = Constraint(expr= - 40*m.x20 + m.x89 == 0)
m.c71 = Constraint(expr= - 40*m.x21 + m.x91 == 0)
m.c72 = Constraint(expr= - 40*m.x22 + m.x93 == 0)
m.c73 = Constraint(expr= - 40*m.x23 + m.x95 == 0)
m.c74 = Constraint(expr= - 40*m.x24 + m.x97 == 0)
m.c75 = Constraint(expr= - 40*m.x25 + m.x99 == 0)
m.c76 = Constraint(expr= - 40*m.x26 + m.x101 == 0)
m.c77 = Constraint(expr=m.x27*(m.x130 - m.x127) - m.x102 == 0)
m.c78 = Constraint(expr=m.x28*(-0.574695771132691*m.x127 - 0.287347885566345*m.x128 + 0.766261028176921*m.x129 +
0.574695771132691*m.x136 + 0.287347885566345*m.x137 - 0.766261028176921*m.x138) - m.x103 == 0)
m.c79 = Constraint(expr=m.x29*(0.574695771132691*m.x130 - 0.287347885566345*m.x131 + 0.766261028176921*m.x132 -
0.574695771132691*m.x133 + 0.287347885566345*m.x134 - 0.766261028176921*m.x135) - m.x104 == 0)
m.c80 = Constraint(expr=m.x30*(0.287347885566345*m.x128 - 0.574695771132691*m.x127 + 0.766261028176921*m.x129 +
0.574695771132691*m.x139 - 0.287347885566345*m.x140 - 0.766261028176921*m.x141) - m.x105 == 0)
m.c81 = Constraint(expr=m.x31*(0.574695771132691*m.x130 + 0.287347885566345*m.x131 + 0.766261028176921*m.x132 -
0.574695771132691*m.x142 - 0.287347885566345*m.x143 - 0.766261028176921*m.x144) - m.x106 == 0)
m.c82 = Constraint(expr=m.x32*(0.936329177569045*m.x132 - 0.351123441588392*m.x131 + 0.351123441588392*m.x137 -
0.936329177569045*m.x138) - m.x107 == 0)
m.c83 = Constraint(expr=m.x33*(0.351123441588392*m.x131 + 0.936329177569045*m.x132 - 0.351123441588392*m.x140 -
0.936329177569045*m.x141) - m.x108 == 0)
m.c84 = Constraint(expr=m.x34*(0.936329177569045*m.x129 - 0.351123441588392*m.x128 + 0.351123441588392*m.x134 -
0.936329177569045*m.x135) - m.x109 == 0)
m.c85 = Constraint(expr=m.x35*(0.351123441588392*m.x128 + 0.936329177569045*m.x129 - 0.351123441588392*m.x143 -
0.936329177569045*m.x144) - m.x110 == 0)
m.c86 = Constraint(expr=m.x36*(m.x134 - m.x143) - m.x111 == 0)
m.c87 = Constraint(expr=m.x37*(m.x137 - m.x140) - m.x112 == 0)
m.c88 = Constraint(expr=m.x38*(m.x136 - m.x133) - m.x113 == 0)
m.c89 = Constraint(expr=m.x39*(m.x139 - m.x142) - m.x114 == 0)
m.c90 = Constraint(expr=m.x40*(0.345032779671177*m.x133 + 0.75907211527659*m.x134 + 0.552052447473883*m.x135) - m.x115
== 0)
m.c91 = Constraint(expr=m.x41*(0.345032779671177*m.x142 - 0.75907211527659*m.x143 + 0.552052447473883*m.x144) - m.x116
== 0)
m.c92 = Constraint(expr=m.x42*(0.75907211527659*m.x137 - 0.345032779671177*m.x136 + 0.552052447473883*m.x138) - m.x117
== 0)
m.c93 = Constraint(expr=m.x43*(-0.345032779671177*m.x139 - 0.75907211527659*m.x140 + 0.552052447473883*m.x141) - m.x118
== 0)
m.c94 = Constraint(expr=m.x44*(0.75907211527659*m.x136 - 0.345032779671177*m.x137 + 0.552052447473883*m.x138) - m.x119
== 0)
m.c95 = Constraint(expr=m.x45*(-0.75907211527659*m.x133 - 0.345032779671177*m.x134 + 0.552052447473883*m.x135) - m.x120
== 0)
m.c96 = Constraint(expr=m.x46*(0.75907211527659*m.x139 + 0.345032779671177*m.x140 + 0.552052447473883*m.x141) - m.x121
== 0)
m.c97 = Constraint(expr=m.x47*(0.345032779671177*m.x143 - 0.75907211527659*m.x142 + 0.552052447473883*m.x144) - m.x122
== 0)
m.c98 = Constraint(expr=m.x48*(0.468292905790847*m.x142 + 0.468292905790847*m.x143 + 0.749268649265355*m.x144) - m.x123
== 0)
m.c99 = Constraint(expr=m.x49*(0.468292905790847*m.x133 - 0.468292905790847*m.x134 + 0.749268649265355*m.x135) - m.x124
== 0)
m.c100 = Constraint(expr=m.x50*(-0.468292905790847*m.x136 - 0.468292905790847*m.x137 + 0.749268649265355*m.x138)
- m.x125 == 0)
m.c101 = Constraint(expr=m.x51*(0.468292905790847*m.x140 - 0.468292905790847*m.x139 + 0.749268649265355*m.x141) - m.x126
== 0)
m.c102 = Constraint(expr= - m.x102 - 0.574695771132691*m.x103 - 0.574695771132691*m.x105 == 1)
m.c103 = Constraint(expr= - 0.287347885566345*m.x103 + 0.287347885566345*m.x105 - 0.351123441588392*m.x109
+ 0.351123441588392*m.x110 == 10)
m.c104 = Constraint(expr= 0.766261028176921*m.x103 + 0.766261028176921*m.x105 + 0.936329177569045*m.x109
+ 0.936329177569045*m.x110 == -10)
m.c105 = Constraint(expr= m.x102 + 0.574695771132691*m.x104 + 0.574695771132691*m.x106 == 0)
m.c106 = Constraint(expr= - 0.287347885566345*m.x104 + 0.287347885566345*m.x106 - 0.351123441588392*m.x107
+ 0.351123441588392*m.x108 == 10)
m.c107 = Constraint(expr= 0.766261028176921*m.x104 + 0.766261028176921*m.x106 + 0.936329177569045*m.x107
+ 0.936329177569045*m.x108 == -10)
m.c108 = Constraint(expr= - 0.574695771132691*m.x104 - m.x113 + 0.345032779671177*m.x115 - 0.75907211527659*m.x120
+ 0.468292905790847*m.x124 == 0.5)
m.c109 = Constraint(expr= 0.287347885566345*m.x104 + 0.351123441588392*m.x109 + m.x111 + 0.75907211527659*m.x115
- 0.345032779671177*m.x120 - 0.468292905790847*m.x124 == 0)
m.c110 = Constraint(expr= - 0.766261028176921*m.x104 - 0.936329177569045*m.x109 + 0.552052447473883*m.x115
+ 0.552052447473883*m.x120 + 0.749268649265355*m.x124 == 0)
m.c111 = Constraint(expr= 0.574695771132691*m.x103 + m.x113 - 0.345032779671177*m.x117 + 0.75907211527659*m.x119
- 0.468292905790847*m.x125 == 0)
m.c112 = Constraint(expr= 0.287347885566345*m.x103 + 0.351123441588392*m.x107 + m.x112 + 0.75907211527659*m.x117
- 0.345032779671177*m.x119 - 0.468292905790847*m.x125 == 0)
m.c113 = Constraint(expr= - 0.766261028176921*m.x103 - 0.936329177569045*m.x107 + 0.552052447473883*m.x117
+ 0.552052447473883*m.x119 + 0.749268649265355*m.x125 == 0)
m.c114 = Constraint(expr= 0.574695771132691*m.x105 + m.x114 - 0.345032779671177*m.x118 + 0.75907211527659*m.x121
- 0.468292905790847*m.x126 == 0)
m.c115 = Constraint(expr= - 0.287347885566345*m.x105 - 0.351123441588392*m.x108 - m.x112 - 0.75907211527659*m.x118
+ 0.345032779671177*m.x121 + 0.468292905790847*m.x126 == 0)
m.c116 = Constraint(expr= - 0.766261028176921*m.x105 - 0.936329177569045*m.x108 + 0.552052447473883*m.x118
+ 0.552052447473883*m.x121 + 0.749268649265355*m.x126 == 0)
m.c117 = Constraint(expr= - 0.574695771132691*m.x106 - m.x114 + 0.345032779671177*m.x116 - 0.75907211527659*m.x122
+ 0.468292905790847*m.x123 == 0.6)
m.c118 = Constraint(expr= - 0.287347885566345*m.x106 - 0.351123441588392*m.x110 - m.x111 - 0.75907211527659*m.x116
+ 0.345032779671177*m.x122 + 0.468292905790847*m.x123 == 0)
m.c119 = Constraint(expr= - 0.766261028176921*m.x106 - 0.936329177569045*m.x110 + 0.552052447473883*m.x116
+ 0.552052447473883*m.x122 + 0.749268649265355*m.x123 == 0)
m.c120 = Constraint(expr= - m.x52 + m.x102 <= 0)
m.c121 = Constraint(expr= - m.x53 - m.x102 <= 0)
m.c122 = Constraint(expr= - m.x54 + m.x103 <= 0)
m.c123 = Constraint(expr= - m.x55 - m.x103 <= 0)
m.c124 = Constraint(expr= - m.x56 + m.x104 <= 0)
m.c125 = Constraint(expr= - m.x57 - m.x104 <= 0)
m.c126 = Constraint(expr= - m.x58 + m.x105 <= 0)
m.c127 = Constraint(expr= - m.x59 - m.x105 <= 0)
m.c128 = Constraint(expr= - m.x60 + m.x106 <= 0)
m.c129 = Constraint(expr= - m.x61 - m.x106 <= 0)
m.c130 = Constraint(expr= - m.x62 + m.x107 <= 0)
m.c131 = Constraint(expr= - m.x63 - m.x107 <= 0)
m.c132 = Constraint(expr= - m.x64 + m.x108 <= 0)
m.c133 = Constraint(expr= - m.x65 - m.x108 <= 0)
m.c134 = Constraint(expr= - m.x66 + m.x109 <= 0)
m.c135 = Constraint(expr= - m.x67 - m.x109 <= 0)
m.c136 = Constraint(expr= - m.x68 + m.x110 <= 0)
m.c137 = Constraint(expr= - m.x69 - m.x110 <= 0)
m.c138 = Constraint(expr= - m.x70 + m.x111 <= 0)
m.c139 = Constraint(expr= - m.x71 - m.x111 <= 0)
m.c140 = Constraint(expr= - m.x72 + m.x112 <= 0)
m.c141 = Constraint(expr= - m.x73 - m.x112 <= 0)
m.c142 = Constraint(expr= - m.x74 + m.x113 <= 0)
m.c143 = Constraint(expr= - m.x75 - m.x113 <= 0)
m.c144 = Constraint(expr= - m.x76 + m.x114 <= 0)
m.c145 = Constraint(expr= - m.x77 - m.x114 <= 0)
m.c146 = Constraint(expr= - m.x78 + m.x115 <= 0)
m.c147 = Constraint(expr= - m.x79 - m.x115 <= 0)
m.c148 = Constraint(expr= - m.x80 + m.x116 <= 0)
m.c149 = Constraint(expr= - m.x81 - m.x116 <= 0)
m.c150 = Constraint(expr= - m.x82 + m.x117 <= 0)
m.c151 = Constraint(expr= - m.x83 - m.x117 <= 0)
m.c152 = Constraint(expr= - m.x84 + m.x118 <= 0)
m.c153 = Constraint(expr= - m.x85 - m.x118 <= 0)
m.c154 = Constraint(expr= - m.x86 + m.x119 <= 0)
m.c155 = Constraint(expr= - m.x87 - m.x119 <= 0)
m.c156 = Constraint(expr= - m.x88 + m.x120 <= 0)
m.c157 = Constraint(expr= - m.x89 - m.x120 <= 0)
m.c158 = Constraint(expr= - m.x90 + m.x121 <= 0)
m.c159 = Constraint(expr= - m.x91 - m.x121 <= 0)
m.c160 = Constraint(expr= - m.x92 + m.x122 <= 0)
m.c161 = Constraint(expr= - m.x93 - m.x122 <= 0)
m.c162 = Constraint(expr= - m.x94 + m.x123 <= 0)
m.c163 = Constraint(expr= - m.x95 - m.x123 <= 0)
m.c164 = Constraint(expr= - m.x96 + m.x124 <= 0)
m.c165 = Constraint(expr= - m.x97 - m.x124 <= 0)
m.c166 = Constraint(expr= - m.x98 + m.x125 <= 0)
m.c167 = Constraint(expr= - m.x99 - m.x125 <= 0)
m.c168 = Constraint(expr= - m.x100 + m.x126 <= 0)
m.c169 = Constraint(expr= - m.x101 - m.x126 <= 0)
m.c170 = Constraint(expr= - m.x2 + 0.1*m.b145 + 0.2*m.b146 + 0.3*m.b147 + 0.4*m.b148 + 0.5*m.b149 + 0.6*m.b150
+ 0.7*m.b151 + 0.8*m.b152 + 0.9*m.b153 + m.b154 + 1.1*m.b155 + 1.2*m.b156 + 1.3*m.b157
+ 1.4*m.b158 + 1.5*m.b159 + 1.6*m.b160 + 1.7*m.b161 + 1.8*m.b162 + 1.9*m.b163 + 2*m.b164
+ 2.1*m.b165 + 2.2*m.b166 + 2.3*m.b167 + 2.4*m.b168 + 2.5*m.b169 + 2.6*m.b170 + 2.8*m.b171
+ 3*m.b172 + 3.2*m.b173 + 3.4*m.b174 == 0)
m.c171 = Constraint(expr= - m.x3 + 0.1*m.b175 + 0.2*m.b176 + 0.3*m.b177 + 0.4*m.b178 + 0.5*m.b179 + 0.6*m.b180
+ 0.7*m.b181 + 0.8*m.b182 + 0.9*m.b183 + m.b184 + 1.1*m.b185 + 1.2*m.b186 + 1.3*m.b187
+ 1.4*m.b188 + 1.5*m.b189 + 1.6*m.b190 + 1.7*m.b191 + 1.8*m.b192 + 1.9*m.b193 + 2*m.b194
+ 2.1*m.b195 + 2.2*m.b196 + 2.3*m.b197 + 2.4*m.b198 + 2.5*m.b199 + 2.6*m.b200 + 2.8*m.b201
+ 3*m.b202 + 3.2*m.b203 + 3.4*m.b204 == 0)
m.c172 = Constraint(expr= - m.x4 + 0.1*m.b175 + 0.2*m.b176 + 0.3*m.b177 + 0.4*m.b178 + 0.5*m.b179 + 0.6*m.b180
+ 0.7*m.b181 + 0.8*m.b182 + 0.9*m.b183 + m.b184 + 1.1*m.b185 + 1.2*m.b186 + 1.3*m.b187
+ 1.4*m.b188 + 1.5*m.b189 + 1.6*m.b190 + 1.7*m.b191 + 1.8*m.b192 + 1.9*m.b193 + 2*m.b194
+ 2.1*m.b195 + 2.2*m.b196 + 2.3*m.b197 + 2.4*m.b198 + 2.5*m.b199 + 2.6*m.b200 + 2.8*m.b201
+ 3*m.b202 + 3.2*m.b203 + 3.4*m.b204 == 0)
m.c173 = Constraint(expr= - m.x5 + 0.1*m.b175 + 0.2*m.b176 + 0.3*m.b177 + 0.4*m.b178 + 0.5*m.b179 + 0.6*m.b180
+ 0.7*m.b181 + 0.8*m.b182 + 0.9*m.b183 + m.b184 + 1.1*m.b185 + 1.2*m.b186 + 1.3*m.b187
+ 1.4*m.b188 + 1.5*m.b189 + 1.6*m.b190 + 1.7*m.b191 + 1.8*m.b192 + 1.9*m.b193 + 2*m.b194
+ 2.1*m.b195 + 2.2*m.b196 + 2.3*m.b197 + 2.4*m.b198 + 2.5*m.b199 + 2.6*m.b200 + 2.8*m.b201
+ 3*m.b202 + 3.2*m.b203 + 3.4*m.b204 == 0)
m.c174 = Constraint(expr= - m.x6 + 0.1*m.b175 + 0.2*m.b176 + 0.3*m.b177 + 0.4*m.b178 + 0.5*m.b179 + 0.6*m.b180
+ 0.7*m.b181 + 0.8*m.b182 + 0.9*m.b183 + m.b184 + 1.1*m.b185 + 1.2*m.b186 + 1.3*m.b187
+ 1.4*m.b188 + 1.5*m.b189 + 1.6*m.b190 + 1.7*m.b191 + 1.8*m.b192 + 1.9*m.b193 + 2*m.b194
+ 2.1*m.b195 + 2.2*m.b196 + 2.3*m.b197 + 2.4*m.b198 + 2.5*m.b199 + 2.6*m.b200 + 2.8*m.b201
+ 3*m.b202 + 3.2*m.b203 + 3.4*m.b204 == 0)
m.c175 = Constraint(expr= - m.x7 + 0.1*m.b205 + 0.2*m.b206 + 0.3*m.b207 + 0.4*m.b208 + 0.5*m.b209 + 0.6*m.b210
+ 0.7*m.b211 + 0.8*m.b212 + 0.9*m.b213 + m.b214 + 1.1*m.b215 + 1.2*m.b216 + 1.3*m.b217
+ 1.4*m.b218 + 1.5*m.b219 + 1.6*m.b220 + 1.7*m.b221 + 1.8*m.b222 + 1.9*m.b223 + 2*m.b224
+ 2.1*m.b225 + 2.2*m.b226 + 2.3*m.b227 + 2.4*m.b228 + 2.5*m.b229 + 2.6*m.b230 + 2.8*m.b231
+ 3*m.b232 + 3.2*m.b233 + 3.4*m.b234 == 0)
m.c176 = Constraint(expr= - m.x8 + 0.1*m.b205 + 0.2*m.b206 + 0.3*m.b207 + 0.4*m.b208 + 0.5*m.b209 + 0.6*m.b210
+ 0.7*m.b211 + 0.8*m.b212 + 0.9*m.b213 + m.b214 + 1.1*m.b215 + 1.2*m.b216 + 1.3*m.b217
+ 1.4*m.b218 + 1.5*m.b219 + 1.6*m.b220 + 1.7*m.b221 + 1.8*m.b222 + 1.9*m.b223 + 2*m.b224
+ 2.1*m.b225 + 2.2*m.b226 + 2.3*m.b227 + 2.4*m.b228 + 2.5*m.b229 + 2.6*m.b230 + 2.8*m.b231
+ 3*m.b232 + 3.2*m.b233 + 3.4*m.b234 == 0)
m.c177 = Constraint(expr= - m.x9 + 0.1*m.b205 + 0.2*m.b206 + 0.3*m.b207 + 0.4*m.b208 + 0.5*m.b209 + 0.6*m.b210
+ 0.7*m.b211 + 0.8*m.b212 + 0.9*m.b213 + m.b214 + 1.1*m.b215 + 1.2*m.b216 + 1.3*m.b217
+ 1.4*m.b218 + 1.5*m.b219 + 1.6*m.b220 + 1.7*m.b221 + 1.8*m.b222 + 1.9*m.b223 + 2*m.b224
+ 2.1*m.b225 + 2.2*m.b226 + 2.3*m.b227 + 2.4*m.b228 + 2.5*m.b229 + 2.6*m.b230 + 2.8*m.b231
+ 3*m.b232 + 3.2*m.b233 + 3.4*m.b234 == 0)
m.c178 = Constraint(expr= - m.x10 + 0.1*m.b205 + 0.2*m.b206 + 0.3*m.b207 + 0.4*m.b208 + 0.5*m.b209 + 0.6*m.b210
+ 0.7*m.b211 + 0.8*m.b212 + 0.9*m.b213 + m.b214 + 1.1*m.b215 + 1.2*m.b216 + 1.3*m.b217
+ 1.4*m.b218 + 1.5*m.b219 + 1.6*m.b220 + 1.7*m.b221 + 1.8*m.b222 + 1.9*m.b223 + 2*m.b224
+ 2.1*m.b225 + 2.2*m.b226 + 2.3*m.b227 + 2.4*m.b228 + 2.5*m.b229 + 2.6*m.b230 + 2.8*m.b231
+ 3*m.b232 + 3.2*m.b233 + 3.4*m.b234 == 0)
m.c179 = Constraint(expr= - m.x11 + 0.1*m.b235 + 0.2*m.b236 + 0.3*m.b237 + 0.4*m.b238 + 0.5*m.b239 + 0.6*m.b240
+ 0.7*m.b241 + 0.8*m.b242 + 0.9*m.b243 + m.b244 + 1.1*m.b245 + 1.2*m.b246 + 1.3*m.b247
+ 1.4*m.b248 + 1.5*m.b249 + 1.6*m.b250 + 1.7*m.b251 + 1.8*m.b252 + 1.9*m.b253 + 2*m.b254
+ 2.1*m.b255 + 2.2*m.b256 + 2.3*m.b257 + 2.4*m.b258 + 2.5*m.b259 + 2.6*m.b260 + 2.8*m.b261
+ 3*m.b262 + 3.2*m.b263 + 3.4*m.b264 == 0)
m.c180 = Constraint(expr= - m.x12 + 0.1*m.b235 + 0.2*m.b236 + 0.3*m.b237 + 0.4*m.b238 + 0.5*m.b239 + 0.6*m.b240
+ 0.7*m.b241 + 0.8*m.b242 + 0.9*m.b243 + m.b244 + 1.1*m.b245 + 1.2*m.b246 + 1.3*m.b247
+ 1.4*m.b248 + 1.5*m.b249 + 1.6*m.b250 + 1.7*m.b251 + 1.8*m.b252 + 1.9*m.b253 + 2*m.b254
+ 2.1*m.b255 + 2.2*m.b256 + 2.3*m.b257 + 2.4*m.b258 + 2.5*m.b259 + 2.6*m.b260 + 2.8*m.b261
+ 3*m.b262 + 3.2*m.b263 + 3.4*m.b264 == 0)
m.c181 = Constraint(expr= - m.x13 + 0.1*m.b265 + 0.2*m.b266 + 0.3*m.b267 + 0.4*m.b268 + 0.5*m.b269 + 0.6*m.b270
+ 0.7*m.b271 + 0.8*m.b272 + 0.9*m.b273 + m.b274 + 1.1*m.b275 + 1.2*m.b276 + 1.3*m.b277
+ 1.4*m.b278 + 1.5*m.b279 + 1.6*m.b280 + 1.7*m.b281 + 1.8*m.b282 + 1.9*m.b283 + 2*m.b284
+ 2.1*m.b285 + 2.2*m.b286 + 2.3*m.b287 + 2.4*m.b288 + 2.5*m.b289 + 2.6*m.b290 + 2.8*m.b291
+ 3*m.b292 + 3.2*m.b293 + 3.4*m.b294 == 0)
m.c182 = Constraint(expr= - m.x14 + 0.1*m.b265 + 0.2*m.b266 + 0.3*m.b267 + 0.4*m.b268 + 0.5*m.b269 + 0.6*m.b270
+ 0.7*m.b271 + 0.8*m.b272 + 0.9*m.b273 + m.b274 + 1.1*m.b275 + 1.2*m.b276 + 1.3*m.b277
+ 1.4*m.b278 + 1.5*m.b279 + 1.6*m.b280 + 1.7*m.b281 + 1.8*m.b282 + 1.9*m.b283 + 2*m.b284
+ 2.1*m.b285 + 2.2*m.b286 + 2.3*m.b287 + 2.4*m.b288 + 2.5*m.b289 + 2.6*m.b290 + 2.8*m.b291
+ 3*m.b292 + 3.2*m.b293 + 3.4*m.b294 == 0)
m.c183 = Constraint(expr= - m.x15 + 0.1*m.b295 + 0.2*m.b296 + 0.3*m.b297 + 0.4*m.b298 + 0.5*m.b299 + 0.6*m.b300
+ 0.7*m.b301 + 0.8*m.b302 + 0.9*m.b303 + m.b304 + 1.1*m.b305 + 1.2*m.b306 + 1.3*m.b307
+ 1.4*m.b308 + 1.5*m.b309 + 1.6*m.b310 + 1.7*m.b311 + 1.8*m.b312 + 1.9*m.b313 + 2*m.b314
+ 2.1*m.b315 + 2.2*m.b316 + 2.3*m.b317 + 2.4*m.b318 + 2.5*m.b319 + 2.6*m.b320 + 2.8*m.b321
+ 3*m.b322 + 3.2*m.b323 + 3.4*m.b324 == 0)
m.c184 = Constraint(expr= - m.x16 + 0.1*m.b295 + 0.2*m.b296 + 0.3*m.b297 + 0.4*m.b298 + 0.5*m.b299 + 0.6*m.b300
+ 0.7*m.b301 + 0.8*m.b302 + 0.9*m.b303 + m.b304 + 1.1*m.b305 + 1.2*m.b306 + 1.3*m.b307
+ 1.4*m.b308 + 1.5*m.b309 + 1.6*m.b310 + 1.7*m.b311 + 1.8*m.b312 + 1.9*m.b313 + 2*m.b314
+ 2.1*m.b315 + 2.2*m.b316 + 2.3*m.b317 + 2.4*m.b318 + 2.5*m.b319 + 2.6*m.b320 + 2.8*m.b321
+ 3*m.b322 + 3.2*m.b323 + 3.4*m.b324 == 0)
m.c185 = Constraint(expr= - m.x17 + 0.1*m.b295 + 0.2*m.b296 + 0.3*m.b297 + 0.4*m.b298 + 0.5*m.b299 + 0.6*m.b300
+ 0.7*m.b301 + 0.8*m.b302 + 0.9*m.b303 + m.b304 + 1.1*m.b305 + 1.2*m.b306 + 1.3*m.b307
+ 1.4*m.b308 + 1.5*m.b309 + 1.6*m.b310 + 1.7*m.b311 + 1.8*m.b312 + 1.9*m.b313 + 2*m.b314
+ 2.1*m.b315 + 2.2*m.b316 + 2.3*m.b317 + 2.4*m.b318 + 2.5*m.b319 + 2.6*m.b320 + 2.8*m.b321
+ 3*m.b322 + 3.2*m.b323 + 3.4*m.b324 == 0)
m.c186 = Constraint(expr= - m.x18 + 0.1*m.b295 + 0.2*m.b296 + 0.3*m.b297 + 0.4*m.b298 + 0.5*m.b299 + 0.6*m.b300
+ 0.7*m.b301 + 0.8*m.b302 + 0.9*m.b303 + m.b304 + 1.1*m.b305 + 1.2*m.b306 + 1.3*m.b307
+ 1.4*m.b308 + 1.5*m.b309 + 1.6*m.b310 + 1.7*m.b311 + 1.8*m.b312 + 1.9*m.b313 + 2*m.b314
+ 2.1*m.b315 + 2.2*m.b316 + 2.3*m.b317 + 2.4*m.b318 + 2.5*m.b319 + 2.6*m.b320 + 2.8*m.b321
+ 3*m.b322 + 3.2*m.b323 + 3.4*m.b324 == 0)
m.c187 = Constraint(expr= - m.x19 + 0.1*m.b325 + 0.2*m.b326 + 0.3*m.b327 + 0.4*m.b328 + 0.5*m.b329 + 0.6*m.b330
+ 0.7*m.b331 + 0.8*m.b332 + 0.9*m.b333 + m.b334 + 1.1*m.b335 + 1.2*m.b336 + 1.3*m.b337
+ 1.4*m.b338 + 1.5*m.b339 + 1.6*m.b340 + 1.7*m.b341 + 1.8*m.b342 + 1.9*m.b343 + 2*m.b344
+ 2.1*m.b345 + 2.2*m.b346 + 2.3*m.b347 + 2.4*m.b348 + 2.5*m.b349 + 2.6*m.b350 + 2.8*m.b351
+ 3*m.b352 + 3.2*m.b353 + 3.4*m.b354 == 0)
m.c188 = Constraint(expr= - m.x20 + 0.1*m.b325 + 0.2*m.b326 + 0.3*m.b327 + 0.4*m.b328 + 0.5*m.b329 + 0.6*m.b330
+ 0.7*m.b331 + 0.8*m.b332 + 0.9*m.b333 + m.b334 + 1.1*m.b335 + 1.2*m.b336 + 1.3*m.b337
+ 1.4*m.b338 + 1.5*m.b339 + 1.6*m.b340 + 1.7*m.b341 + 1.8*m.b342 + 1.9*m.b343 + 2*m.b344
+ 2.1*m.b345 + 2.2*m.b346 + 2.3*m.b347 + 2.4*m.b348 + 2.5*m.b349 + 2.6*m.b350 + 2.8*m.b351
+ 3*m.b352 + 3.2*m.b353 + 3.4*m.b354 == 0)
m.c189 = Constraint(expr= - m.x21 + 0.1*m.b325 + 0.2*m.b326 + 0.3*m.b327 + 0.4*m.b328 + 0.5*m.b329 + 0.6*m.b330
+ 0.7*m.b331 + 0.8*m.b332 + 0.9*m.b333 + m.b334 + 1.1*m.b335 + 1.2*m.b336 + 1.3*m.b337
+ 1.4*m.b338 + 1.5*m.b339 + 1.6*m.b340 + 1.7*m.b341 + 1.8*m.b342 + 1.9*m.b343 + 2*m.b344
+ 2.1*m.b345 + 2.2*m.b346 + 2.3*m.b347 + 2.4*m.b348 + 2.5*m.b349 + 2.6*m.b350 + 2.8*m.b351
+ 3*m.b352 + 3.2*m.b353 + 3.4*m.b354 == 0)
m.c190 = Constraint(expr= - m.x22 + 0.1*m.b325 + 0.2*m.b326 + 0.3*m.b327 + 0.4*m.b328 + 0.5*m.b329 + 0.6*m.b330
+ 0.7*m.b331 + 0.8*m.b332 + 0.9*m.b333 + m.b334 + 1.1*m.b335 + 1.2*m.b336 + 1.3*m.b337
+ 1.4*m.b338 + 1.5*m.b339 + 1.6*m.b340 + 1.7*m.b341 + 1.8*m.b342 + 1.9*m.b343 + 2*m.b344
+ 2.1*m.b345 + 2.2*m.b346 + 2.3*m.b347 + 2.4*m.b348 + 2.5*m.b349 + 2.6*m.b350 + 2.8*m.b351
+ 3*m.b352 + 3.2*m.b353 + 3.4*m.b354 == 0)
m.c191 = Constraint(expr= - m.x23 + 0.1*m.b355 + 0.2*m.b356 + 0.3*m.b357 + 0.4*m.b358 + 0.5*m.b359 + 0.6*m.b360
+ 0.7*m.b361 + 0.8*m.b362 + 0.9*m.b363 + m.b364 + 1.1*m.b365 + 1.2*m.b366 + 1.3*m.b367
+ 1.4*m.b368 + 1.5*m.b369 + 1.6*m.b370 + 1.7*m.b371 + 1.8*m.b372 + 1.9*m.b373 + 2*m.b374
+ 2.1*m.b375 + 2.2*m.b376 + 2.3*m.b377 + 2.4*m.b378 + 2.5*m.b379 + 2.6*m.b380 + 2.8*m.b381
+ 3*m.b382 + 3.2*m.b383 + 3.4*m.b384 == 0)
m.c192 = Constraint(expr= - m.x24 + 0.1*m.b355 + 0.2*m.b356 + 0.3*m.b357 + 0.4*m.b358 + 0.5*m.b359 + 0.6*m.b360
+ 0.7*m.b361 + 0.8*m.b362 + 0.9*m.b363 + m.b364 + 1.1*m.b365 + 1.2*m.b366 + 1.3*m.b367
+ 1.4*m.b368 + 1.5*m.b369 + 1.6*m.b370 + 1.7*m.b371 + 1.8*m.b372 + 1.9*m.b373 + 2*m.b374
+ 2.1*m.b375 + 2.2*m.b376 + 2.3*m.b377 + 2.4*m.b378 + 2.5*m.b379 + 2.6*m.b380 + 2.8*m.b381
+ 3*m.b382 + 3.2*m.b383 + 3.4*m.b384 == 0)
m.c193 = Constraint(expr= - m.x25 + 0.1*m.b355 + 0.2*m.b356 + 0.3*m.b357 + 0.4*m.b358 + 0.5*m.b359 + 0.6*m.b360
+ 0.7*m.b361 + 0.8*m.b362 + 0.9*m.b363 + m.b364 + 1.1*m.b365 + 1.2*m.b366 + 1.3*m.b367
+ 1.4*m.b368 + 1.5*m.b369 + 1.6*m.b370 + 1.7*m.b371 + 1.8*m.b372 + 1.9*m.b373 + 2*m.b374
+ 2.1*m.b375 + 2.2*m.b376 + 2.3*m.b377 + 2.4*m.b378 + 2.5*m.b379 + 2.6*m.b380 + 2.8*m.b381
+ 3*m.b382 + 3.2*m.b383 + 3.4*m.b384 == 0)
m.c194 = Constraint(expr= - m.x26 + 0.1*m.b355 + 0.2*m.b356 + 0.3*m.b357 + 0.4*m.b358 + 0.5*m.b359 + 0.6*m.b360
+ 0.7*m.b361 + 0.8*m.b362 + 0.9*m.b363 + m.b364 + 1.1*m.b365 + 1.2*m.b366 + 1.3*m.b367
+ 1.4*m.b368 + 1.5*m.b369 + 1.6*m.b370 + 1.7*m.b371 + 1.8*m.b372 + 1.9*m.b373 + 2*m.b374
+ 2.1*m.b375 + 2.2*m.b376 + 2.3*m.b377 + 2.4*m.b378 + 2.5*m.b379 + 2.6*m.b380 + 2.8*m.b381
+ 3*m.b382 + 3.2*m.b383 + 3.4*m.b384 == 0)
m.c195 = Constraint(expr= m.b145 + m.b146 + m.b147 + m.b148 + m.b149 + m.b150 + m.b151 + m.b152 + m.b153 + m.b154
+ m.b155 + m.b156 + m.b157 + m.b158 + m.b159 + m.b160 + m.b161 + m.b162 + m.b163 + m.b164
+ m.b165 + m.b166 + m.b167 + m.b168 + m.b169 + m.b170 + m.b171 + m.b172 + m.b173 + m.b174
== 1)
m.c196 = Constraint(expr= m.b175 + m.b176 + m.b177 + m.b178 + m.b179 + m.b180 + m.b181 + m.b182 + m.b183 + m.b184
+ m.b185 + m.b186 + m.b187 + m.b188 + m.b189 + m.b190 + m.b191 + m.b192 + m.b193 + m.b194
+ m.b195 + m.b196 + m.b197 + m.b198 + m.b199 + m.b200 + m.b201 + m.b202 + m.b203 + m.b204
== 1)
m.c197 = Constraint(expr= m.b205 + m.b206 + m.b207 + m.b208 + m.b209 + m.b210 + m.b211 + m.b212 + m.b213 + m.b214
+ m.b215 + m.b216 + m.b217 + m.b218 + m.b219 + m.b220 + m.b221 + m.b222 + m.b223 + m.b224
+ m.b225 + m.b226 + m.b227 + m.b228 + m.b229 + m.b230 + m.b231 + m.b232 + m.b233 + m.b234
== 1)
m.c198 = Constraint(expr= m.b235 + m.b236 + m.b237 + m.b238 + m.b239 + m.b240 + m.b241 + m.b242 + m.b243 + m.b244
+ m.b245 + m.b246 + m.b247 + m.b248 + m.b249 + m.b250 + m.b251 + m.b252 + m.b253 + m.b254
+ m.b255 + m.b256 + m.b257 + m.b258 + m.b259 + m.b260 + m.b261 + m.b262 + m.b263 + m.b264
== 1)
m.c199 = Constraint(expr= m.b265 + m.b266 + m.b267 + m.b268 + m.b269 + m.b270 + m.b271 + m.b272 + m.b273 + m.b274
+ m.b275 + m.b276 + m.b277 + m.b278 + m.b279 + m.b280 + m.b281 + m.b282 + m.b283 + m.b284
+ m.b285 + m.b286 + m.b287 + m.b288 + m.b289 + m.b290 + m.b291 + m.b292 + m.b293 + m.b294
== 1)
m.c200 = Constraint(expr= m.b295 + m.b296 + m.b297 + m.b298 + m.b299 + m.b300 + m.b301 + m.b302 + m.b303 + m.b304
+ m.b305 + m.b306 + m.b307 + m.b308 + m.b309 + m.b310 + m.b311 + m.b312 + m.b313 + m.b314
+ m.b315 + m.b316 + m.b317 + m.b318 + m.b319 + m.b320 + m.b321 + m.b322 + m.b323 + m.b324
== 1)
m.c201 = Constraint(expr= m.b325 + m.b326 + m.b327 + m.b328 + m.b329 + m.b330 + m.b331 + m.b332 + m.b333 + m.b334
+ m.b335 + m.b336 + m.b337 + m.b338 + m.b339 + m.b340 + m.b341 + m.b342 + m.b343 + m.b344
+ m.b345 + m.b346 + m.b347 + m.b348 + m.b349 + m.b350 + m.b351 + m.b352 + m.b353 + m.b354
== 1)
m.c202 = Constraint(expr= m.b355 + m.b356 + m.b357 + m.b358 + m.b359 + m.b360 + m.b361 + m.b362 + m.b363 + m.b364
+ m.b365 + m.b366 + m.b367 + m.b368 + m.b369 + m.b370 + m.b371 + m.b372 + m.b373 + m.b374
+ m.b375 + m.b376 + m.b377 + m.b378 + m.b379 + m.b380 + m.b381 + m.b382 + m.b383 + m.b384
== 1)
| StarcoderdataPython |
179572 | <reponame>NichCritic/pymud
from pynlg.realizer import NounConjunction, NounPhrase, VerbPhrase, PrepositionalPhrase, Clause
from pynlg.lexicon import Noun, Adjective, Verb
class ObjectDescriber(object):
    """Render game objects and scene graphs as English phrases.

    Uses a pynlg lexicon for word lookup (via ``getWord``) and the pynlg
    realizer classes (NounPhrase, VerbPhrase, ...) to build sentences.
    """

    def __init__(self, lexicon):
        # Lexicon used for every word lookup in this class.
        self.lex = lexicon

    def group_by_target(self, obj, target):
        """Find the target in the provided tree, then split the tree at that
        level, creating a list of objects to render.

        Not yet implemented.
        """
        pass

    def describe_graph(self, graph_node, target=None):
        """Realize a sentence describing the scene graph ``graph_node``.

        Unwinds nested ``'subject'`` links to the innermost element, then
        renders it either as an action coming from an object
        ("a <character> <type> <verb> from <object>") or as a plain
        existence statement ("there is <object>"), adding one locative
        prepositional phrase per enclosing level.

        NOTE(review): when ``target`` is not None this method falls through
        and returns None -- presumably grouping via group_by_target was
        planned; confirm before relying on the target argument.
        """
        if target is None:
            stack = [graph_node]
            there = NounPhrase(self.lex.getWord('there', 'NOUN'))
            # Unwind nested subjects so stack[0] is the innermost element.
            while 'subject' in stack[0]:
                stack.insert(0, stack[0]['subject'])
            elem = stack[0]
            if 'action' in elem:
                vb = self.lex.getWord(elem['action']['verb'], 'VERB')
                adj = self.lex.getWord(
                    elem['action']['character'], 'ADJECTIVE')
                subject = NounPhrase(self.lex.getWord(
                    elem['action']['type'], 'NOUN'), adjectives=[adj])
                subject.add_determiner(self.lex.getWord('a'))
                vp = VerbPhrase(vb)
                pp = PrepositionalPhrase(self.lex.getWord('from'), noun_phrases=[
                    self.describe_object(elem)])
                vp.add_prepositional_phrase(pp)
                clause = Clause(subject, vp)
            else:
                vp = VerbPhrase(self.lex.getWord(
                    'be'), direct_object=self.describe_object(elem))
                clause = Clause(there, vp)
            # One locative phrase ("<location> the <object>") per level
            # above the innermost element.
            for elem in stack[1:]:
                pp = PrepositionalPhrase(self.lex.getWord(elem['location'], 'PREPOSITION'), noun_phrases=[
                    self.describe_object(elem['object'])])
                vp.add_prepositional_phrase(pp)
            return clause.realize()

    def preprocess(self, obj):
        """Extract the noun and adjective descriptors for ``obj``.

        Returns a dict with the object's display name under ``'noun'`` and a
        list of adjective strings under ``'descriptors'`` (currently only the
        material descriptor, when the object has a material component).
        """
        noun = obj.names.name
        descriptors = []
        if obj.has('material'):
            descriptors.append(obj.material.get_material()['descriptor'])
        return {
            'noun': noun,
            'descriptors': descriptors
        }

    def get_noun(self, word, is_name):
        """Look ``word`` up in the lexicon as a noun; fall back to a fresh
        Noun (marked proper when ``is_name``) if the lexicon lacks it."""
        ret = None
        try:
            ret = self.lex.getWord(word, 'NOUN')
        except Exception:
            # Lexicon miss: build an ad-hoc entry instead of failing.
            ret = Noun(word, features=['proper'] if is_name else [])
        return ret

    def get_adjective(self, word):
        """Look ``word`` up in the lexicon as an adjective; fall back to a
        fresh Adjective if the lexicon lacks it."""
        ret = None
        try:
            # BUGFIX: was self.lex.get_word(...), which raised AttributeError
            # (masked by the broad except) and always took the fallback path.
            # Every other call site in this class uses getWord.
            ret = self.lex.getWord(word, 'ADJECTIVE')
        except Exception:
            ret = Adjective(word, category='ADJECTIVE')
        return ret

    def describe(self, data):
        """Realize a noun phrase for grouped objects.

        ``data`` is a ``(dtype, groups)`` pair where each group is a list of
        objects sharing a material; each group becomes "the <material>
        <noun> and <noun>...", and the groups are joined in a conjunction.
        """
        dtype, groups = data
        # This code has to exist somewhere...
        group_nps = []
        for g in groups:
            # All members of a group share a material; describe it once.
            material_desc = g[0].material.get_material()['descriptor']
            material_adj = self.get_adjective(material_desc)
            nouns = [NounPhrase(self.get_noun(n.names.name, False)) for n in g]
            nc = NounConjunction(nouns)
            np = NounPhrase(nc, adjectives=[material_adj])
            np.add_determiner(self.lex.getWord('the'))
            group_nps.append(np)
        group_conj = NounConjunction(group_nps)
        res = NounPhrase(group_conj)
        return res

    def describe_object(self, obj):
        """Build a noun phrase for a single object: its descriptors as
        adjectives, with an indefinite article unless the noun is proper
        (avatars use their proper name)."""
        pp = self.preprocess(obj)
        ads = [self.get_adjective(desc)
               for desc in pp['descriptors']]
        subject = self.get_noun(pp['noun'], obj.has('avatar_type'))
        noun = NounPhrase(subject, adjectives=ads)
        if 'proper' not in subject.features:
            noun.add_determiner(self.lex.getWord('a'))
        return noun
# --- next module: South migration for the sponsorship app (dataset separator removed) ---
119875 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds the ``SponsorListPlugin`` model and the
    join table for its ``levels`` many-to-many field.

    NOTE: the ``models`` dict below is a frozen snapshot of the ORM taken
    when this migration was generated.  Do not hand-edit it to match later
    model changes -- generate a new migration instead.
    """

    def forwards(self, orm):
        """Apply the migration: create the plugin table and its M2M table."""
        # Adding model 'SponsorListPlugin'
        db.create_table('cmsplugin_sponsorlistplugin', (
            ('cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
            ('group', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('split_list_length', self.gf('django.db.models.fields.IntegerField')(default=None, null=True, blank=True)),
            ('custom_css_classes', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
        ))
        db.send_create_signal('sponsorship', ['SponsorListPlugin'])

        # Adding M2M table for field levels on 'SponsorListPlugin'
        db.create_table('sponsorship_sponsorlistplugin_levels', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('sponsorlistplugin', models.ForeignKey(orm['sponsorship.sponsorlistplugin'], null=False)),
            ('sponsorlevel', models.ForeignKey(orm['sponsorship.sponsorlevel'], null=False))
        ))
        # Each (plugin, level) pair may appear at most once.
        db.create_unique('sponsorship_sponsorlistplugin_levels', ['sponsorlistplugin_id', 'sponsorlevel_id'])

    def backwards(self, orm):
        """Reverse the migration: drop the plugin table and its M2M table."""
        # Deleting model 'SponsorListPlugin'
        db.delete_table('cmsplugin_sponsorlistplugin')

        # Removing M2M table for field levels on 'SponsorListPlugin'
        db.delete_table('sponsorship_sponsorlistplugin_levels')

    # Frozen ORM snapshot used by South to build the ``orm`` object passed
    # to forwards()/backwards().
    models = {
        'cms.cmsplugin': {
            'Meta': {'object_name': 'CMSPlugin'},
            'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 4, 1, 0, 0)'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
            'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
            'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        },
        'cms.placeholder': {
            'Meta': {'object_name': 'Placeholder'},
            'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
        },
        'conference.conference': {
            'Meta': {'object_name': 'Conference'},
            'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'reviews_active': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'reviews_end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'reviews_start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'timezone': ('timezones.fields.TimeZoneField', [], {'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'sponsorship.sponsor': {
            'Meta': {'object_name': 'Sponsor'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'annotation': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'contact_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
            'contact_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'external_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sponsorship.SponsorLevel']"}),
            'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'sponsorship.sponsorlevel': {
            'Meta': {'ordering': "['conference', 'order']", 'object_name': 'SponsorLevel'},
            'conference': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['conference.Conference']"}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
        },
        'sponsorship.sponsorlistplugin': {
            'Meta': {'object_name': 'SponsorListPlugin', 'db_table': "'cmsplugin_sponsorlistplugin'", '_ormbases': ['cms.CMSPlugin']},
            'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
            'custom_css_classes': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'group': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sponsorship.SponsorLevel']", 'symmetrical': 'False'}),
            'split_list_length': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
        }
    }

    # BUGFIX: removed stray dataset-residue text ("| StarcoderdataPython |")
    # that was fused onto the end of this line and made the module unparseable.
    complete_apps = ['sponsorship']
# --- trailing dataset-UI residue removed ("Subsets and Splits" placeholder text) ---