id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
4948943 | <reponame>SilentByte/lyngua
from fastapi import APIRouter, Response

# Router grouping health/liveness endpoints.
ping_router = APIRouter()


# response_class=Response with status 204 yields an empty No-Content reply.
@ping_router.get('/ping', status_code=204, response_class=Response)
def ping() -> None:
    """Liveness probe: respond 204 No Content with an empty body."""
    pass
| StarcoderdataPython |
3554726 | <reponame>guiguid/core
"""Tests for the Abode module."""
from unittest.mock import patch
from homeassistant.components.abode import (
DOMAIN as ABODE_DOMAIN,
SERVICE_CAPTURE_IMAGE,
SERVICE_SETTINGS,
SERVICE_TRIGGER_AUTOMATION,
)
from homeassistant.components.alarm_control_panel import DOMAIN as ALARM_DOMAIN
from .common import setup_platform
async def test_change_settings(hass):
    """Test change_setting service."""
    await setup_platform(hass, ALARM_DOMAIN)

    # Patch the library call so no real Abode request is made; the service
    # call must be forwarded to abodepy exactly once.
    with patch("abodepy.Abode.set_setting") as mock_set_setting:
        await hass.services.async_call(
            ABODE_DOMAIN,
            SERVICE_SETTINGS,
            {"setting": "confirm_snd", "value": "loud"},
            blocking=True,
        )
        await hass.async_block_till_done()
        mock_set_setting.assert_called_once()
async def test_unload_entry(hass):
    """Test unloading the Abode entry."""
    mock_entry = await setup_platform(hass, ALARM_DOMAIN)

    # Unloading must log out of Abode and stop its event controller.
    with patch("abodepy.Abode.logout") as mock_logout, patch(
        "abodepy.event_controller.AbodeEventController.stop"
    ) as mock_events_stop:
        assert await hass.config_entries.async_unload(mock_entry.entry_id)
        mock_logout.assert_called_once()
        mock_events_stop.assert_called_once()

    # All Abode services must be deregistered once the entry is unloaded.
    assert not hass.services.has_service(ABODE_DOMAIN, SERVICE_SETTINGS)
    assert not hass.services.has_service(ABODE_DOMAIN, SERVICE_CAPTURE_IMAGE)
    assert not hass.services.has_service(ABODE_DOMAIN, SERVICE_TRIGGER_AUTOMATION)
| StarcoderdataPython |
1910678 | # Definition for singly-linked list.
# class ListNode:
#     def __init__(self, val=0, next=None):
#         self.val = val
#         self.next = next
class Solution:
    def plusOne(self, head: ListNode) -> ListNode:
        """Add one to the integer whose digits are stored in the list
        (most significant digit first) and return the resulting head."""
        # A sentinel in front of head absorbs a carry off the first digit
        # (e.g. 999 -> 1000).
        sentinel = ListNode(0, head)
        # Find the rightmost node whose digit is not 9; incrementing it
        # absorbs the carry, and every 9 after it rolls over to 0.
        rightmost_not_nine = sentinel
        node = head
        while node:
            if node.val != 9:
                rightmost_not_nine = node
            node = node.next
        rightmost_not_nine.val += 1
        node = rightmost_not_nine.next
        while node:
            node.val = 0
            node = node.next
        # Keep the sentinel only if a carry actually reached it.
        return sentinel if sentinel.val else sentinel.next
3561182 | """
2D convolution: image filtering.
Low-pass filter (LPF): helps to remove noise.
High-pass filter (HPF): helps to find edges.
Question: how do we get the kernel we want?
"""
import cv2
import matplotlib.pyplot as plt
"""
Image blurring: with an LPF.
1. Averaging
This is done by convolving the image with a normalized box filter. It simply
takes the average of all the pixels under the kernel area and replaces the
central pixel with that average.
"""
def test_Blur():
    """Display an image beside its 5x5 normalized box-filter (averaging) blur."""
    original = cv2.imread('opencv-logo-white.png')
    averaged = cv2.blur(original, (5, 5))
    plt.subplot(121), plt.imshow(original), plt.title("original")
    plt.subplot(122), plt.imshow(averaged), plt.title("blurred")
    plt.show()
"""
2. Gaussian blurring
A Gaussian kernel is used instead of a box filter.
"""
def test_GaussianBlur():
    """Display an image beside its 5x5 Gaussian blur (sigma derived from kernel)."""
    original = cv2.imread('opencv-logo-white.png')
    smoothed = cv2.GaussianBlur(original, (5, 5), 0)
    plt.subplot(121), plt.imshow(original), plt.title("original")
    plt.subplot(122), plt.imshow(smoothed), plt.title("blurred")
    plt.show()
"""
Median blurring:
Compared with average blurring, the central pixel is replaced with the median
of the pixels under the filter area.
"""
def test_MedianBlur():
    """Display an image beside its aperture-5 median blur."""
    original = cv2.imread('opencv-logo-white.png')
    filtered = cv2.medianBlur(original, 5)
    plt.subplot(121), plt.imshow(original), plt.title("original")
    plt.subplot(122), plt.imshow(filtered), plt.title("blurred")
    plt.show()


if __name__ == "__main__":
    # test_Blur()
    # test_GaussianBlur()
    test_MedianBlur()
| StarcoderdataPython |
6582063 | from a10sdk.common.A10BaseClass import A10BaseClass
class Ds(A10BaseClass):
    """Delegation Signer (DS) resource records of child zones.

    Supports CRUD operations via the aXAPI endpoint
    `https://<Hostname|Ip address>//axapi/v3/dnssec/ds` and inherits from
    `common/A10BaseClass`.  This is the `"PARENT"` class for this module.

    :param ds_delete: flag (number, default 0, optional) - delete the DS file.
    :param zone_name: DNS zone name of the child zone (string, 1-127 chars,
        optional).
    :param DeviceProxy: proxy for REST operations and session handling; see
        `common/device_proxy.py`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.required = []
        self.b_key = "ds"
        self.a10_url = "/axapi/v3/dnssec/ds"
        self.DeviceProxy = ""
        self.ds_delete = ""
        self.zone_name = ""
        # Any aXAPI field supplied by the caller becomes an attribute.
        for field_name, field_value in kwargs.items():
            setattr(self, field_name, field_value)
| StarcoderdataPython |
202167 | #!/usr/bin/env python3.7
from .swaszek_strategy import SwaszekStrategy
from .agent import AgentNextPos, AgentRandom, AgentSamePos
| StarcoderdataPython |
1822696 | <reponame>vanigupta20024/Programming-Challenges<gh_stars>10-100
'''
A string is a valid parentheses string (denoted VPS) if it meets one of the following:
It is an empty string "", or a single character not equal to "(" or ")",
It can be written as AB (A concatenated with B), where A and B are VPS's, or
It can be written as (A), where A is a VPS.
We can similarly define the nesting depth depth(S) of any VPS S as follows:
depth("") = 0
depth(C) = 0, where C is a string with a single character not equal to "(" or ")".
depth(A + B) = max(depth(A), depth(B)), where A and B are VPS's.
depth("(" + A + ")") = 1 + depth(A), where A is a VPS.
For example, "", "()()", and "()(()())" are VPS's (with nesting depths 0, 1, and 2), and ")(" and "(()" are not VPS's.
Given a VPS represented as string s, return the nesting depth of s.
'''
class Solution:
    def maxDepth(self, s: str) -> int:
        """Return the maximum nesting depth of parentheses in the VPS *s*.

        Walks the string once, tracking the current open-paren depth and
        the deepest level reached.  O(len(s)) time, O(1) space.
        """
        depth = deepest = 0
        for ch in s:
            # Bug fix: the original used `is`/`is not` to compare string
            # values, which only works by accident of CPython interning
            # (and is a SyntaxWarning since 3.8); `==` is correct.
            if ch == "(":
                depth += 1
                deepest = max(deepest, depth)
            elif ch == ")":
                depth -= 1
        return deepest
| StarcoderdataPython |
1816797 | from flask_restplus import Api, Resource, reqparse, Namespace
from flask import request
from os import path, remove
from classes import RecordedVideo
from classes import apikey
from classes import upvotes
from classes.shared import db
from globals import globalvars
# REST namespace for clip-related queries and mutations.
api = Namespace('clip', description='Clip Related Queries and Functions')

# Parser for PUT bodies: both fields are optional strings.
clipParserPut = reqparse.RequestParser()
clipParserPut.add_argument('clipName', type=str)
clipParserPut.add_argument('description', type=str)


@api.route('/')
class api_1_ListClips(Resource):
    def get(self):
        """
        Returns a List of All Saved Clips
        """
        # Only clips marked published are exposed publicly.
        clipsList = RecordedVideo.Clips.query.filter_by(published=True).all()
        db.session.commit()
        return {'results': [ob.serialize() for ob in clipsList]}
@api.route('/<int:clipID>')
@api.doc(params={'clipID': 'ID Number for the Clip'})
class api_1_ListClip(Resource):
    def get(self, clipID):
        """
        Returns Info on a Single Saved Clip
        """
        clipList = RecordedVideo.Clips.query.filter_by(id=clipID, published=True).all()
        db.session.commit()
        return {'results': [ob.serialize() for ob in clipList]}

    @api.expect(clipParserPut)
    @api.doc(security='apikey')
    @api.doc(responses={200: 'Success', 400: 'Request Error'})
    def put(self, clipID):
        """
        Change a Clip's Name or Description
        """
        # Auth: requires a valid X-API-KEY belonging to the clip's owner.
        if 'X-API-KEY' in request.headers:
            requestAPIKey = apikey.apikey.query.filter_by(key=request.headers['X-API-KEY']).first()
            if requestAPIKey is not None:
                if requestAPIKey.isValid():
                    clipQuery = RecordedVideo.Clips.query.filter_by(id=int(clipID)).first()
                    if clipQuery is not None:
                        # Ownership is checked via the parent recorded video here.
                        if clipQuery.recordedVideo.owningUser == requestAPIKey.userID:
                            args = clipParserPut.parse_args()
                            # Only update fields the caller actually supplied.
                            if 'clipName' in args:
                                if args['clipName'] is not None:
                                    clipQuery.clipName = args['clipName']
                            if 'description' in args:
                                if args['description'] is not None:
                                    clipQuery.description = args['description']
                            db.session.commit()
                            return {'results': {'message': 'Clip Updated'}}, 200
        # Missing/invalid key, unknown clip, or non-owner all fall through here.
        return {'results': {'message': 'Request Error'}}, 400

    @api.doc(security='apikey')
    @api.doc(responses={200: 'Success', 400: 'Request Error'})
    def delete(self, clipID):
        """
        Deletes a Clip
        """
        if 'X-API-KEY' in request.headers:
            requestAPIKey = apikey.apikey.query.filter_by(key=request.headers['X-API-KEY']).first()
            if requestAPIKey is not None:
                if requestAPIKey.isValid():
                    clipQuery = RecordedVideo.Clips.query.filter_by(id=clipID).first()
                    if clipQuery is not None:
                        # NOTE(review): ownership read off the clip itself here,
                        # while put() checks clipQuery.recordedVideo.owningUser --
                        # confirm which attribute is authoritative.
                        if clipQuery.owningUser == requestAPIKey.userID:
                            videos_root = globalvars.videoRoot + 'videos/'
                            thumbnailPath = videos_root + clipQuery.thumbnailLocation
                            # Remove the thumbnail file from disk if one exists.
                            if thumbnailPath != videos_root:
                                if path.exists(thumbnailPath) and clipQuery.thumbnailLocation is not None and clipQuery.thumbnailLocation != "":
                                    remove(thumbnailPath)
                            # Delete upvote rows first so no dangling references remain.
                            upvoteQuery = upvotes.clipUpvotes.query.filter_by(clipID=clipQuery.id).all()
                            for vote in upvoteQuery:
                                db.session.delete(vote)
                            db.session.delete(clipQuery)
                            db.session.commit()
                            return {'results': {'message': 'Clip Deleted'}}, 200
        return {'results': {'message': 'Request Error'}}, 400
from typing import Callable, Dict

import logging

log = logging.getLogger(__name__)


class Registry:
    """Class-level mapping from names to registered callables."""

    registry: Dict[str, Callable] = {}

    @classmethod
    def register(cls, name: str) -> Callable:
        """Return a decorator that registers its target under *name*."""

        def decorator(target: Callable) -> Callable:
            # Re-registering a name is allowed, but worth flagging.
            if name in cls.registry:
                log.warning("Overwriting '%s' in class registry", name)
            cls.registry[name] = target
            return target

        return decorator

    @classmethod
    def create(cls, name: str) -> Callable:
        """Call and return the result of the callable registered as *name*."""
        if name not in cls.registry:
            raise ValueError(f"class with name {name} is not registered")
        return cls.registry[name]()
| StarcoderdataPython |
import netrc
import os

# File name used when a directory (rather than a file) is supplied.
netrcFileName = '.netrc'


def get_credentials_from_file(service, authFile=None):
    """Return the netrc (login, account, password) tuple for *service*.

    *authFile* may be a netrc file path, a directory containing a
    '.netrc' file, or None to use the netrc module's default (~/.netrc).
    Returns None when *service* has no entry.
    """
    # Idiom fix: `not x is None` replaced with `x is not None` (PEP 8 / E714).
    if authFile is not None and os.path.isdir(authFile):
        authFile = os.path.join(authFile, netrcFileName)
    return netrc.netrc(authFile).authenticators(service)
def get_credentials(authPathEnvVar, service, authFile=None):
    """Look up credentials, resolving the netrc path from the environment.

    When *authFile* is not given and the environment variable
    *authPathEnvVar* is set, its value is used as the directory holding
    the '.netrc' file; otherwise the explicit *authFile* wins.
    """
    if authFile is None and authPathEnvVar in os.environ:
        authFile = os.path.join(os.environ[authPathEnvVar], netrcFileName)
    return get_credentials_from_file(service, authFile)
| StarcoderdataPython |
4914773 | <reponame>thepineapplepirate/galaxy<gh_stars>0
import pytest
from galaxy.model import tool_shed_install as model
from ...testing_utils import (
dbcleanup_wrapper,
initialize_model,
)
@pytest.fixture(scope="module")
def init_model(engine):
    """Create model objects in the engine's database."""
    # Must use the same engine as the session fixture used by this module.
    initialize_model(model.mapper_registry, engine)
# Fixtures yielding persisted instances of models, deleted from the database on test exit.
# Fixtures yielding persisted instances of models, deleted from the database on test exit.


@pytest.fixture
def repository(session):
    instance = model.ToolShedRepository()
    yield from dbcleanup_wrapper(session, instance)


@pytest.fixture
def repository_repository_dependency_association(session):
    instance = model.RepositoryRepositoryDependencyAssociation()
    yield from dbcleanup_wrapper(session, instance)


@pytest.fixture
def repository_dependency(session, repository):
    # Depends on the `repository` fixture for a valid repository id.
    instance = model.RepositoryDependency(repository.id)
    yield from dbcleanup_wrapper(session, instance)


@pytest.fixture
def tool_dependency(session, repository):
    instance = model.ToolDependency()
    instance.tool_shed_repository = repository
    # "a" is a placeholder status -- presumably any non-null value
    # satisfies the column; verify against the model definition.
    instance.status = "a"
    yield from dbcleanup_wrapper(session, instance)


@pytest.fixture
def tool_version(session):
    instance = model.ToolVersion()
    yield from dbcleanup_wrapper(session, instance)
# Fixtures yielding factory functions (for tests that need several instances
# or want to control constructor arguments themselves).


@pytest.fixture
def tool_version_association_factory():
    def make_instance(*args, **kwds):
        return model.ToolVersionAssociation(*args, **kwds)

    return make_instance


@pytest.fixture
def tool_version_factory():
    def make_instance(*args, **kwds):
        return model.ToolVersion(*args, **kwds)

    return make_instance
| StarcoderdataPython |
4979540 |
from collections import namedtuple
import numpy as np
import torch
import torch.nn.functional as F
from rlpyt.utils.tensor import infer_leading_dims, restore_leading_dims, to_onehot, select_at_indexes
from rlpyt.models.mlp import MlpModel
from rlpyt.distributions.gaussian import Gaussian, DistInfoStd
from rlpyt.distributions.categorical import Categorical, DistInfo
MIN_LOG_STD = -20
MAX_LOG_STD = 2
class MuMlpModel(torch.nn.Module):
    """MLP policy producing a deterministic action (mu), squashed by tanh."""

    def __init__(
            self,
            observation_shape,
            hidden_sizes,
            action_size,
            output_max=1,
            ):
        super().__init__()
        # Outputs are scaled to [-output_max, output_max] by the tanh below.
        self._output_max = output_max
        self._obs_ndim = len(observation_shape)
        input_dim = int(np.prod(observation_shape))
        self.mlp = MlpModel(
            input_size=input_dim,
            hidden_sizes=hidden_sizes,
            output_size=action_size,
        )

    def forward_embedding(self, observation):
        # Identity embedding; kept for interface parity with other models.
        return observation

    def forward_output(self, observation):
        return self(observation, None, None)

    def forward(self, observation, prev_action, prev_reward):
        # Flatten leading time/batch dims, run the MLP, then restore them.
        lead_dim, T, B, _ = infer_leading_dims(observation, self._obs_ndim)
        mu = self._output_max * torch.tanh(self.mlp(observation.view(T * B, -1)))
        mu = restore_leading_dims(mu, lead_dim, T, B)
        return mu
class PiMlpModel(torch.nn.Module):
    """Gaussian MLP policy: outputs a mean and log-std per action dim."""

    def __init__(
            self,
            observation_shape,
            hidden_sizes,
            action_size,
            ):
        super().__init__()
        # Observations may arrive as a tuple of tensors concatenated along
        # the last dim in forward(); hence np.sum over the shape here.
        self._obs_ndim = 1
        input_dim = int(np.sum(observation_shape))
        self._action_size = action_size
        # Single head of size 2 * action_size: [mu | log_std].
        self.mlp = MlpModel(
            input_size=input_dim,
            hidden_sizes=hidden_sizes,
            output_size=action_size * 2,
        )

    def forward_embedding(self, observation):
        # Identity embedding; kept for interface parity with other models.
        return observation

    def forward_output(self, observation):
        return self(observation, None, None)

    def forward(self, observation, prev_action, prev_reward):
        if isinstance(observation, tuple):
            observation = torch.cat(observation, dim=-1)
        lead_dim, T, B, _ = infer_leading_dims(observation,
            self._obs_ndim)
        output = self.mlp(observation.view(T * B, -1))
        # First half of the head is the mean, second half the log-std.
        mu, log_std = output[:, :self._action_size], output[:, self._action_size:]
        mu, log_std = restore_leading_dims((mu, log_std), lead_dim, T, B)
        return mu, log_std
class AutoregPiMlpModel(torch.nn.Module):
    """Two-step autoregressive Gaussian policy.

    Step 0 produces a location distribution (loc_size dims); step 1
    conditions on the sampled location (tiled n_tile times) and produces
    a delta distribution (delta_size dims).
    """

    def __init__(
            self,
            observation_shape,
            hidden_sizes,
            action_size,
            n_tile=50,
            loc_size=2,
            delta_size=3,
            ):
        super().__init__()
        self._obs_ndim = 1
        input_dim = int(np.sum(observation_shape))
        # The sampled location is repeated n_tile times before being fed
        # to the second network -- presumably to boost its relative weight
        # against the observation; TODO confirm.
        self._n_tile = n_tile
        self._loc_size = loc_size
        self._delta_size = delta_size
        assert action_size == loc_size + delta_size  # First 2 (location), then 3 (action)
        self._action_size = action_size
        self.mlp_loc = MlpModel(
            input_size=input_dim,
            hidden_sizes=hidden_sizes,
            output_size=loc_size * 2
        )
        self.mlp_delta = MlpModel(
            input_size=input_dim + loc_size * n_tile,
            hidden_sizes=hidden_sizes,
            output_size=delta_size * 2,
        )
        # Autoregressive step counter: 0 = location step, 1 = delta step.
        self._counter = 0

    def start(self):
        """Reset the autoregressive step counter."""
        self._counter = 0

    def next(self, actions, observation, prev_action, prev_reward):
        """Return (mu, log_std) for the current step and advance the counter."""
        if isinstance(observation, tuple):
            observation = torch.cat(observation, dim=-1)
        lead_dim, T, B, _ = infer_leading_dims(observation,
            self._obs_ndim)
        input_obs = observation.view(T * B, -1)
        if self._counter == 0:
            output = self.mlp_loc(input_obs)
            mu, log_std = output.chunk(2, dim=-1)
        elif self._counter == 1:
            assert len(actions) == 1
            action_loc = actions[0].view(T * B, -1)
            model_input = torch.cat((input_obs, action_loc.repeat((1, self._n_tile))), dim=-1)
            output = self.mlp_delta(model_input)
            mu, log_std = output.chunk(2, dim=-1)
        else:
            raise Exception('Invalid self._counter', self._counter)
        mu, log_std = restore_leading_dims((mu, log_std), lead_dim, T, B)
        self._counter += 1
        return mu, log_std

    def has_next(self):
        return self._counter < 2
# Combined dist-info: categorical logits for the corner choice plus a
# Gaussian (mean, log_std) for the continuous delta.
GumbelDistInfo = namedtuple('GumbelDistInfo', ['cat_dist', 'delta_dist'])


class GumbelPiMlpModel(torch.nn.Module):
    """For picking corners"""

    def __init__(
            self,
            observation_shape,
            hidden_sizes,
            action_size,
            all_corners=False
            ):
        super().__init__()
        self._obs_ndim = 1
        # all_corners=True predicts a 3-dim delta for each of the 4 corners
        # (12 dims total); otherwise a single shared 3-dim delta.
        self._all_corners = all_corners
        input_dim = int(np.sum(observation_shape))
        print('all corners', self._all_corners)
        delta_dim = 12 if all_corners else 3
        self._delta_dim = delta_dim
        self.mlp = MlpModel(
            input_size=input_dim,
            hidden_sizes=hidden_sizes,
            output_size=2 * delta_dim + 4,  # 3 for each corners, times two for std, 4 probs
        )
        self.delta_distribution = Gaussian(
            dim=delta_dim,
            squash=True,
            min_std=np.exp(MIN_LOG_STD),
            max_std=np.exp(MAX_LOG_STD),
        )
        self.cat_distribution = Categorical(4)

    def forward(self, observation, prev_action, prev_reward):
        if isinstance(observation, tuple):
            observation = torch.cat(observation, dim=-1)
        lead_dim, T, B, _ = infer_leading_dims(observation,
            self._obs_ndim)
        output = self.mlp(observation.view(T * B, -1))
        # Head layout: [4 corner logits | delta mean | delta log-std].
        logits = output[:, :4]
        mu, log_std = output[:, 4:4 + self._delta_dim], output[:, 4 + self._delta_dim:]
        logits, mu, log_std = restore_leading_dims((logits, mu, log_std), lead_dim, T, B)
        return GumbelDistInfo(cat_dist=logits, delta_dist=DistInfoStd(mean=mu, log_std=log_std))

    def sample_loglikelihood(self, dist_info):
        """Differentiable sample + log-likelihood (straight-through Gumbel-softmax)."""
        logits, delta_dist_info = dist_info.cat_dist, dist_info.delta_dist
        # Gumbel-max trick; clamping u keeps -log(-log(u)) finite.
        u = torch.rand_like(logits)
        u = torch.clamp(u, 1e-5, 1 - 1e-5)
        gumbel = -torch.log(-torch.log(u))
        # NOTE(review): softmax temperature 10 is hard-coded here -- confirm.
        prob = F.softmax((logits + gumbel) / 10, dim=-1)
        cat_sample = torch.argmax(prob, dim=-1)
        cat_loglikelihood = select_at_indexes(cat_sample, prob)
        one_hot = to_onehot(cat_sample, 4, dtype=torch.float32)
        one_hot = (one_hot - prob).detach() + prob  # Make action differentiable through prob
        if self._all_corners:
            # Select the mean/log-std slice belonging to the sampled corner.
            mu, log_std = delta_dist_info.mean, delta_dist_info.log_std
            mu, log_std = mu.view(-1, 4, 3), log_std.view(-1, 4, 3)
            mu = mu[torch.arange(len(cat_sample)), cat_sample.squeeze(-1)]
            log_std = log_std[torch.arange(len(cat_sample)), cat_sample.squeeze(-1)]
            new_dist_info = DistInfoStd(mean=mu, log_std=log_std)
        else:
            new_dist_info = delta_dist_info
        delta_sample, delta_loglikelihood = self.delta_distribution.sample_loglikelihood(new_dist_info)
        action = torch.cat((one_hot, delta_sample), dim=-1)
        log_likelihood = cat_loglikelihood + delta_loglikelihood
        return action, log_likelihood

    def sample(self, dist_info):
        """Non-differentiable sample; delta std forced to 0 in eval mode."""
        logits, delta_dist_info = dist_info.cat_dist, dist_info.delta_dist
        u = torch.rand_like(logits)
        u = torch.clamp(u, 1e-5, 1 - 1e-5)
        gumbel = -torch.log(-torch.log(u))
        prob = F.softmax((logits + gumbel) / 10, dim=-1)
        cat_sample = torch.argmax(prob, dim=-1)
        one_hot = to_onehot(cat_sample, 4, dtype=torch.float32)
        if len(prob.shape) == 1:  # Edge case for when it gets buffer shapes
            cat_sample = cat_sample.unsqueeze(0)
        if self._all_corners:
            mu, log_std = delta_dist_info.mean, delta_dist_info.log_std
            mu, log_std = mu.view(-1, 4, 3), log_std.view(-1, 4, 3)
            mu = select_at_indexes(cat_sample, mu)
            log_std = select_at_indexes(cat_sample, log_std)
            if len(prob.shape) == 1:  # Edge case for when it gets buffer shapes
                mu, log_std = mu.squeeze(0), log_std.squeeze(0)
            new_dist_info = DistInfoStd(mean=mu, log_std=log_std)
        else:
            new_dist_info = delta_dist_info
        if self.training:
            self.delta_distribution.set_std(None)
        else:
            self.delta_distribution.set_std(0)
        delta_sample = self.delta_distribution.sample(new_dist_info)
        return torch.cat((one_hot, delta_sample), dim=-1)
class GumbelAutoregPiMlpModel(torch.nn.Module):
    """For picking corners autoregressively"""

    def __init__(
            self,
            observation_shape,
            hidden_sizes,
            action_size,
            n_tile=20,
            ):
        super().__init__()
        self._obs_ndim = 1
        # The corner one-hot is tiled n_tile times before conditioning the
        # delta network -- presumably to boost its relative weight; confirm.
        self._n_tile = n_tile
        input_dim = int(np.sum(observation_shape))
        self._action_size = action_size
        self.mlp_loc = MlpModel(
            input_size=input_dim,
            hidden_sizes=hidden_sizes,
            output_size=4
        )
        self.mlp_delta = MlpModel(
            input_size=input_dim + 4 * n_tile,
            hidden_sizes=hidden_sizes,
            output_size=3 * 2,
        )
        self.delta_distribution = Gaussian(
            dim=3,
            squash=True,
            min_std=np.exp(MIN_LOG_STD),
            max_std=np.exp(MAX_LOG_STD),
        )
        self.cat_distribution = Categorical(4)
        # Autoregressive step counter: 0 = corner logits, 1 = delta.
        self._counter = 0

    def start(self):
        """Reset the autoregressive step counter."""
        self._counter = 0

    def next(self, actions, observation, prev_action, prev_reward):
        """Return dist-info for the current step and advance the counter."""
        if isinstance(observation, tuple):
            observation = torch.cat(observation, dim=-1)
        lead_dim, T, B, _ = infer_leading_dims(observation,
            self._obs_ndim)
        input_obs = observation.view(T * B, -1)
        if self._counter == 0:
            logits = self.mlp_loc(input_obs)
            logits = restore_leading_dims(logits, lead_dim, T, B)
            self._counter += 1
            return logits
        elif self._counter == 1:
            assert len(actions) == 1
            action_loc = actions[0].view(T * B, -1)
            model_input = torch.cat((input_obs, action_loc.repeat((1, self._n_tile))), dim=-1)
            output = self.mlp_delta(model_input)
            mu, log_std = output.chunk(2, dim=-1)
            mu, log_std = restore_leading_dims((mu, log_std), lead_dim, T, B)
            self._counter += 1
            return DistInfoStd(mean=mu, log_std=log_std)
        else:
            raise Exception('Invalid self._counter', self._counter)

    def has_next(self):
        return self._counter < 2

    def sample_loglikelihood(self, dist_info):
        """Differentiable sample + log-likelihood for either step.

        The step is inferred from the dist-info type: DistInfoStd means
        the Gaussian delta step, anything else is treated as corner logits.
        """
        if isinstance(dist_info, DistInfoStd):
            action, log_likelihood = self.delta_distribution.sample_loglikelihood(dist_info)
        else:
            logits = dist_info
            # Gumbel-max trick; clamping u keeps -log(-log(u)) finite.
            u = torch.rand_like(logits)
            u = torch.clamp(u, 1e-5, 1 - 1e-5)
            gumbel = -torch.log(-torch.log(u))
            # NOTE(review): softmax temperature 10 hard-coded -- confirm.
            prob = F.softmax((logits + gumbel) / 10, dim=-1)
            cat_sample = torch.argmax(prob, dim=-1)
            log_likelihood = select_at_indexes(cat_sample, prob)
            one_hot = to_onehot(cat_sample, 4, dtype=torch.float32)
            action = (one_hot - prob).detach() + prob  # Make action differentiable through prob
        return action, log_likelihood

    def sample(self, dist_info):
        """Non-differentiable sample; delta std forced to 0 in eval mode."""
        if isinstance(dist_info, DistInfoStd):
            if self.training:
                self.delta_distribution.set_std(None)
            else:
                self.delta_distribution.set_std(0)
            action = self.delta_distribution.sample(dist_info)
        else:
            logits = dist_info
            u = torch.rand_like(logits)
            u = torch.clamp(u, 1e-5, 1 - 1e-5)
            gumbel = -torch.log(-torch.log(u))
            prob = F.softmax((logits + gumbel) / 10, dim=-1)
            cat_sample = torch.argmax(prob, dim=-1)
            action = to_onehot(cat_sample, 4, dtype=torch.float32)
        return action
class QofMuMlpModel(torch.nn.Module):
    """State-action value (Q) MLP over concatenated observation and action."""

    def __init__(
            self,
            observation_shape,
            hidden_sizes,
            action_size,
            n_tile=1,
            ):
        super().__init__()
        self._obs_ndim = 1
        # The action vector is tiled n_tile times before concatenation.
        self._n_tile = n_tile
        input_dim = int(np.sum(observation_shape))
        input_dim += action_size * n_tile
        self.mlp = MlpModel(
            input_size=input_dim,
            hidden_sizes=hidden_sizes,
            output_size=1,
        )

    def forward_embedding(self, observation):
        # Identity embedding; kept for interface parity with other models.
        return observation

    def forward_output(self, observation, action):
        return self(observation, None, None, action)

    def forward(self, observation, prev_action, prev_reward, action):
        if isinstance(observation, tuple):
            observation = torch.cat(observation, dim=-1)
        lead_dim, T, B, _ = infer_leading_dims(observation,
            self._obs_ndim)
        action = action.view(T * B, -1).repeat(1, self._n_tile)
        q_input = torch.cat(
            [observation.view(T * B, -1), action], dim=1)
        # Scalar Q per (obs, action) pair; squeeze the trailing unit dim.
        q = self.mlp(q_input).squeeze(-1)
        q = restore_leading_dims(q, lead_dim, T, B)
        return q
class VMlpModel(torch.nn.Module):
    """State value (V) MLP."""

    def __init__(
            self,
            observation_shape,
            hidden_sizes,
            action_size=None,  # Unused but accept kwarg.
            ):
        super().__init__()
        self._obs_ndim = 1
        input_dim = int(np.sum(observation_shape))
        self.mlp = MlpModel(
            input_size=input_dim,
            hidden_sizes=hidden_sizes,
            output_size=1,
        )

    def forward(self, observation, prev_action, prev_reward):
        if isinstance(observation, tuple):
            observation = torch.cat(observation, dim=-1)
        lead_dim, T, B, _ = infer_leading_dims(observation,
            self._obs_ndim)
        # Scalar value per state; squeeze the trailing unit dim.
        v = self.mlp(observation.view(T * B, -1)).squeeze(-1)
        v = restore_leading_dims(v, lead_dim, T, B)
        return v
| StarcoderdataPython |
11274027 | # Copyright (c) 2020, Huawei Technologies.All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
import sys
import copy
from common_utils import TestCase, run_tests
from common_device_type import dtypes, instantiate_device_type_tests
from util_test import create_common_tensor
class TestNormalization(TestCase):
    """Compare BatchNorm2d forward and backward results between CPU and NPU."""

    def op_exec(self, npu_flag, input1, dim):
        """Run BatchNorm2d(dim) forward and backward on `input1`.

        Returns (input_gradient, forward_output) as numpy arrays; when
        npu_flag is truthy, the module and tensors run on the "npu" device.
        """
        m = torch.nn.BatchNorm2d(dim)
        if npu_flag:
            m = m.to("npu")
        input_new = m(input1)
        if npu_flag:
            input_new = input_new.to("cpu")
        input_new = input_new.detach().numpy()
        input1.requires_grad_(True)
        # Backprop a tensor of ones to obtain d(output)/d(input).
        w = torch.ones_like(input1)
        if npu_flag:
            w = w.to("npu")
        tmp = m(input1)
        tmp.backward(w)
        output = input1.grad
        if npu_flag:
            output = output.to("cpu")
        output = output.detach().numpy()
        return output, input_new

    def test_batchnorm_shape_format_fp16(self, device):
        # fp16 inputs: the CPU reference is computed in fp32 and cast back
        # before comparison.
        format_list = [0]
        shape_list = [[256, 672, 7, 7], [1024, 58, 28, 28]]
        shape_format = [
            [np.float16, i, j] for i in format_list for j in shape_list
        ]
        for item in shape_format:
            cpu_input1, npu_input1 = create_common_tensor(item, 0, 100)
            cpu_input1 = cpu_input1.to(torch.float32)
            # item[2][1] is the channel count, i.e. BatchNorm2d's num_features.
            cpu_output, cpu_input = self.op_exec(0, cpu_input1, item[2][1])
            npu_output, npu_input = self.op_exec(1, npu_input1, item[2][1])
            cpu_output = cpu_output.astype(npu_output.dtype)
            self.assertRtolEqual(cpu_output, npu_output)
            cpu_input = cpu_input.astype(npu_input.dtype)
            self.assertRtolEqual(cpu_input, npu_input)

    def test_batchnorm_shape_format_fp32(self, device):
        format_list = [0]
        shape_list = [(256, 32, 112, 112)]
        shape_format = [
            [np.float32, i, j] for i in format_list for j in shape_list
        ]
        for item in shape_format:
            cpu_input1, npu_input1 = create_common_tensor(item, 0, 100)
            cpu_output, cpu_input = self.op_exec(0, cpu_input1, item[2][1])
            npu_output, npu_input = self.op_exec(1, npu_input1, item[2][1])
            cpu_output = cpu_output.astype(npu_output.dtype)
            self.assertRtolEqual(cpu_output, npu_output)
            cpu_input = cpu_input.astype(npu_input.dtype)
            self.assertRtolEqual(cpu_input, npu_input)


# Register the tests for every available device type except the CPU.
instantiate_device_type_tests(TestNormalization, globals(), except_for="cpu")

if __name__ == "__main__":
    run_tests()
| StarcoderdataPython |
6703825 | <filename>w02-calling-functions/checkpoint-boxes/boxes.py
import math


def boxes_needed(items, items_per_box):
    """Return the number of boxes required to pack *items* items,
    *items_per_box* to a box (the last box may be partially full)."""
    return math.ceil(items / items_per_box)


def main():
    """Prompt for the counts and report how many boxes are needed."""
    items = int(input("Enter the number of items: "))
    items_box = int(input("Enter the number of items per box: "))
    boxes = boxes_needed(items, items_box)
    print(f"For {items} items, packing {items_box} items in each box, you will need {boxes} boxes.")


# Guard the prompts so importing this module does not block on input().
if __name__ == "__main__":
    main()
5066676 | <gh_stars>0
'''
Created on 04.03.2013
@author: hlampesberger
'''
from base import Result
from regular import nfa
from regular.dfa import DFA
import unittest
class TestDFA(unittest.TestCase):
def setUp(self):
alphabet = ["a", "b"]
s = ["q1", "q2", "q3", "q4"]
transitions = [("q1", "b", "q1"), ("q1", "a", "q2"),
("q2", "a", "q3"), ("q2", "b", "q4"),
("q3", "a", "q4"), ("q3", "b", "q3"),
("q4", "a", "q2"), ("q4", "b", "q1")]
acc_s = ["q2", "q3"]
rej_s = ["q4"]
self.fsa = DFA.build(alphabet=alphabet, states=s,
start_state="q1",
accept_states=acc_s,
reject_states=rej_s,
transitions=transitions)
def test_DFA_membership(self):
self.assertEqual(self.fsa.membership(""), Result.neutral)
self.assertEqual(self.fsa.membership("a"), Result.accept)
self.assertEqual(self.fsa.membership("ab"), Result.reject)
self.assertEqual(self.fsa.membership("abb"), Result.neutral)
self.assertEqual(self.fsa.membership("aba"), Result.accept)
self.assertEqual(self.fsa.membership("abaab"), Result.accept)
def test_incomplete(self):
alphabet = ["a", "b"]
s = [1, 2, 3, 4]
transitions = [(1, "b", 1), (1, "a", 2),
(2, "a", 3), (2, "b", 4)]
acc_s = [2, 3]
rej_s = [4]
dfa = DFA.build(alphabet=alphabet, states=s, start_state=1,
accept_states=acc_s, reject_states=rej_s,
transitions=transitions)
self.assertFalse(dfa.is_complete())
dfa.transitions = None
dfa.complete(sink=4)
self.assertTrue(dfa.is_complete())
def test_del_dead_states(self):
alphabet = ["a", "b"]
s = [1, 2, 3, 4]
transitions = [(1, "b", 1), (1, "a", 2),
(2, "a", 3), (2, "b", 4),
(3, "a", 4), (3, "b", 3),
(4, "a", 2), (4, "b", 1)]
acc_s = [2, 3]
rej_s = [4]
dfa = DFA.build(alphabet=alphabet, states=s, start_state=1,
accept_states=acc_s, reject_states=rej_s,
transitions=transitions)
dfa.del_states({4})
self.assertFalse(dfa.is_complete())
self.assertEqual(len(set(dfa.itertransitions())), 4)
dfa.complete(6)
self.assertTrue(dfa.is_complete())
self.assertEqual(len(set(dfa.itertransitions())), 8)
dfa.del_dead_states()
self.assertTrue(6 not in dfa.states)
self.assertEqual(len(set(dfa.itertransitions())), 4)
self.assertFalse(dfa.is_complete())
def test_del_unreachable_states(self):
alphabet = ["a", "b"]
s = [1, 2, 3, 4, 5]
transitions = [(1, "b", 1), (1, "a", 2),
(2, "a", 3), (2, "b", 4),
(3, "a", 4), (3, "b", 3),
(4, "a", 2), (4, "b", 1),
(5, "a", 2), (5, "b", 3)]
acc_s = [2, 3]
rej_s = [4, 5, 1]
dfa = DFA.build(alphabet=alphabet, states=s, start_state=1,
accept_states=acc_s, reject_states=rej_s,
transitions=transitions)
self.assertTrue(5 not in dfa.reachable_states())
self.assertTrue(dfa.is_complete())
dfa.del_unreachable_states()
self.assertTrue(dfa.is_complete())
def test_DFA_numeric_states(self):
alphabet = ["a", "b"]
s = [1, 2, 3, 4]
transitions = [(1, "b", 1), (1, "a", 2),
(2, "a", 3), (2, "b", 4),
(3, "a", 4), (3, "b", 3),
(4, "a", 2), (4, "b", 1)]
acc_s = [2, 3]
rej_s = [4]
dfa = DFA.build(alphabet=alphabet, states=s, start_state=1,
accept_states=acc_s, reject_states=rej_s,
transitions=transitions)
self.assertEqual(dfa.membership(""), Result.neutral)
self.assertEqual(dfa.membership("a"), Result.accept)
self.assertEqual(dfa.membership("ab"), Result.reject)
self.assertEqual(dfa.membership("abb"), Result.neutral)
self.assertEqual(dfa.membership("aba"), Result.accept)
self.assertEqual(dfa.membership("abaab"), Result.accept)
def test_union_intersection_subset(self):
alphabet = [0, 1]
s = ["A", "B"]
transitions = [("A", 0, "A"), ("A", 1, "B"),
("B", 0, "A"), ("B", 1, "A")]
acc_s = ["B"]
rej_s = ["A"]
dfa1 = DFA.build(alphabet=alphabet, states=s, start_state="A",
accept_states=acc_s, reject_states=rej_s,
transitions=transitions)
s = ["C", "D"]
transitions = [("C", 0, "D"), ("C", 1, "C"),
("D", 0, "D"), ("D", 1, "C")]
acc_s = ["C"]
rej_s = ["D"]
dfa2 = DFA.build(alphabet=alphabet, states=s, start_state="D",
accept_states=acc_s, reject_states=rej_s,
transitions=transitions)
udfa = dfa1 | dfa2
idfa = dfa1 & dfa2
self.assertEqual(idfa.membership([0, 1]), Result.accept)
self.assertEqual(idfa.membership([1]), Result.accept)
self.assertEqual(dfa1.membership([0, 1, 1]), Result.reject)
self.assertEqual(udfa.membership([0, 1, 1]), Result.accept)
self.assertTrue(dfa1.issubset(dfa2))
def test_equality(self):
alphabet = [0, 1]
s = ["A", "B"]
# lang: 0, 01, 011, 0111, ... = 01*
transitions = [("A", 0, "B"), ("B", 1, "B")]
acc_s = ["B"]
rej_s = ["A"]
dfa1 = DFA.build(alphabet=alphabet, states=s, start_state="A",
accept_states=acc_s, reject_states=rej_s,
transitions=transitions)
s = ["X", "Y", "Z"]
transitions = [("X", 0, "Y"), ("Y", 1, "Z"), ("Z", 1, "Z")]
acc_s = ["Y", "Z"]
rej_s = ["X"]
dfa2 = DFA.build(alphabet=alphabet, states=s, start_state="X",
accept_states=acc_s, reject_states=rej_s,
transitions=transitions)
# test incompleteness
self.assertEqual(dfa1.membership([1, 1, 1]), Result.reject)
dfa1.complete()
dfa2.complete()
self.assertTrue(dfa1 == dfa2)
# test equivalence pairs
eq_pairs = {('A', 'X'), ('B', 'Y'), ('B', 'Z'), (-1, -1)}
self.assertEqual(dfa1.equivalent_states(dfa2), eq_pairs)
udfa = dfa1 | dfa2
self.assertEqual(udfa.membership([0]), Result.accept)
self.assertEqual(udfa.membership([1]), Result.reject)
self.assertEqual(udfa.membership([0, 1]), Result.accept)
self.assertEqual(udfa.membership([0, 1, 1]), Result.accept)
self.assertEqual(udfa.membership([0, 0, 0]), Result.reject)
self.assertEqual(udfa.membership([0, 1, 1, 1, 1, 1]), Result.accept)
def test_minimize(self):
a = "a"
b = "b"
alphabet = [a, b]
s = [0, 1, 2, 3, 4, 5, 6]
transitions = [(0, a, 0), (0, b, 0),
(1, a, 2), (1, b, 5),
(2, a, 3), (2, b, 0),
(3, a, 3), (3, b, 4),
(4, a, 3), (4, b, 0),
(5, a, 6), (5, b, 0),
(6, a, 0), (6, b, 5)]
acc_s = [0, 5]
rej_s = [1, 2, 4, 6]
dfa = DFA.build(alphabet=alphabet, states=s, start_state=1,
accept_states=acc_s, reject_states=rej_s,
transitions=transitions)
mindfa = dfa.minimize()
# quick'n'dirty test whether automaton is equivalent
brzozowski = nfa.NFA.viewDFA(dfa).reverse().determinize()
brzozowski = nfa.NFA.viewDFA(brzozowski).reverse().determinize()
self.assertTrue(mindfa == dfa)
self.assertTrue(brzozowski == mindfa)
self.assertEqual(mindfa.membership("ab"), Result.accept)
self.assertEqual(mindfa.membership("bb"), Result.accept)
self.assertEqual(mindfa.membership("baa"), Result.accept)
self.assertEqual(mindfa.membership("aabb"), Result.accept)
self.assertEqual(mindfa.membership("aaaaaaaaaabbbbb"), Result.accept)
def test_reversal(self):
    """The reversed DFA must accept exactly the mirror images of the language."""
    table = [(0, "a", 0), (0, "b", 0),
             (1, "a", 2), (1, "b", 5),
             (2, "a", 3), (2, "b", 0),
             (3, "a", 3), (3, "b", 4),
             (4, "a", 3), (4, "b", 0),
             (5, "a", 6), (5, "b", 0),
             (6, "a", 0), (6, "b", 5)]
    dfa = DFA.build(alphabet=["a", "b"], states=[0, 1, 2, 3, 4, 5, 6],
                    start_state=1, accept_states=[0, 5],
                    reject_states=[1, 2, 4, 6], transitions=table)
    rev = dfa.reverse()
    expectations = (("ba", Result.accept),
                    ("aaaaaabb", Result.accept),
                    ("ab", Result.reject),
                    ("aba", Result.accept),
                    ("abaab", Result.accept))
    for word, expected in expectations:
        self.assertEqual(rev.membership(word), expected)
def test_concat(self):
    """Concatenating two automata for 01* must recognise 01*01*."""
    first = DFA.build(alphabet=[0, 1], states=["A", "B"], start_state="A",
                      accept_states=["B"], reject_states=["A"],
                      transitions=[("A", 0, "B"), ("B", 1, "B")])
    second = DFA.build(alphabet=[0, 1], states=["X", "Y", "Z"], start_state="X",
                       accept_states=["Y", "Z"], reject_states=["X"],
                       transitions=[("X", 0, "Y"), ("Y", 1, "Z"), ("Z", 1, "Z")])
    # the concatenation recognises 01*01*
    concatenated = (first * second).minimize().rename()
    expectations = (([1], Result.reject),
                    ([0, 0], Result.accept),
                    ([0, 1, 0], Result.accept),
                    ([0, 0, 0], Result.reject),
                    ([0, 1, 1], Result.reject),
                    ([0, 1, 1, 1, 1, 1], Result.reject))
    for word, expected in expectations:
        self.assertEqual(concatenated.membership(word), expected)
if __name__ == "__main__":
    # import sys;sys.argv = ['', 'Test.testName']
    # Run every test method of this module's TestCase classes.
    unittest.main()
| StarcoderdataPython |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
LSTM stock-price prediction demo for AAPL.

Downloads daily quotes from Yahoo Finance, trains a two-layer LSTM on a
60-day sliding window of scaled closing prices, evaluates it on the
held-out 20% of the data, plots actual vs. predicted prices, and finally
predicts the next closing price from the most recent 60 days.

Created on Thu Apr 15 15:30:15 2021

@author: altair
"""
import math
import pandas_datareader as web
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, LSTM
import matplotlib.pyplot as plt

df = web.DataReader('AAPL', data_source='yahoo', start='2015-01-01', end='2020-12-31')
print(df)
print(df.shape)

# Visualize the closing price history.
plt.figure(figsize=(16, 8))
plt.title('Close Price History', fontsize=18)
plt.plot(df['Close'])
plt.xlabel('Date', fontsize=18)
plt.ylabel('Close Price USD', fontsize=18)
plt.show()

# Work on the closing price only, as a plain numpy array.
data = df.filter(['Close'])
dataset = data.values
# 80% of the rows are used for training.
training_data_len = math.ceil(len(dataset) * 0.8)
print('\n training_data_len:', training_data_len)

# Scale prices into [0, 1] for the LSTM.
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_data = scaler.fit_transform(dataset)
print('\nscaled_data', scaled_data)

# Build training windows: each sample is the 60 previous scaled closes,
# the label is the close that follows them.
train_data = scaled_data[0:training_data_len, :]
x_train = []
y_train = []
for i in range(60, len(train_data)):
    x_train.append(train_data[i - 60:i, 0])
    y_train.append(train_data[i, 0])
    if i <= 60:  # print the very first window as a sanity check
        print(x_train)
        print(y_train)
        print()

x_train, y_train = np.array(x_train), np.array(y_train)
# LSTM layers expect (samples, timesteps, features).
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
print('\nx_train reshape:', x_train.shape)

# Two stacked LSTM layers followed by two dense layers.
model = Sequential()
model.add(LSTM(50, return_sequences=True, input_shape=(x_train.shape[1], 1)))
model.add(LSTM(50, return_sequences=False))
model.add(Dense(25))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(x_train, y_train, batch_size=1, epochs=1)

# Test windows start 60 days before the split so every test sample has a
# full 60-day history.
test_data = scaled_data[training_data_len - 60:, :]
x_test = []
# BUG FIX: the original used `dataset[training_data_len, :]`, which is a
# single row; the targets are all rows from the split onward.
y_test = dataset[training_data_len:, :]
for i in range(60, len(test_data)):
    x_test.append(test_data[i - 60:i, 0])

x_test = np.array(x_test)
x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))

# Predict and undo the scaling so errors are in USD.
predictions = model.predict(x_test)
predictions = scaler.inverse_transform(predictions)

# Root-mean-square error of the predictions.
# BUG FIX: the original computed sqrt(mean(diff)**2) == |mean(diff)|;
# RMSE must square the differences before averaging.
rmse = np.sqrt(np.mean((predictions - y_test) ** 2))
print('\nRMSE:', rmse)

# Plot training data, actual test prices and predictions.
train = data[:training_data_len]
valid = data[training_data_len:].copy()  # copy() avoids SettingWithCopyWarning below
valid['Predictions'] = predictions
plt.figure(figsize=(16, 8))
plt.title('LSTM Model for Stock Predictions', fontsize=18)
plt.xlabel('Date', fontsize=18)
plt.ylabel('Close Price USD', fontsize=18)
plt.plot(train['Close'])
plt.plot(valid[['Close', 'Predictions']])
plt.legend(['Train', 'Valid', 'Predictions'], loc='lower right')
plt.show()

# Predict the next closing price from the most recent 60 days.
apple_quote = web.DataReader('AAPL', data_source='yahoo', start='2010-01-01', end='2020-12-31')
new_df = apple_quote.filter(['Close'])
last_60_days = new_df[-60:].values
# Scale with the scaler fitted on the training data.
last_60_days_scaled = scaler.transform(last_60_days)
X_test = np.array([last_60_days_scaled])
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
pred_price = model.predict(X_test)
# Undo the scaling to get a price in USD.
pred_price = scaler.inverse_transform(pred_price)
print('Pred_price:', pred_price)

# Compare with the actual quote of that day.
apple_quote2 = web.DataReader('AAPL', data_source='yahoo', start='2020-12-31', end='2020-12-31')
print('Close price:', apple_quote2['Close'])
| StarcoderdataPython |
9601837 | <reponame>ilmcconnell/Cosmos<filename>cosmos/ingestion/ingest/process/detection/src/torch_model/model/__init__.py
"""
Model specification dir
"""
| StarcoderdataPython |
9717456 | #!/usr/bin/env python
# coding: utf-8
import glob
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn
import tensorflow as tf
import yaml
from IPython.display import display
from tqdm.notebook import tqdm
from transformers import BertConfig
import BERT_per_label
import BERT_per_lvl
# Data analysis
def data_analysis(data_set):
    """
    Print and plot occurrence statistics and length histograms for the
    training and test splits of a dataset.

    :param data_set: directory name of the dataset to analyze (expects
                     ``train.csv`` and ``test.csv`` inside)
    :return: None; displays the head of the dataset, prints a textual
             description of the three category levels, then shows/saves
             the same information as plots — once for training, once for test
    """
    print("Dataset :", data_set)
    data = pd.read_csv(data_set + "/train.csv")  # load training split
    # DBpedia uses text/l1/l2/l3 column names; normalise to Text/Cat1/Cat2/Cat3.
    data = data.rename(columns={"text": "Text", "l1": "Cat1", "l2": "Cat2", "l3": "Cat3"})
    data = data[['Text', "Cat1", "Cat2", "Cat3"]]
    display(data.head())

    # Nested functions close over `data`; not ideal, but avoids the global-variable
    # problems that appeared when converting from a Jupyter notebook.
    def plot_histo(column):
        """Histogram of the character lengths of *column* in the training split."""
        text_len = data[column].str.len()
        plt.hist(text_len, bins=text_len.max())
        plt.xlabel("Token length")
        plt.ylabel("Amount")
        plt.title("Token lenght for {}: \n Minimal: {} \n Maximal: {} \n Average: {:.2f}".format(column, text_len.min(), text_len.max(), text_len.mean()))

    def get_info(column):
        """Print how often each label of *column* appears in the training split."""
        name, count = np.unique(data[column], return_index=False, return_inverse=False, return_counts=True, axis=None)
        print("Amount of appearances for {}: \n * unique values {} \n * Minimal: {} appears {} times \n * Maximal: {} appears {} times \n * in average {:.2f} times. \n ".format(
            column, len(count), name[count.argmin()], count.min(), name[count.argmax()], count.max(), count.mean()))

    print("Training data \nContains {} examples".format(data.shape[0]))
    get_info("Cat1")
    get_info("Cat2")
    get_info("Cat3")
    # One histogram per column, side by side.
    plt.figure(figsize=(20, 5))
    plt.subplot(1, 4, 1)
    plot_histo("Text")
    plt.subplot(1, 4, 2)
    plot_histo("Cat1")
    plt.subplot(1, 4, 3)
    plot_histo("Cat2")
    plt.subplot(1, 4, 4)
    plot_histo("Cat3")
    plt.savefig("./visualizations/" + data_set + "/Data_analysis.svg", dpi=200, format="svg", facecolor="white")
    plt.show()

    # Same analysis repeated for the test split. The helpers are redefined
    # (shadowing the ones above) so that they close over `test` instead of `data`.
    test = pd.read_csv(data_set + "/test.csv")
    test = test.rename(columns={"text": "Text", "l1": "Cat1", "l2": "Cat2", "l3": "Cat3"})

    def plot_histo(column):
        """Histogram of the character lengths of *column* in the test split."""
        text_len = test[column].str.len()
        plt.hist(text_len, bins=text_len.max())
        plt.xlabel("Token length")
        plt.ylabel("Amount")
        plt.title("Token lenght for {}: \n Minimal: {} \n Maximal: {} \n Average: {:.2f}".format(column, text_len.min(),
                                                                                                 text_len.max(),
                                                                                                 text_len.mean()))

    def get_info(column):
        """Print how often each label of *column* appears in the test split."""
        name, count = np.unique(test[column], return_index=False, return_inverse=False, return_counts=True, axis=None)
        print(
            "Amount of appearances for {}: \n * unique values {} \n * Minimal: {} appears {} times \n * Maximal: {} appears {} times \n * in average {:.2f} times. \n ".format(
                column, len(count), name[count.argmin()], count.min(), name[count.argmax()], count.max(), count.mean()))

    print("Test data \nContains {} examples".format(test.shape[0]))
    get_info("Cat1")
    get_info("Cat2")
    get_info("Cat3")
    plt.figure(figsize=(20, 5))
    plt.subplot(1, 4, 1)
    plot_histo("Text")
    plt.subplot(1, 4, 2)
    plot_histo("Cat1")
    plt.subplot(1, 4, 3)
    plot_histo("Cat2")
    plt.subplot(1, 4, 4)
    plot_histo("Cat3")
    plt.savefig("./visualizations/" + data_set + "/Data_analysis_test.svg", dpi=200, format="svg", facecolor="white")
    plt.show()
def plot_sub_cat(dataset, columns, spacer=2):
    """
    Plot the amount of appearances of the labels of a lower category level,
    grouped by the labels of one or two upper levels.

    :param dataset: dataframe to plot
    :param columns: list of the form [["Cats to", "group by"], "Cat to plot"]
                    — one or two upper-level column names, then the lower one
    :param spacer: number of blank separator rows between subclasses
    :return: None; draws a grouped horizontal bar chart on the current figure
    """
    # Build the grouping key: the first upper column, optionally prefixed by the second.
    group_key = dataset[columns[0][0]]
    if len(columns[0]) == 2:
        group_key = dataset[columns[0][1]].str.cat(group_key, sep=". ")
    # Upper-level group names in np.unique's sorted order.
    name, _ = np.unique(group_key, return_counts=True)
    names_undercat_vec = []
    count_undercat_vec = []
    for overcat in name:
        aux = dataset.loc[group_key == overcat]
        names_undercat, count_undercat = np.unique(aux[columns[1]], return_counts=True)
        names_undercat_vec.append(names_undercat)
        names_undercat_vec.append(np.repeat(" ", spacer))  # blank separator rows
        count_undercat_vec.append(count_undercat)
    # Flatten labels and drop the trailing separator.
    plot_labels = [item for sublist in names_undercat_vec for item in sublist]
    if spacer:
        # BUG FIX: was hard-coded [:-2]; must drop exactly `spacer` trailing blanks.
        plot_labels = plot_labels[:-spacer]
    plot_pos = np.array([len(x) for x in names_undercat_vec][:-1])
    plot_pos = np.append(0, np.cumsum(plot_pos))
    y_pos = np.arange(len(plot_labels))
    # One barh call per upper-level group so each group gets its own colour.
    ranges = [range(plot_pos[i], plot_pos[i + 1]) for i in range(0, len(plot_pos) - 1, 2)]
    for i, coun in enumerate(count_undercat_vec):
        plt.barh(ranges[i], coun, align='center', label=name[i])
    plt.title("Amount of appearances for under {} grouped by over categories {}:".format(columns[1], columns[0]))
    plt.ylabel("Label")
    plt.xscale("log")
    plt.xlabel("Amount of appearances")
    plt.yticks(y_pos, plot_labels)
    plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
def plot_histo_lim(dataset, column, max_len):
    """Histogram of the token lengths actually used when *column* is truncated to *max_len*."""
    capped = np.minimum(np.asarray(dataset[column].str.len()), max_len)
    plt.hist(capped, bins=capped.max())
    plt.xlabel("Token length")
    plt.ylabel("Amount")
    plt.yscale("log")
    plt.title("Used token lenght for {} constrained to {}: \n Minimal: {} \n Maximal: {} \n Average: {:.2f}".format(
        column, max_len, capped.min(), capped.max(), capped.mean()))
def plot_histo_label_lim(dataset, column, cats, max_len):
    """Histogram of used token lengths when the *cats* labels are prepended and the input is truncated to *max_len*."""
    joined = dataset['Text']
    for cat in cats:
        joined = dataset[cat].str.cat(joined, sep=". ")
    capped = np.minimum(np.asarray(joined.str.len()), max_len)
    plt.hist(capped, bins=capped.max())
    plt.xlabel("Token length")
    plt.ylabel("Amount")
    plt.yscale("log")
    plt.title(
        "Used token lenght for {}, {} as input constrained to {}: \n Minimal: {} \n Maximal: {} \n Average: {:.2f}".format(
            cats, column, max_len, capped.min(), capped.max(), capped.mean()))
def plot_histo(dataset, column, max_len):
    """Full token-length histogram for *column*, with a red line where *max_len* would cut."""
    lengths = dataset[column].str.len()
    counts, _, _ = plt.hist(lengths, bins=lengths.max())
    plt.vlines(max_len, 0, counts.max(), color='r')
    plt.xlabel("Token length")
    plt.ylabel("Amount")
    plt.yscale("log")
    plt.title(
        "Token lenght for {}, indicating {} as max len: \n Minimal: {} \n Maximal: {} \n Average: {:.2f}".format(column,
                                                                                                                 max_len,
                                                                                                                 lengths.min(),
                                                                                                                 lengths.max(),
                                                                                                                 lengths.mean()))
def plot_histo_label(dataset, column, cats, max_len):
    """Token-length histogram with the *cats* labels prepended, red line marking *max_len*."""
    joined = dataset['Text']
    for cat in cats:
        joined = dataset[cat].str.cat(joined, sep=". ")
    lengths = joined.str.len()
    counts, _, _ = plt.hist(lengths, bins=lengths.max())
    plt.vlines(max_len, 0, counts.max(), color='r')
    plt.xlabel("Token length")
    plt.ylabel("Amount")
    plt.yscale("log")
    plt.title(
        "Token lenght for {}, {} as input, indicating {} as max len: \n Minimal: {} \n Maximal: {} \n Average: {:.2f}".format(
            cats, column, max_len, lengths.min(), lengths.max(), lengths.mean()))
def plot_histo_targets(dataset, column):
    """Horizontal bar chart of how often each label of *column* appears."""
    labels, counts = np.unique(dataset[column], return_counts=True)
    positions = np.arange(len(labels))
    plt.barh(positions, counts, align='center')
    plt.yticks(positions, labels)
    plt.ylabel("Label")
    plt.xscale("log")
    plt.xlabel("Amount of appearances")
    plt.title("Amount of appearances for {}: \n Minimal: {} appears {} times \n Maximal: {} appears {} times".format(column,
                                                                                                                     labels[counts.argmin()],
                                                                                                                     counts.min(),
                                                                                                                     labels[counts.argmax()],
                                                                                                                     counts.max()))
def get_lengths(data_set):
    """
    Return the character length of every 'Text' entry across the training
    and test splits of *data_set*.

    :param data_set: dataset directory to analyze
    :return: pandas Series with the length of each text
    """
    rename_map = {"text": "Text", "l1": "Cat1", "l2": "Cat2", "l3": "Cat3"}
    parts = []
    for split in ("train", "test"):
        frame = pd.read_csv("{}/{}.csv".format(data_set, split)).rename(columns=rename_map)
        parts.append(frame["Text"].str.len())
    return pd.concat(parts)
def comparative_text_len():
    """Overlayed histograms comparing the 'Text' lengths of the Amazon and DBPedia datasets."""
    amazon_lengths = get_lengths("amazon")
    dbpedia_lengths = get_lengths("dbpedia")
    plt.figure(figsize=(10, 10))
    plt.hist(dbpedia_lengths, bins=int(dbpedia_lengths.max() / 2), label="DBPedia", alpha=1)
    plt.hist(amazon_lengths, bins=int(amazon_lengths.max() / 2), label="Amazon", alpha=1)
    plt.xlim(0, 5000)
    plt.yscale("log")
    plt.legend()
    plt.xlabel("Number of characters per 'Text' input")
    plt.ylabel("Amount of ocurances")
def plot_histo_targets_len(dataset, column):
    """Horizontal bar chart of the character length of each distinct label in *column*."""
    labels, _ = np.unique(dataset[column], return_counts=True)
    label_lens = np.array([len(lbl) for lbl in labels])
    positions = np.arange(len(labels))
    plt.barh(positions, label_lens, align='center')
    plt.yticks(positions, labels)
    plt.ylabel("Label")
    plt.xlabel("Token lenght")
    plt.title("Token length for {}: \n Minimal: {} is {} tokens long \n Maximal: {} is {} tokens long".format(
        column, labels[label_lens.argmin()], label_lens.min(), labels[label_lens.argmax()], label_lens.max()))
def plot_histo_lost(dataset, column, cats, max_len):
    """Histogram of how many tokens are lost per example when truncating to *max_len*."""
    joined = dataset['Text']
    for cat in cats:
        joined = dataset[cat].str.cat(joined, sep=". ")
    # Only examples that exceed the limit lose information.
    overshoot = np.array([length - max_len for length in joined.str.len() if length > max_len])
    plt.hist(overshoot, bins=overshoot.max())
    plt.xlabel("Token length")
    plt.ylabel("Amount")
    plt.yscale("log")
    plt.title(
        "Token lenght of lost information for {}, {} as input constrained to {}: \n Minimal: {} \n Maximal: {} \n Average: {:.2f}".format(
            cats, column, max_len, overshoot.min(), overshoot.max(), overshoot.mean()))
def data_analysis_fixed_len(data_set, max_len=100):
    """
    Plot the full data analysis for a fixed maximum token length.

    :param data_set: dataset directory to analyse
    :param max_len: maximal token length, i.e. the truncation constraint
    :return: None; shows/saves one multi-panel figure plus textual output for
             the training split, then the same for the test split
    """
    print("Dataset :", data_set)
    data = pd.read_csv(data_set + "/train.csv")
    # Normalise DBpedia column names to Text/Cat1/Cat2/Cat3.
    data = data.rename(columns={"text": "Text", "l1": "Cat1", "l2": "Cat2", "l3": "Cat3"})
    data = data[['Text', "Cat1", "Cat2", "Cat3"]]
    display(data.head())
    print("Training data \nContains {} examples".format(data.shape[0]))
    # 7x3 grid reused by both figures (training and test).
    spec = gridspec.GridSpec(7, 3, wspace=0.5, hspace=1)
    fig = plt.figure(figsize=(40, 30))
    # Row 0: label frequency; row 1: label lengths.
    fig.add_subplot(spec[0, 0])
    plot_histo_targets(data, "Cat1")
    fig.add_subplot(spec[0, 1])
    plot_histo_targets(data, "Cat2")
    fig.add_subplot(spec[0, 2])
    plot_histo_targets(data, "Cat3")
    fig.add_subplot(spec[1, 0])
    plot_histo_targets_len(data, "Cat1")
    fig.add_subplot(spec[1, 1])
    plot_histo_targets_len(data, "Cat2")
    fig.add_subplot(spec[1, 2])
    plot_histo_targets_len(data, "Cat3")
    # Row 2: full length histograms (plain text, +Cat1 prefix, +Cat2+Cat1 prefix).
    fig.add_subplot(spec[2, 0])
    plot_histo(data, "Text", max_len)
    fig.add_subplot(spec[2, 1])
    plot_histo_label(data, "Text", ["Cat1"], max_len)
    fig.add_subplot(spec[2, 2])
    plot_histo_label(data, "Text", ["Cat2", "Cat1"], max_len)
    # Row 3: lengths after truncation to max_len.
    fig.add_subplot(spec[3, 0])
    plot_histo_lim(data, "Text", max_len)
    fig.add_subplot(spec[3, 1])
    plot_histo_label_lim(data, "Text", ["Cat1"], max_len)
    fig.add_subplot(spec[3, 2])
    plot_histo_label_lim(data, "Text", ["Cat2", "Cat1"], max_len)
    # Row 4: how much information the truncation discards.
    fig.add_subplot(spec[4, 0])
    plot_histo_lost(data, "Text", [], max_len)
    fig.add_subplot(spec[4, 1])
    plot_histo_lost(data, "Text", ["Cat1"], max_len)
    fig.add_subplot(spec[4, 2])
    plot_histo_lost(data, "Text", ["Cat2", "Cat1"], max_len)
    # Rows 5-6: lower-level label counts grouped by upper-level labels.
    fig.add_subplot(spec[5:, 0])
    plot_sub_cat(data, [["Cat1"], "Cat2"])
    fig.add_subplot(spec[5:, 2])
    plot_sub_cat(data, [["Cat2", "Cat1"], "Cat3"])
    plt.savefig("./visualizations/" + data_set + "/Data_analysis_complete_training.png", dpi=200, format="png",
                facecolor="white")
    plt.show()

    # Same layout for the test split.
    test = pd.read_csv(data_set + "/test.csv")
    test = test.rename(columns={"text": "Text", "l1": "Cat1", "l2": "Cat2", "l3": "Cat3"})
    test = test[['Text', "Cat1", "Cat2", "Cat3"]]
    print("Test data \nContains {} examples".format(test.shape[0]))
    fig = plt.figure(figsize=(40, 25))
    fig.add_subplot(spec[0, 0])
    plot_histo_targets(test, "Cat1")
    fig.add_subplot(spec[0, 1])
    plot_histo_targets(test, "Cat2")
    fig.add_subplot(spec[0, 2])
    plot_histo_targets(test, "Cat3")
    fig.add_subplot(spec[1, 0])
    plot_histo_targets_len(test, "Cat1")
    fig.add_subplot(spec[1, 1])
    plot_histo_targets_len(test, "Cat2")
    fig.add_subplot(spec[1, 2])
    plot_histo_targets_len(test, "Cat3")
    fig.add_subplot(spec[2, 0])
    plot_histo(test, "Text", max_len)
    fig.add_subplot(spec[2, 1])
    plot_histo_label(test, "Text", ["Cat1"], max_len)
    fig.add_subplot(spec[2, 2])
    plot_histo_label(test, "Text", ["Cat2", "Cat1"], max_len)
    fig.add_subplot(spec[3, 0])
    plot_histo_lim(test, "Text", max_len)
    fig.add_subplot(spec[3, 1])
    plot_histo_label_lim(test, "Text", ["Cat1"], max_len)
    fig.add_subplot(spec[3, 2])
    plot_histo_label_lim(test, "Text", ["Cat2", "Cat1"], max_len)
    fig.add_subplot(spec[4, 0])
    plot_histo_lost(test, "Text", [], max_len)
    fig.add_subplot(spec[4, 1])
    # NOTE(review): the training figure passes ["Cat1"] here — ["Cat2"] looks
    # like a copy-paste slip; confirm which prefix was intended.
    plot_histo_lost(test, "Text", ["Cat2"], max_len)
    fig.add_subplot(spec[4, 2])
    plot_histo_lost(test, "Text", ["Cat2", "Cat1"], max_len)
    fig.add_subplot(spec[5:, 0])
    plot_sub_cat(test, [["Cat1"], "Cat2"])
    fig.add_subplot(spec[5:, 2])
    plot_sub_cat(test, [["Cat2", "Cat1"], "Cat3"])
    plt.savefig("./visualizations/" + data_set + "/Data_analysis_complete_test.png", dpi=200, format="png",
                facecolor="white")
    plt.show()
######################################################################
# Result table generator
def pad(list_to_pad):
    """
    Pad every sequence to the length of the longest one by repeating its
    last value ('edge' mode). Needed because runs may stop after a
    different number of epochs.

    :param list_to_pad: list of 1-D sequences
    :return: list of equally long numpy arrays
    """
    target = max(len(seq) for seq in list_to_pad)
    return [np.pad(seq, (0, target - len(seq)), 'edge') for seq in list_to_pad]
def get_plot_values(list_of_values):
    """
    Compute the mean curve and its 95% confidence band across runs
    (https://www.wikiwand.com/en/Confidence_interval).

    :param list_of_values: per-run metric curves (possibly different lengths)
    :return: (mean, upper, lower) arrays ready for plotting
    """
    padded = pad(list_of_values)
    mean = np.mean(padded, axis=0)
    # 1.96 * std / sqrt(n+1) is the half-width of the 95% interval.
    half_width = 1.96 * (np.std(padded, axis=0) / np.sqrt(len(mean) + 1))
    return mean, mean + half_width, mean - half_width
def get_model_plot(model):
    """
    Load the saved training histories of every run of *model* and prepare
    mean/confidence curves for plotting.

    :param model: path of the model to plot
    :return: (title, lvl, f1 plot values, accuracy plot values) where the
             plot values are the (mean, upper, lower) tuples returned by
             get_plot_values
    """
    # Load histories for all runs (one rep_and_histo.npz per run).
    histories = [filename for filename in glob.iglob(model + "/**/rep_and_histo.npz", recursive=True)]
    histo_list_acc = []
    histo_list_f1 = []
    # Extract the per-epoch validation accuracy and F1 curves of each run.
    for hist in histories:
        arr = np.load(hist, allow_pickle=True)
        histo = arr['hist'].item(0)
        try:
            histo_list_acc.append(np.array(histo['val_accuracy']))
            histo_list_f1.append(np.array(histo['val_f1_score']))
        # NOTE(review): bare except — old DBpedia runs stored the metrics under
        # different archive keys (custom F1 macro output predating tensorflow-addons);
        # re-running those experiments would take 2-3 weeks.
        except:
            histo_list_acc.append(np.array(arr['accu_list']))
            histo_list_f1.append(np.array(arr['f1_score_list']))
    plot_histo_list_acc = get_plot_values(histo_list_acc)
    plot_histo_list_f1 = get_plot_values(histo_list_f1)
    # NOTE(review): assumes a fixed 31-character path prefix before the title —
    # fragile; confirm against the actual experiment directory layout.
    title = model[31:]
    lvl = int(title[title.find("lvl") + 3])  # category level encoded as "...lvlN..."
    return title, lvl, plot_histo_list_f1, plot_histo_list_acc
def plot_curves(models):
    """
    Plot the validation accuracy and F1 macro curves for the given models.

    :param models: list of model paths to plot
    :return: None; shows and saves a two-panel figure with the mean curves
             and their 95% confidence bands
    """
    fig = plt.figure(figsize=(30, 8))
    spec = gridspec.GridSpec(ncols=2, nrows=1, figure=fig)
    lines = ["", '-', '--', ':']  # line style indexed by category level 1-3
    max_len = 0  # longest epoch count seen, used for the shared x-axis ticks
    fig.suptitle("Validation curves while Training")
    # Left panel: F1 macro score.
    fig.add_subplot(spec[0, 0])
    for model in models:
        # (title, level, (mean, upper, lower) for f1, same for accuracy)
        title, lvl, plot_histo_list_f1, plot_histo_list_acc = get_model_plot(
            model)
        length = plot_histo_list_f1[0].shape[0] + 1
        x = range(1, length)
        if length > max_len:
            max_len = length
        plt.plot(x, plot_histo_list_f1[0], lines[lvl], label="{} mean f1".format(title))
        plt.fill_between(x, plot_histo_list_f1[2], plot_histo_list_f1[1], alpha=0.5)
    plt.xlabel("epoch")
    plt.grid()
    plt.xticks(range(1, max_len + 1, 2))
    plt.title("mean f1 score with confidence")
    # Right panel: accuracy (carries the legend for both panels).
    fig.add_subplot(spec[0, 1])
    for model in models:
        title, lvl, plot_histo_list_f1, plot_histo_list_acc = get_model_plot(
            model)
        length = plot_histo_list_f1[0].shape[0] + 1
        x = range(1, length)
        plt.plot(x, plot_histo_list_acc[0], lines[lvl], label="{} mean".format(title))
        plt.fill_between(x, plot_histo_list_acc[2], plot_histo_list_acc[1], alpha=0.5)
    plt.xlabel("epoch")
    plt.legend()
    plt.grid()
    plt.xticks(range(1, max_len + 1, 2))
    plt.title("mean accuracy score with confidence")
    plt.savefig("./visualizations/trainig_curves.png", dpi=200, format="png", facecolor="white")
    plt.show()
def predict_per_label(path, input_ids, attention_mask, batch):
    """
    Load a saved per-label model as a TensorFlow graph and predict labels
    batch-wise.

    :param path: path of the saved model to load
    :param input_ids: tokenized inputs for the prediction
    :param attention_mask: attention mask matching *input_ids*
    :param batch: batch size
    :return: numpy array with the predicted label index for every input
    """
    # Because of the pretrained weights the saved model can only be loaded
    # as a tensorflow graph, not as a keras model.
    graph_fn = tf.saved_model.load(path).signatures["serving_default"]
    total = input_ids.shape[0]
    chunks = [np.array([])]  # start with an empty float array, as the original concat did
    for end in range(batch, total + batch, batch):
        start = end - batch
        # 'Cat' holds the class probabilities; argmax turns them into labels.
        probs = graph_fn(input_ids=input_ids[start:end], attention_mask=attention_mask[start:end])['Cat']
        chunks.append(np.argmax(probs, axis=1))
    return np.concatenate(chunks)
def evaluate(path, x, batch, test_target):
    """
    Load a saved per-level/flat model as a TensorFlow graph, predict on *x*
    batch-wise and score the predictions.

    :param path: path of the saved model to load
    :param x: dict with 'input_ids' and 'attention_mask' arrays
    :param batch: batch size (same as used for training)
    :param test_target: integer labels to predict against
    :return: (f1_score, accuracy_score) of the prediction
    """
    # Because of the pretrained weights the saved model can only be loaded
    # as a tensorflow graph, not as a keras model.
    imported = tf.saved_model.load(path)
    f = imported.signatures["serving_default"]  # default serving function of the graph
    test_pred = np.array([])
    top = x['input_ids'].shape[0]  # total number of examples
    for i in range(batch, top + batch, batch):  # predict batch-wise
        # 'Cat' holds the class probabilities; argmax converts them to labels.
        test_pred = np.concatenate((test_pred, np.argmax(
            f(input_ids=x['input_ids'][i - batch:i], attention_mask=x['attention_mask'][i - batch:i])['Cat'], axis=1)))
    f1_score = sklearn.metrics.f1_score(test_target, test_pred, average='macro')
    accuracy_score = sklearn.metrics.accuracy_score(test_target, test_pred)
    return f1_score, accuracy_score
def add_cats(string, lvl):
    """
    Build the input description used in the result table: the target type
    followed by the upper category levels and finally "Text".

    :param string: target type ("Target" or "Predicted")
    :param lvl: level whose inputs are being described; Cat1..Cat(lvl-1) are listed
    :return: e.g. add_cats("Target", 3) -> "Target Cat1, Cat2, Text"
    """
    pieces = [string]
    pieces.extend(" Cat{},".format(i) for i in range(1, lvl))
    pieces.append(" Text")
    return "".join(pieces)
def create_results(model):
    """
    Dispatch to the right result-row generator depending on the model path,
    which encodes the whole experiment configuration.

    :param model: path of the model to analyse
    :return: the result-table row(s) for the given model
    """
    # Decode the configuration from the model path.
    # NOTE(review): model[31:] assumes a fixed 31-character path prefix —
    # fragile; confirm against the actual experiment directory layout.
    title = model[31:]
    dataset = title[:title.find("/")]
    lvl = int(title[title.find("lvl") + 3])
    tokens = int(title[title.rfind("/") + 1:title.rfind("T")])
    epochs = title[title.rfind("T") + 2:title.rfind("e")]
    batch = int(title[title.rfind("e") + 2:title.rfind("b")])
    test_labels = None
    if title.find("flatt__") + 1:  # +1 turns find()'s -1 "not found" into falsy 0
        # Flat models: plain text in, no label inputs.
        test_labels = None  # depending on what should be tested
        train_in = "Text"
        test_in = "Text"
        return write_results(dataset, lvl, tokens, epochs, batch, test_labels, train_in, test_in, model)
    elif title.find("Predicted") + 1:
        # Hierarchical per-level models trained on predicted upper-level labels.
        train_in = add_cats("Predicted", lvl)
        test_in = train_in
        # Read the model's config to learn where the predicted labels came from.
        conf = "./Configs/" + dataset + "_config_lvl" + str(lvl) + "_h_p_bert-base-uncased.yaml"
        with open(conf) as f:
            arguments = yaml.load(f, Loader=yaml.FullLoader)
        test_labels = arguments["test_labels"]  # path to test labels
        return write_results(dataset, lvl, tokens, epochs, batch, test_labels, train_in, test_in, model)
    elif title.find("per_label") + 1:
        # Per-label models have their own evaluation routine.
        return write_results_per_label(dataset, lvl, tokens, epochs, batch, model)
    else:
        # Hierarchical per-level models trained on target (gold) labels.
        train_in = add_cats("Target", lvl)
        # Read the model's config to learn which target labels were used.
        conf = "./Configs/" + dataset + "_config_lvl" + str(lvl) + "_h_t_bert-base-uncased.yaml"
        with open(conf) as f:
            arguments = yaml.load(f, Loader=yaml.FullLoader)
        test_labels = arguments["test_labels"]  # path to test labels
        # Target-trained models are evaluated twice: on target labels and on
        # predicted labels.
        return np.vstack((write_results(dataset, lvl, tokens, epochs, batch,
                                        [['Target'] + ['Cat' + str(i) for i in range(1, lvl)]], train_in, train_in,
                                        model),
                          write_results(dataset, lvl, tokens, epochs, batch, test_labels, train_in,
                                        add_cats("Predicted", lvl), model)))
def write_results(dataset, lvl, tokens, epochs, batch, test_labels, train_in, test_in, model):
    """
    Evaluate all runs of one flat / per-level model and build its result-table row.

    :param dataset: dataset to test on
    :param lvl: category level to test
    :param tokens: maximal token length
    :param epochs: maximal epochs the model was trained for
    :param batch: batch size for evaluating, same as for training
    :param test_labels: labels to use for testing
    :param train_in: description of what was used for training
    :param test_in: description of what is used for testing
    :param model: path of the model
    :return: the result-table row (list) for the given model
    """
    # Simulate a config file for the data-loading helpers.
    arguments = {'model_name': 'bert-base-uncased',
                 'max_length': tokens,
                 'epochs': epochs,
                 'batch_size': batch,
                 'data_path': dataset,
                 'lvl': lvl,
                 'test_labels': test_labels}
    # Prepare tokenization for evaluation.
    model_name = arguments['model_name']
    config = BertConfig.from_pretrained(model_name)
    config.output_hidden_states = False
    data, trunest_class_names, test_target = BERT_per_lvl.get_test_data(arguments)  # load test data
    x = BERT_per_lvl.get_tokenized(model_name, config, data, tokens)  # tokenize test data
    runs = [filename for filename in glob.iglob(model + "/**/model", recursive=True)]  # all saved runs of this model
    res_list = []
    for run in runs:  # evaluate each run
        res_list.append(evaluate(run, x, batch, test_target))  # (f1_score, accuracy_score)
    # Mean and standard deviation over the runs, formatted as "mean(std)".
    f1_mean, accu_mean = np.mean(res_list, axis=0)
    f1_std, accu_std = np.std(res_list, axis=0)
    f1_string = '{:.3f}({:.3f})'.format(f1_mean, f1_std)
    acc_string = '{:.3f}({:.3f})'.format(accu_mean, accu_std)
    # Only the level this model predicts gets scores; other levels show "-".
    aux = ['-'] * 6
    aux[(lvl - 1) * 2] = acc_string
    aux[(lvl - 1) * 2 + 1] = f1_string
    # Longest number of epochs any run actually trained before early stopping.
    _, _, leng, _ = get_model_plot(model)
    used_ep = len(leng[0])
    # Assemble the result-table row.
    table_data = ["Per_lvl", dataset, '{}({})'.format(epochs, used_ep), tokens, batch, len(runs), train_in, "Cat" + str(lvl), test_in] + aux
    return table_data
def make_table(models):
    """
    Evaluate every experiment in *models* and assemble one result row per model
    into a single presentation table.

    :param models: list of model directories to evaluate (passed to create_results)
    :return: result table as a pandas DataFrame, sorted by dataset/output/inputs
    """
    column_names = ["Type", "Dataset", "Epochs", "Tokens", "Batch size", "Runs", "Train Input", "Output",
                    "Test Input", "Cat1 accuracy", "Cat1 F1 score macro", "Cat2 accuracy",
                    "Cat2 F1 score macro", "Cat3 accuracy", "Cat3 F1 score macro"]
    # One row (or stack of rows) per model; tqdm gives a progress bar over the runs.
    rows = np.vstack([create_results(model) for model in tqdm(models)])
    table = pd.DataFrame(rows, columns=column_names)
    # Sort for a stable, readable ordering of the final table.
    return table.sort_values(by=['Dataset', 'Output', "Train Input", "Test Input"],
                             ascending=[True, True, False, False])
def get_scores(test_pred, model, batch, x, test_target, classes, runs, dataset, lvl, tokens, epochs, train_in, test_in, prediction_only=False):
    """
    Evaluate the runs of a per-label model: the test set is partitioned by the
    upper-level label in test_pred, each partition is scored by its own
    per-class model, and the per-class (local) predictions are mapped back to
    global labels and re-inserted at their original positions.

    :param test_pred: upper-level labels used to partition the inputs
    :param model: path to the per-label model directory (contains Run*/Class* subdirs)
    :param batch: batch size used for prediction
    :param x: dict with 'input_ids' and 'attention_mask' tensors for the whole test set
    :param test_target: ground-truth labels to evaluate against
    :param classes: number of upper-level classes, i.e. how many per-class models exist
    :param runs: how many runs to average over
    :param dataset: dataset name (only recorded in the result row)
    :param lvl: hierarchy level being tested (selects the result-table columns)
    :param tokens: maximal token length (only recorded in the result row)
    :param epochs: maximal epochs the model was trained on (only recorded in the result row)
    :param train_in: description of what was used for training (for the result row)
    :param test_in: description of what is used for testing (for the result row)
    :param prediction_only: boolean; if True return only the predictions of the last run
    :return: predictions if prediction_only is True, else the result table row for this model
    """
    score = []  # per-run [f1, accuracy] pairs
    # for run in range(2, runs + 1): # If the lower runs into error
    for run in range(1, runs + 1): # For each run
        pred = np.zeros(test_target.shape[0])  # global prediction vector, filled per class below
        for label_class in range(classes): # for each upper level label
            indices_tf = [[i] for i, j in enumerate(test_pred) if j == label_class] # indices for slicing the inputs
            input_ids = tf.gather_nd(x['input_ids'], indices_tf)
            attention_mask = tf.gather_nd(x['attention_mask'], indices_tf)
            class_model = model + "/Run" + str(run) + "/Class" + str(label_class) + "/model" # model for this label_class
            class_pred = predict_per_label(class_model, input_ids, attention_mask, batch).astype(int) # predictions for this label_class
            mapping = np.load(model + "/Run" + str(run) + "/Class" + str(label_class) + "/tested__/rep_and_histo.npz")["test_mapping"].astype(int) # load the local-to-global label mapping from file
            class_pred = mapping[class_pred] # Map local labels to global
            # Insert predictions in the places where the original (not grouped) examples were
            indices = np.where(test_pred == label_class)[0]
            pred[indices] = class_pred
        # Evaluate over all subclasses for each run
        f1_score = sklearn.metrics.f1_score(test_target, pred, average='macro')
        accuracy_score = sklearn.metrics.accuracy_score(test_target, pred)
        score.append([f1_score, accuracy_score])
    if prediction_only:
        # NOTE: returns the predictions of the *last* run only.
        return pred
    # Average over all runs
    f1_mean, accu_mean = np.mean(score, axis=0)
    f1_std, accu_std = np.std(score, axis=0)
    f1_string = '{:.3f}({:.3f})'.format(f1_mean, f1_std)
    acc_string = '{:.3f}({:.3f})'.format(accu_mean, accu_std)
    # For the levels not predicted by this model give "-" out
    aux = ['-'] * 6
    aux[(lvl - 1) * 2] = acc_string
    aux[(lvl - 1) * 2 + 1] = f1_string
    # Format data to generate a row of the results table
    table_data = ["Per_label", dataset, epochs, tokens, batch, runs, train_in, "Cat" + str(lvl), test_in] + aux
    return table_data
def write_results_per_label(dataset, lvl, tokens, epochs, batch, model):
    """
    Evaluate all runs for a per-label model and generate the result table rows:
    once with the target upper-level labels as input partitioning, and once with
    predicted upper-level labels (the realistic end-to-end setting).

    :param dataset: dataset to test on
    :param lvl: level to test (2 or 3; for 3 the level-2 labels are themselves predicted)
    :param tokens: maximal token length
    :param epochs: maximal epochs the model was trained on
    :param batch: batch size for evaluating, same as for training
    :param model: path to model
    :return: two stacked result table rows (target-partitioned, prediction-partitioned)
    """
    # Get config for the model
    conf = "./Configs/" + dataset + "_config_lvl" + str(lvl) + "_per_label.yaml"
    with open(conf) as f:
        arguments = yaml.load(f, Loader=yaml.FullLoader)
    # Descriptions recorded in the result rows
    train_in = "Text divided per Target Cat" + str(lvl - 1)
    test_in = "Text divided per Predicted Cat" + str(lvl - 1)
    test_model = arguments["test_model_lvl1"] # path to the flat level-1 model used to predict input labels
    model_name = arguments['model_name']
    # Prepare tokenization for evaluation
    config = BertConfig.from_pretrained(model_name)
    config.output_hidden_states = False
    data, class_names = BERT_per_label.get_upper_label_data(dataset, False, lvl) # Get test data of the upper level
    x = BERT_per_lvl.get_tokenized(model_name, config, data, tokens) # Tokenize test data
    runs = len([filename for filename in glob.iglob(model + "/**/Run*", recursive=True)]) # count the runs stored for this model
    classes = class_names[lvl - 2].shape[0]  # number of upper-level classes (one sub-model each)
    cat_num = str('Cat' + str(lvl - 1))          # column of the upper-level (input) labels
    cat_num_desired = str('Cat' + str(lvl))      # column of the target labels
    predicted_in = predict_per_label(test_model, x['input_ids'], x['attention_mask'], batch) # predicted level-1 input labels (flat model)
    if lvl == 3: # for the third level additionally predict level 2 with the per-label level-2 model
        second_test_model = arguments['test_model_lvl2']
        classes_for_intermediate = class_names[lvl - 3].shape[0]
        # prediction_only=True makes get_scores return only the predicted labels
        predicted_in = get_scores(predicted_in, second_test_model, batch, x, np.array(data[str('Cat' + str(lvl - 1))].to_list()), classes_for_intermediate, 2, dataset, lvl - 1, tokens, epochs, train_in, train_in, True)
    test_pred = [np.array(data[cat_num].to_list()), predicted_in] # input labels: once target, once predicted
    test_target = np.array(data[cat_num_desired].to_list()) # targets
    # Evaluate and generate result table rows (NOTE: the second call rebinds test_in
    # from its description string to the resulting table row)
    target_in = get_scores(test_pred[0], model, batch, x, test_target, classes, runs, dataset, lvl, tokens, epochs, train_in, train_in)
    test_in = get_scores(test_pred[1], model, batch, x, test_target, classes, runs, dataset, lvl, tokens, epochs, train_in, test_in)
    return np.vstack((target_in, test_in))
| StarcoderdataPython |
3317738 | import os
import cv2
import time
import face_recognition
import pickle
# Real-time webcam face recognition: pickled face encodings are loaded from
# known_faces/<numeric id>/, every frame is matched against them, and each
# unrecognised face is assigned the next free numeric id for this session.
pTime = 0  # timestamp of the previous frame (drives the FPS overlay)
cTime = 0
KNOWN_FACES_DIR = 'known_faces'
UNKNOWN_FACES_DIR = 'unknown_faces'
TOLERANCE = 0.7  # max face distance still counted as a match (lower = stricter)
FRAME_THICKNESS = 2
FONT_THICKNESS = 2
MODEL = 'cnn'  # face_recognition detector backend ('cnn' or 'hog')
cap = cv2.VideoCapture(0)
known_faces = []  # loaded encodings
known_names = []  # numeric person id per encoding (directory name)
if not os.path.exists(f"./{KNOWN_FACES_DIR}"):
    os.mkdir(KNOWN_FACES_DIR)
# Load every pickled encoding; each subdirectory name is a numeric person id.
for name in os.listdir(KNOWN_FACES_DIR):
    for filename in os.listdir(f"{KNOWN_FACES_DIR}/{name}"):
        # FIX: the original opened a path relative to the cwd that was missing
        # the known_faces directory (and the real filename), and it leaked the
        # file handle. Build the full path and use a context manager.
        with open(f"{KNOWN_FACES_DIR}/{name}/{filename}", "rb") as encoding_file:
            known_faces.append(pickle.load(encoding_file))
        known_names.append(int(name))
# First free id for faces never seen before.
if len(known_names) > 0:
    next_id = max(known_names)+1
else:
    next_id = 0
while True:
    success, image = cap.read()
    cTime = time.time()
    fps = 1/(cTime-pTime)
    pTime = cTime
    locations = face_recognition.face_locations(image,model=MODEL)
    encodings = face_recognition.face_encodings(image,locations)
    for face_encoding, face_location in zip(encodings,locations):
        results = face_recognition.compare_faces(known_faces,face_encoding,TOLERANCE)
        match = None
        if True in results:
            match = known_names[results.index(True)]
            print(f"Match Found:{match}")
        else:
            # New face for this session: assign a fresh id (string form).
            match = str(next_id)
            next_id += 1
        # Bounding box around the face (location is top, right, bottom, left).
        top_left = (face_location[3],face_location[0])
        bottom_right = (face_location[1],face_location[2])
        color = [0,255,0]
        cv2.rectangle(image, top_left, bottom_right, color, FRAME_THICKNESS)
        # Filled label strip just below the box.
        top_left = (face_location[3],face_location[2])
        bottom_right = (face_location[1],face_location[2]+22)
        cv2.rectangle(image, top_left, bottom_right, color, cv2.FILLED)
        # FIX: matches of known faces are ints; cv2.putText requires a string.
        cv2.putText(image,str(match), (face_location[3]+10,face_location[2]+15),cv2.FONT_HERSHEY_SIMPLEX,0.5, (200,200,0))
    cv2.putText(image,str(int(fps)),(10,70),cv2.FONT_HERSHEY_PLAIN,2,(255,255,0),3)
    cv2.imshow(" ",image)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
3423978 | import os
from os.path import join
from pathlib import Path
from PIL import Image
if __name__ == "__main__":
    # Mirror the ground-truth dataset (y) into a grayscale input dataset (x),
    # keeping the same train/test/val + jpg directory layout.
    print('Making B&W')
    src_root = '../../res/dataset/y/splitted/'
    dst_root = '../../res/dataset/x/splitted/'
    # Make sure the three output split folders exist before writing anything.
    for split in ('train', 'test', 'val'):
        Path(join(dst_root, split, 'jpg')).mkdir(parents=True, exist_ok=True)
    top, split_dirs, _ = next(os.walk(src_root))
    for split_dir in split_dirs:
        jpg_dir, _, filenames = next(os.walk(join(top, split_dir, 'jpg')))
        for filename in filenames:
            # 'L' = 8-bit single-channel (grayscale) conversion.
            grey = Image.open(join(jpg_dir, filename)).convert('L')
            grey.save(join(dst_root, split_dir, 'jpg', filename))
    # TODO1 move train/val/test out of splitted, both in x and y
9667732 | <reponame>LaudateCorpus1/oci-python-sdk
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class UpdateVirtualCircuitDetails(object):
    """
    Value holder describing the mutable fields of a FastConnect virtual
    circuit for an update request (generated OCI SDK model).

    Every attribute is exposed through a property; the setters for
    routing_policy, provider_state and ip_mtu validate the value against the
    allowed-value constants declared on this class and raise ValueError on
    anything else.
    """
    #: Allowed value for the routing_policy property.
    ROUTING_POLICY_ORACLE_SERVICE_NETWORK = "ORACLE_SERVICE_NETWORK"
    #: Allowed value for the routing_policy property.
    ROUTING_POLICY_REGIONAL = "REGIONAL"
    #: Allowed value for the routing_policy property.
    ROUTING_POLICY_MARKET_LEVEL = "MARKET_LEVEL"
    #: Allowed value for the routing_policy property.
    ROUTING_POLICY_GLOBAL = "GLOBAL"
    #: Allowed value for the provider_state property.
    PROVIDER_STATE_ACTIVE = "ACTIVE"
    #: Allowed value for the provider_state property.
    PROVIDER_STATE_INACTIVE = "INACTIVE"
    #: Allowed value for the ip_mtu property.
    IP_MTU_MTU_1500 = "MTU_1500"
    #: Allowed value for the ip_mtu property.
    IP_MTU_MTU_9000 = "MTU_9000"
    def __init__(self, **kwargs):
        """
        Initializes a new UpdateVirtualCircuitDetails object with values from
        keyword arguments (applied by the init_model_state_from_kwargs decorator).

        Supported kwargs, each corresponding to the property of the same name:

        :param str bandwidth_shape_name: provisioned data rate (bandwidth shape name)
        :param list[oci.core.models.CrossConnectMapping] cross_connect_mappings: cross-connect mappings
        :param list[str] routing_policy: items must be one of "ORACLE_SERVICE_NETWORK", "REGIONAL", "MARKET_LEVEL", "GLOBAL"
        :param int customer_bgp_asn: deprecated, use customer_asn instead
        :param int customer_asn: BGP ASN of the network at the other end of the BGP session
        :param dict(str, dict(str, object)) defined_tags: defined tags for this resource
        :param str display_name: user-friendly, changeable name
        :param dict(str, str) freeform_tags: free-form tags for this resource
        :param str gateway_id: OCID of the Drg used by a private virtual circuit
        :param str provider_state: one of "ACTIVE", "INACTIVE"
        :param str provider_service_key_name: service key name offered by the provider
        :param str reference_comment: provider-supplied reference information
        :param str ip_mtu: one of "MTU_1500", "MTU_9000"
        """
        # attribute name -> declared type; consumed by the SDK (de)serializer.
        self.swagger_types = {
            'bandwidth_shape_name': 'str',
            'cross_connect_mappings': 'list[CrossConnectMapping]',
            'routing_policy': 'list[str]',
            'customer_bgp_asn': 'int',
            'customer_asn': 'int',
            'defined_tags': 'dict(str, dict(str, object))',
            'display_name': 'str',
            'freeform_tags': 'dict(str, str)',
            'gateway_id': 'str',
            'provider_state': 'str',
            'provider_service_key_name': 'str',
            'reference_comment': 'str',
            'ip_mtu': 'str'
        }
        # python attribute name -> JSON field name on the wire.
        self.attribute_map = {
            'bandwidth_shape_name': 'bandwidthShapeName',
            'cross_connect_mappings': 'crossConnectMappings',
            'routing_policy': 'routingPolicy',
            'customer_bgp_asn': 'customerBgpAsn',
            'customer_asn': 'customerAsn',
            'defined_tags': 'definedTags',
            'display_name': 'displayName',
            'freeform_tags': 'freeformTags',
            'gateway_id': 'gatewayId',
            'provider_state': 'providerState',
            'provider_service_key_name': 'providerServiceKeyName',
            'reference_comment': 'referenceComment',
            'ip_mtu': 'ipMtu'
        }
        # Backing fields for the properties below.
        self._bandwidth_shape_name = None
        self._cross_connect_mappings = None
        self._routing_policy = None
        self._customer_bgp_asn = None
        self._customer_asn = None
        self._defined_tags = None
        self._display_name = None
        self._freeform_tags = None
        self._gateway_id = None
        self._provider_state = None
        self._provider_service_key_name = None
        self._reference_comment = None
        self._ip_mtu = None
    @property
    def bandwidth_shape_name(self):
        """
        Gets the bandwidth_shape_name of this UpdateVirtualCircuitDetails.
        The provisioned data rate of the connection (a bandwidth shape name, see
        list_fast_connect_provider_virtual_circuit_bandwidth_shapes).
        To be updated only by the customer who owns the virtual circuit.

        :rtype: str
        """
        return self._bandwidth_shape_name
    @bandwidth_shape_name.setter
    def bandwidth_shape_name(self, bandwidth_shape_name):
        """Sets the bandwidth_shape_name of this UpdateVirtualCircuitDetails."""
        self._bandwidth_shape_name = bandwidth_shape_name
    @property
    def cross_connect_mappings(self):
        """
        Gets the cross_connect_mappings of this UpdateVirtualCircuitDetails.
        Mappings for the cross-connects or cross-connect groups associated with
        this virtual circuit; see CrossConnectMapping for which fields the
        customer vs the provider may update.

        :rtype: list[oci.core.models.CrossConnectMapping]
        """
        return self._cross_connect_mappings
    @cross_connect_mappings.setter
    def cross_connect_mappings(self, cross_connect_mappings):
        """Sets the cross_connect_mappings of this UpdateVirtualCircuitDetails."""
        self._cross_connect_mappings = cross_connect_mappings
    @property
    def routing_policy(self):
        """
        Gets the routing_policy of this UpdateVirtualCircuitDetails.
        How routing information about the Oracle cloud is shared over a public
        virtual circuit. Allowed items: "ORACLE_SERVICE_NETWORK", "REGIONAL",
        "MARKET_LEVEL", "GLOBAL".

        :rtype: list[str]
        """
        return self._routing_policy
    @routing_policy.setter
    def routing_policy(self, routing_policy):
        """
        Sets the routing_policy of this UpdateVirtualCircuitDetails.

        :raises ValueError: if any item is not one of the allowed values
        """
        allowed_values = ["ORACLE_SERVICE_NETWORK", "REGIONAL", "MARKET_LEVEL", "GLOBAL"]
        # Validate each item unless the value is falsy or the SDK's "not set" sentinel.
        if routing_policy and routing_policy is not NONE_SENTINEL:
            for value in routing_policy:
                if not value_allowed_none_or_none_sentinel(value, allowed_values):
                    raise ValueError(
                        "Invalid value for `routing_policy`, must be None or one of {0}"
                        .format(allowed_values)
                    )
        self._routing_policy = routing_policy
    @property
    def customer_bgp_asn(self):
        """
        Gets the customer_bgp_asn of this UpdateVirtualCircuitDetails.
        Deprecated. Use customer_asn instead; specifying both causes the
        request to be rejected.

        :rtype: int
        """
        return self._customer_bgp_asn
    @customer_bgp_asn.setter
    def customer_bgp_asn(self, customer_bgp_asn):
        """Sets the customer_bgp_asn (deprecated; use customer_asn)."""
        self._customer_bgp_asn = customer_bgp_asn
    @property
    def customer_asn(self):
        """
        Gets the customer_asn of this UpdateVirtualCircuitDetails.
        The BGP ASN of the network at the other end of the BGP session from
        Oracle (customer's ASN, or provider's ASN if the session is from the
        provider's edge router). 2-byte or 4-byte ASN, "asplain" format.

        :rtype: int
        """
        return self._customer_asn
    @customer_asn.setter
    def customer_asn(self, customer_asn):
        """Sets the customer_asn of this UpdateVirtualCircuitDetails."""
        self._customer_asn = customer_asn
    @property
    def defined_tags(self):
        """
        Gets the defined_tags of this UpdateVirtualCircuitDetails.
        Defined tags for this resource; each key is predefined and scoped to a
        namespace. Example: {"Operations": {"CostCenter": "42"}}

        :rtype: dict(str, dict(str, object))
        """
        return self._defined_tags
    @defined_tags.setter
    def defined_tags(self, defined_tags):
        """Sets the defined_tags of this UpdateVirtualCircuitDetails."""
        self._defined_tags = defined_tags
    @property
    def display_name(self):
        """
        Gets the display_name of this UpdateVirtualCircuitDetails.
        A user-friendly name; not unique, changeable. Avoid entering
        confidential information.

        :rtype: str
        """
        return self._display_name
    @display_name.setter
    def display_name(self, display_name):
        """Sets the display_name of this UpdateVirtualCircuitDetails."""
        self._display_name = display_name
    @property
    def freeform_tags(self):
        """
        Gets the freeform_tags of this UpdateVirtualCircuitDetails.
        Free-form tags: simple key-value pairs with no predefined name, type,
        or namespace. Example: {"Department": "Finance"}

        :rtype: dict(str, str)
        """
        return self._freeform_tags
    @freeform_tags.setter
    def freeform_tags(self, freeform_tags):
        """Sets the freeform_tags of this UpdateVirtualCircuitDetails."""
        self._freeform_tags = freeform_tags
    @property
    def gateway_id(self):
        """
        Gets the gateway_id of this UpdateVirtualCircuitDetails.
        The OCID of the Drg that this private virtual circuit uses. To be
        updated only by the customer who owns the virtual circuit.

        :rtype: str
        """
        return self._gateway_id
    @gateway_id.setter
    def gateway_id(self, gateway_id):
        """Sets the gateway_id of this UpdateVirtualCircuitDetails."""
        self._gateway_id = gateway_id
    @property
    def provider_state(self):
        """
        Gets the provider_state of this UpdateVirtualCircuitDetails.
        The provider's provisioning state for this virtual circuit (relevant
        only when using FastConnect via a provider). Allowed values: "ACTIVE",
        "INACTIVE". To be updated only by the provider.

        :rtype: str
        """
        return self._provider_state
    @provider_state.setter
    def provider_state(self, provider_state):
        """
        Sets the provider_state of this UpdateVirtualCircuitDetails.

        :raises ValueError: if the value is not one of "ACTIVE", "INACTIVE"
        """
        allowed_values = ["ACTIVE", "INACTIVE"]
        if not value_allowed_none_or_none_sentinel(provider_state, allowed_values):
            raise ValueError(
                "Invalid value for `provider_state`, must be None or one of {0}"
                .format(allowed_values)
            )
        self._provider_state = provider_state
    @property
    def provider_service_key_name(self):
        """
        Gets the provider_service_key_name of this UpdateVirtualCircuitDetails.
        The service key name offered by the provider (when connecting via a
        provider).

        :rtype: str
        """
        return self._provider_service_key_name
    @provider_service_key_name.setter
    def provider_service_key_name(self, provider_service_key_name):
        """Sets the provider_service_key_name of this UpdateVirtualCircuitDetails."""
        self._provider_service_key_name = provider_service_key_name
    @property
    def reference_comment(self):
        """
        Gets the reference_comment of this UpdateVirtualCircuitDetails.
        Provider-supplied reference information about this virtual circuit.
        To be updated only by the provider.

        :rtype: str
        """
        return self._reference_comment
    @reference_comment.setter
    def reference_comment(self, reference_comment):
        """Sets the reference_comment of this UpdateVirtualCircuitDetails."""
        self._reference_comment = reference_comment
    @property
    def ip_mtu(self):
        """
        Gets the ip_mtu of this UpdateVirtualCircuitDetails.
        The layer 3 IP MTU to use on this virtual circuit. Allowed values:
        "MTU_1500", "MTU_9000".

        :rtype: str
        """
        return self._ip_mtu
    @ip_mtu.setter
    def ip_mtu(self, ip_mtu):
        """
        Sets the ip_mtu of this UpdateVirtualCircuitDetails.

        :raises ValueError: if the value is not one of "MTU_1500", "MTU_9000"
        """
        allowed_values = ["MTU_1500", "MTU_9000"]
        if not value_allowed_none_or_none_sentinel(ip_mtu, allowed_values):
            raise ValueError(
                "Invalid value for `ip_mtu`, must be None or one of {0}"
                .format(allowed_values)
            )
        self._ip_mtu = ip_mtu
    def __repr__(self):
        # Human-readable dump of all attributes via the shared SDK helper.
        return formatted_flat_dict(self)
    def __eq__(self, other):
        # Value equality: compare the full attribute dictionaries.
        if other is None:
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not self == other
| StarcoderdataPython |
6608517 | # --------------------------------------------------------
# Pytorch multi-GPU Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>, <NAME>, based on code from Ross Girshick
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import argparse
import pprint
import pdb
import time
import random
import torch
from torch.autograd import Variable
import torch.nn as nn
from torch.utils.data.sampler import Sampler
from roi_data_layer.roidb import combined_roidb
from roi_data_layer.roibatchLoader import roibatchLoader
from model.utils.config import cfg, cfg_from_file, cfg_from_list
from model.utils.net_utils import adjust_learning_rate, save_checkpoint, clip_gradient
from model.faster_rcnn.vgg16 import vgg16
from model.faster_rcnn.vgg16_inc_bbox_distil import vgg16_inc
from model.faster_rcnn.resnet import resnet
from model.faster_rcnn.resnet_inc_bbox_distil import resnet_inc_bbox_distil
from model.faster_rcnn.resnet_inc_bbox_distil_residual import resnet_inc_bbox_distil
from model.faster_rcnn.resnet_residual import resnet as resnet_residual
def parse_args():
    """Build the training CLI and return the parsed argument namespace."""
    ap = argparse.ArgumentParser(description='Train a Fast R-CNN network')
    # dataset / backbone
    ap.add_argument('--dataset', dest='dataset', help='training dataset', default='pascal_voc', type=str)
    ap.add_argument('--net', dest='net', help='vgg16, res101', default='vgg16', type=str)
    # schedule / display intervals
    ap.add_argument('--start_epoch', dest='start_epoch', help='starting epoch', default=1, type=int)
    ap.add_argument('--epochs', dest='max_epochs', help='number of epochs to train', default=20, type=int)
    ap.add_argument('--disp_interval', dest='disp_interval', help='number of iterations to display', default=100, type=int)
    ap.add_argument('--checkpoint_interval', dest='checkpoint_interval', help='number of iterations to display', default=10000, type=int)
    # model locations
    ap.add_argument('--load_model', dest='load_model', help='directory to load model', default="", type=str)
    ap.add_argument('--expert_model', dest='expert_model', help='directory to load model', default=None, type=str)
    ap.add_argument('--save_dir', dest='save_dir', help='directory to save models', default="models_inc", type=str)
    # runtime options
    ap.add_argument('--nw', dest='num_workers', help='number of worker to load data', default=0, type=int)
    ap.add_argument('--cuda', dest='cuda', help='whether use CUDA', action='store_true')
    ap.add_argument('--first', dest='first', help='add the second, third, ...class', action='store_true')
    ap.add_argument('--base_model', dest='base_model', help='directory to load base sqe model', default=None, type=str)
    ap.add_argument('--trained_residual_model', dest='trained_residual_model', default='', type=str)
    ap.add_argument('--ls', dest='large_scale', help='whether use large imag scale', action='store_true')
    ap.add_argument('--mGPUs', dest='mGPUs', help='whether use multiple GPUs', action='store_true')
    ap.add_argument('--bs', dest='batch_size', help='batch_size', default=1, type=int)
    ap.add_argument('--cag', dest='class_agnostic', help='whether perform class_agnostic bbox regression', action='store_true')
    # optimization
    ap.add_argument('--o', dest='optimizer', help='training optimizer', default="sgd", type=str)
    ap.add_argument('--lr', dest='lr', help='starting learning rate', default=0.001, type=float)
    ap.add_argument('--lr_decay_step', dest='lr_decay_step', help='step to do learning rate decay, unit is epoch', default=5, type=int)
    ap.add_argument('--lr_decay_gamma', dest='lr_decay_gamma', help='learning rate decay ratio', default=0.1, type=float)
    # session / resume
    ap.add_argument('--s', dest='session', help='training session', default=1, type=int)
    ap.add_argument('--r', dest='resume', help='resume checkpoint or not', default=False, type=bool)
    ap.add_argument('--checksession', dest='checksession', help='checksession to load model', default=1, type=int)
    ap.add_argument('--checkepoch', dest='checkepoch', help='checkepoch to load model', default=1, type=int)
    ap.add_argument('--checkpoint', dest='checkpoint', help='checkpoint to load model', default=0, type=int)
    # logging
    ap.add_argument('--use_tfb', dest='use_tfboard', help='whether use tensorboard', action='store_true')
    return ap.parse_args()
class sampler(Sampler):
    """Batch-aligned random sampler.

    Shuffles the order of whole batches while keeping the indices inside
    each batch contiguous (images grouped by aspect ratio stay together).
    Indices that do not fill a complete final batch are always appended
    at the end, in their original order.
    """

    def __init__(self, train_size, batch_size):
        self.num_data = train_size
        self.num_per_batch = train_size // batch_size
        self.batch_size = batch_size
        # Column offsets 0..batch_size-1, broadcast onto each batch start.
        self.range = torch.arange(0, batch_size).view(1, batch_size).long()
        leftover_count = train_size % batch_size
        self.leftover_flag = bool(leftover_count)
        if self.leftover_flag:
            # Trailing indices that do not form a full batch.
            self.leftover = torch.arange(self.num_per_batch * batch_size,
                                         train_size).long()

    def __iter__(self):
        # One random start offset per full batch, then expand each start
        # into the batch_size consecutive indices belonging to that batch.
        starts = torch.randperm(self.num_per_batch).view(-1, 1) * self.batch_size
        self.rand_num = starts.expand(self.num_per_batch, self.batch_size) + self.range
        self.rand_num_view = self.rand_num.view(-1)
        if self.leftover_flag:
            self.rand_num_view = torch.cat((self.rand_num_view, self.leftover), 0)
        return iter(self.rand_num_view)

    def __len__(self):
        return self.num_data
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.dataset == "pascal_voc":
args.imdb_name = "voc_2007_trainval"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
elif args.dataset == "pascal_voc_0712":
args.imdb_name = "voc_2007_trainval+voc_2012_trainval"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
elif args.dataset == "pascal_voc_0712_incre":
args.imdb_name = "voc_2007_trainval_incre+voc_2012_trainval_incre"
args.imdbval_name = "voc_2007_test_incre"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
args.imdb_name_org = 'voc_2007_trainval+voc_2012_trainval'
args.imdbval_name_org = "voc_2007_test"
elif args.dataset == "pascal_voc_07_incre":
args.imdb_name = "voc_2007_trainval_incre"
args.imdbval_name = "voc_2007_test_incre"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
args.imdb_name_org = 'voc_2007_trainval'
args.imdbval_name_org = "voc_2007_test"
args.imdb_name_expert = "voc_2007_1_train_tv" ########## last new class
elif args.dataset == "pascal_voc_07_15":
args.imdb_name = "voc_2007_5_incre"
args.imdbval_name = "voc_2007_15_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
args.imdb_name_org = 'voc_2007_15_train'
args.imdbval_name_org = "voc_2007_15_test"
args.imdb_name_expert = "voc_2007_5_train_expert"
elif args.dataset == "pascal_voc_07_15_15_plant":
args.imdb_name = "voc_2007_15_plant"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
args.imdb_name_org = 'voc_2007_15_train'
args.imdbval_name_org = "voc_2007_test"
args.imdb_name_expert = "voc_2007_1_train_plant" ########## last new class
elif args.dataset == "pascal_voc_07_15_16_sheep":
args.imdb_name = "voc_2007_16_sheep"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
args.imdb_name_org = 'voc_2007_15_plant'
args.imdbval_name_org = "voc_2007_test"
args.imdb_name_expert = "voc_2007_1_train_sheep" ########## last new class
args.imdb_name_base = 'voc_2007_15_train'
args.imdb_name_last_expert ="voc_2007_1_train_plant"
elif args.dataset == "pascal_voc_07_15_17_sofa":
args.imdb_name = "voc_2007_17_sofa"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
args.imdb_name_org = 'voc_2007_16_sheep'
args.imdbval_name_org = "voc_2007_test"
args.imdb_name_expert = "voc_2007_1_train_sofa" ########## last new class
args.imdb_name_base = 'voc_2007_15_train'
args.imdb_name_last_expert ="voc_2007_1_train_sheep"
elif args.dataset == "pascal_voc_07_15_18_train":
args.imdb_name = "voc_2007_18_train"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
args.imdb_name_org = 'voc_2007_17_sofa'
args.imdbval_name_org = "voc_2007_test"
args.imdb_name_expert = "voc_2007_1_train_train" ########## last new class
args.imdb_name_base = 'voc_2007_15_train'
args.imdb_name_last_expert ="voc_2007_1_train_sofa"
elif args.dataset == "pascal_voc_07_15_19_tv":
args.imdb_name = "voc_2007_19_tv"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
args.imdb_name_org = 'voc_2007_18_train'
args.imdbval_name_org = "voc_2007_test"
args.imdb_name_expert = "voc_2007_1_train_tv" ########## last new class
args.imdb_name_base = 'voc_2007_15_train'
args.imdb_name_last_expert ="voc_2007_1_train_train"
elif args.dataset == "pascal_voc_inc_sqe":#['diningtable','dog','horse','motorbike','person','pottedplant','sheep','sofa','train','tvmonitor']
args.imdb_name = "voc_2007_15_pottedplant"#'voc_2007_10_table'#"voc_2007_12_horse"#"voc_2007_15_pottedplant"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
args.imdb_name_org ='voc_2007_10_train'#'voc_2007_11_dog'#"voc_2007_10_table"#"voc_2007_13_motorbike"# 'voc_2007_12_horse'#'voc_2007_10_train'
args.imdb_name_expert = "voc_2007_1_train"########## last new class
elif args.dataset == "pascal_voc_07_10":
args.imdb_name = "voc_2007_10_incre"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
args.imdb_name_org = 'voc_2007_10_train'
args.imdbval_name_org = "voc_2007_test"
args.imdb_name_expert = "voc_2007_10_train_expert"
elif args.dataset == "coco":
args.imdb_name = "coco_2014_train+coco_2014_valminusminival"
args.imdbval_name = "coco_2014_minival"
args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '50']
elif args.dataset == "coco_40_train":
args.imdb_name = "coco_40_train_inc"
args.imdbval_name = "coco_2014_minival"
args.imdb_name_org = 'coco_40_train_base'
args.imdb_name_expert = "coco_40_train_expert"
args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '50']
elif args.dataset == "imagenet":
args.imdb_name = "imagenet_train"
args.imdbval_name = "imagenet_val"
args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '30']
elif args.dataset == "vg":
# train sizes: train, smalltrain, minitrain
# train scale: ['150-50-20', '150-50-50', '500-150-80', '750-250-150', '1750-700-450', '1600-400-20']
args.imdb_name = "vg_150-50-50_minitrain"
args.imdbval_name = "vg_150-50-50_minival"
args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '50']
elif args.dataset == "pascal_voc_07_1":
args.imdb_name = "voc_2007_1_train"
args.imdbval_name = "voc_2007_test_sqe_1"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
elif args.dataset == "pascal_voc_07_10_10_table":
args.imdb_name = "voc_2007_10_10_table"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
args.imdb_name_org = 'voc_2007_10_train'
args.imdbval_name_org = "voc_2007_test"
args.imdb_name_expert = "voc_2007_1_train_table" ########## last new class
elif args.dataset == "pascal_voc_07_10_11_dog":
args.imdb_name = "voc_2007_10_11_dog"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
args.imdb_name_org = 'voc_2007_10_10_table'
args.imdbval_name_org = "voc_2007_test"
args.imdb_name_expert = "voc_2007_1_train_dog" ########## last new class
elif args.dataset == "pascal_voc_07_10_12_horse":
args.imdb_name = "voc_2007_10_12_horse"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
args.imdb_name_org = 'voc_2007_10_11_dog'
args.imdbval_name_org = "voc_2007_test"
args.imdb_name_expert = "voc_2007_1_train_horse" ########## last new class
elif args.dataset == "pascal_voc_07_10_13_motorbike":
args.imdb_name = "voc_2007_10_13_motorbike"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
args.imdb_name_org = 'voc_2007_10_12_horse'
args.imdbval_name_org = "voc_2007_test"
args.imdb_name_expert = "voc_2007_1_train_motorbike" ########## last new class
elif args.dataset == "pascal_voc_07_10_14_person":
args.imdb_name = "voc_2007_10_14_person"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
args.imdb_name_org = 'voc_2007_10_13_motorbike'
args.imdbval_name_org = "voc_2007_test"
args.imdb_name_expert = "voc_2007_1_train_person" ########## last new class
elif args.dataset == "pascal_voc_07_10_15_plant":
args.imdb_name = "voc_2007_10_15_pottedplant"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
args.imdb_name_org = 'voc_2007_10_14_person'
args.imdbval_name_org = "voc_2007_test"
args.imdb_name_expert = "voc_2007_1_train_plant" ########## last new class
elif args.dataset == "pascal_voc_07_10_16_sheep":
args.imdb_name = "voc_2007_10_16_sheep"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
args.imdb_name_org = 'voc_2007_10_15_pottedplant'
args.imdbval_name_org = "voc_2007_test"
args.imdb_name_expert = "voc_2007_1_train_sheep" ########## last new class
elif args.dataset == "pascal_voc_07_10_17_sofa":
args.imdb_name = "voc_2007_10_17_sofa"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
args.imdb_name_org = 'voc_2007_10_16_sheep'
args.imdbval_name_org = "voc_2007_test"
args.imdb_name_expert = "voc_2007_1_train_sofa" ########## last new class
elif args.dataset == "pascal_voc_07_10_18_train":
args.imdb_name = "voc_2007_10_18_train"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
args.imdb_name_org = 'voc_2007_10_17_sofa'
args.imdbval_name_org = "voc_2007_test"
args.imdb_name_expert = "voc_2007_1_train_train" ########## last new class
elif args.dataset == "pascal_voc_07_10_19_tv":
args.imdb_name = "voc_2007_10_19_tvmonitor"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
args.imdb_name_org = 'voc_2007_10_18_train'
args.imdbval_name_org = "voc_2007_test"
args.imdb_name_expert = "voc_2007_1_train_tv" ########## last new class
elif args.dataset == "pascal_voc_07_5_b":
args.imdb_name = "voc_2007_5_b"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
args.imdb_name_org = 'voc_2007_5_a'
args.imdbval_name_org = "voc_2007_test"
args.imdb_name_expert = "voc_2007_1_5b"
elif args.dataset == "pascal_voc_07_5_c":
args.imdb_name = "voc_2007_5_c"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
args.imdb_name_org = 'voc_2007_5_b'
args.imdbval_name_org = "voc_2007_test"
args.imdb_name_expert = "voc_2007_1_5c"
elif args.dataset == "pascal_voc_07_5_d":
args.imdb_name = "voc_2007_5_d"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
args.imdb_name_org = 'voc_2007_5_c'
args.imdbval_name_org = "voc_2007_test"
args.imdb_name_expert = "voc_2007_1_5d"
elif args.dataset == "pascal_voc_07_19_inc_plant":
args.imdb_name = "voc_2007_19_plant_inc"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
args.imdb_name_org = 'voc_2007_19_ex_plant'
args.imdbval_name_org = "voc_2007_test"
args.imdb_name_expert = "voc_2007_19_1_plant_inc"
elif args.dataset == "pascal_voc_07_19_inc_sheep":
args.imdb_name = "voc_2007_19_sheep_inc"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
args.imdb_name_org = 'voc_2007_19_ex_sheep'
args.imdbval_name_org = "voc_2007_test"
args.imdb_name_expert = "voc_2007_19_1_sheep_inc"
elif args.dataset == "pascal_voc_07_19_inc_sofa":
args.imdb_name = "voc_2007_19_sofa_inc"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
args.imdb_name_org = 'voc_2007_19_ex_sofa'
args.imdbval_name_org = "voc_2007_test"
args.imdb_name_expert = "voc_2007_19_1_sofa_inc"
elif args.dataset == "pascal_voc_07_19_inc_train":
args.imdb_name = "voc_2007_19_train_inc"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
args.imdb_name_org = 'voc_2007_19_ex_train'
args.imdbval_name_org = "voc_2007_test"
args.imdb_name_expert = "voc_2007_19_1_train_inc"
elif args.dataset == "coco_14_train_b":
args.imdb_name = "coco_14_train_b"
args.imdbval_name = "coco_2014_minival"
args.imdb_name_org = 'coco_14_train_a'
args.imdb_name_expert = "coco_14_train_b_expert"
args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '50']
elif args.dataset == "coco_b":
args.imdb_name = "coco_b"
args.imdbval_name = "coco_2014_minival"
args.imdb_name_org = 'coco_a'
args.imdb_name_expert = "coco_b_expert"
args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '50']
elif args.dataset == "coco_c":
args.imdb_name = "coco_c"
args.imdbval_name = "coco_2014_minival"
args.imdb_name_org = 'coco_b'
args.imdb_name_expert = "coco_c_expert"
args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '50']
elif args.dataset == "coco_d":
args.imdb_name = "coco_d"
args.imdbval_name = "coco_2014_minival"
args.imdb_name_org = 'coco_c'
args.imdb_name_expert = "coco_d_expert"
args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '50']
args.cfg_file = "cfgs/{}_ls.yml".format(args.net) if args.large_scale else "cfgs/{}.yml".format(args.net)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
print('Using config:')
pprint.pprint(cfg)
np.random.seed(cfg.RNG_SEED)
torch.cuda.manual_seed(cfg.RNG_SEED)
torch.manual_seed(cfg.RNG_SEED)
random.seed(cfg.RNG_SEED)
#torch.backends.cudnn.benchmark = True
if torch.cuda.is_available() and not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
# train set
# -- Note: Use validation set and disable the flipped to enable faster loading.
cfg.TRAIN.USE_FLIPPED = True
if 'coco' in args.dataset:
cfg.TRAIN.USE_FLIPPED = False
cfg.USE_GPU_NMS = args.cuda
imdb, roidb, ratio_list, ratio_index = combined_roidb(args.imdb_name)
imdb_org, roidb_org, ratio_list_org, ratio_index_org = combined_roidb(args.imdb_name_org)
train_size = len(roidb)
imdb_expert, roidb_expert, ratio_list_expert, ratio_index_expert = combined_roidb(args.imdb_name_expert)
print('{:d} roidb entries'.format(len(roidb)))
if not os.path.exists(args.save_dir):
os.mkdir(args.save_dir)
output_dir = args.save_dir + "/" + args.net + "/" + args.dataset
if not os.path.exists(output_dir):
os.makedirs(output_dir)
sampler_batch = sampler(train_size, args.batch_size)
dataset = roibatchLoader(roidb, ratio_list, ratio_index, args.batch_size, \
imdb.num_classes, training=True)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=args.batch_size,
sampler=sampler_batch, num_workers=args.num_workers)
# initilize the tensor holder here.
im_data = torch.FloatTensor(1)
im_info = torch.FloatTensor(1)
num_boxes = torch.LongTensor(1)
gt_boxes = torch.FloatTensor(1)
# ship to cuda
if args.cuda:
im_data = im_data.cuda()
im_info = im_info.cuda()
num_boxes = num_boxes.cuda()
gt_boxes = gt_boxes.cuda()
# make variable
im_data = Variable(im_data)
im_info = Variable(im_info)
num_boxes = Variable(num_boxes)
gt_boxes = Variable(gt_boxes)
if args.cuda:
cfg.CUDA = True
# initilize the network here.
if args.net == 'vgg16':
fasterRCNN_org = vgg16(imdb_org.classes, pretrained=True, class_agnostic=args.class_agnostic)
fasterRCNN_inc = vgg16_inc(imdb.classes, pretrained=True, class_agnostic=args.class_agnostic)
basefrcnn_load_name = 'models/vgg16/pascal_voc_0712/faster_rcnn_1_20.pth'
elif args.net == 'res101':
fasterRCNN_org = resnet(imdb_org.classes, 101, pretrained=True, class_agnostic=args.class_agnostic,model_path='data/pretrained_model/resnet101_caffe.pth')
fasterRCNN_inc = resnet_inc_bbox_distil(imdb.classes, 101, pretrained=True, class_agnostic=args.class_agnostic,model_path='data/pretrained_model/resnet101_caffe.pth')
basefrcnn_load_name = 'models_res101_voc19/res101/pascal_voc_0712/faster_rcnn_1_20.pth'
fasterRCNN_residual = resnet_residual(imdb_expert.classes, 101, pretrained=True,
class_agnostic=args.class_agnostic,model_path='data/pretrained_model/resnet101_caffe.pth') # imdb_expert.classes
elif args.net == 'res50':
fasterRCNN_org = resnet(imdb_org.classes, 50, pretrained=True, class_agnostic=args.class_agnostic)
fasterRCNN_inc = resnet_inc_bbox_distil(imdb.classes, 50, pretrained=True, class_agnostic=args.class_agnostic)
fasterRCNN_residual = resnet_residual(imdb_expert.classes, 50, pretrained=True,
class_agnostic=args.class_agnostic) # imdb_expert.classes
basefrcnn_load_name = 'model_save_dir/models_res50_caffe_voc07_19/res50/pascal_voc/faster_rcnn_1_20_9873.pth'#'models_res50_voc19/res50/pascal_voc_0712/faster_rcnn_1_18_32597.pth'
if "pascal_voc_07_15" in args.dataset:
basefrcnn_load_name = 'model_save_dir/models_res50_voc15_new/res50/pascal_voc_07_15/faster_rcnn_1_20_9003.pth'
if "pascal_voc_07_10" in args.dataset:
basefrcnn_load_name = 'model_save_dir/models_res50_voc10/res50/pascal_voc_07_10/faster_rcnn_1_20_6003.pth'
if "coco" in args.dataset:
basefrcnn_load_name = 'model_save_dir/models_res50_coco40/res50/coco_40_train/faster_rcnn_1_20_34040.pth'
elif args.net == 'res152':
fasterRCNN_org = resnet(imdb_org.classes, 152, pretrained=True, class_agnostic=args.class_agnostic)
fasterRCNN_inc = resnet_inc_bbox_distil(imdb.classes, 152, pretrained=True, class_agnostic=args.class_agnostic)
basefrcnn_load_name = 'models_res152_voc19/res152/pascal_voc_0712/faster_rcnn_1_20.pth'
else:
print("network is not defined")
pdb.set_trace()
if args.load_model!="":
basefrcnn_load_name=args.load_model
#expert_model=args.expert_model
fasterRCNN_residual.create_architecture()
fasterRCNN_org.create_architecture()
fasterRCNN_inc.create_architecture()
lr = cfg.TRAIN.LEARNING_RATE
lr = args.lr
#tr_momentum = cfg.TRAIN.MOMENTUM
#tr_momentum = args.momentum
params = []
for key, value in dict(fasterRCNN_inc.named_parameters()).items():
if value.requires_grad:
if 'bias' in key:
params += [{'params':[value],'lr':lr*(cfg.TRAIN.DOUBLE_BIAS + 1), \
'weight_decay': cfg.TRAIN.BIAS_DECAY and cfg.TRAIN.WEIGHT_DECAY or 0}]
else:
params += [{'params':[value],'lr':lr, 'weight_decay': cfg.TRAIN.WEIGHT_DECAY}]
for key, value in dict(fasterRCNN_residual.named_parameters()).items():
if value.requires_grad:
if 'bias' in key:
params += [{'params': [value], 'lr': lr * (cfg.TRAIN.DOUBLE_BIAS + 1), \
'weight_decay': cfg.TRAIN.BIAS_DECAY and cfg.TRAIN.WEIGHT_DECAY or 0}]
else:
params += [{'params': [value], 'lr': lr, 'weight_decay': cfg.TRAIN.WEIGHT_DECAY}]
if args.cuda:
fasterRCNN_org.cuda()
fasterRCNN_inc.cuda()
fasterRCNN_residual.cuda()
if args.optimizer == "adam":
lr = lr * 0.1
optimizer = torch.optim.Adam(params)
elif args.optimizer == "sgd":
optimizer = torch.optim.SGD(params, momentum=cfg.TRAIN.MOMENTUM)
if args.resume:
load_name = os.path.join(output_dir,
'faster_rcnn_{}_{}_{}.pth'.format(args.checksession, args.checkepoch, args.checkpoint))
print("loading checkpoint %s" % (load_name))
checkpoint = torch.load(load_name)
args.session = checkpoint['session']
args.start_epoch = checkpoint['epoch']
fasterRCNN_inc.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
lr = optimizer.param_groups[0]['lr']
if 'pooling_mode' in checkpoint.keys():
cfg.POOLING_MODE = checkpoint['pooling_mode']
print("loaded checkpoint %s" % (load_name))
print("load checkpoint %s" % (basefrcnn_load_name))
if args.cuda > 0:
checkpoint_org = torch.load(basefrcnn_load_name)
#checkpoint_expert = torch.load(expert_model)
else:
checkpoint_org = torch.load(basefrcnn_load_name, map_location=(lambda storage, loc: storage))
#checkpoint_expert = torch.load(expert_model, map_location=(lambda storage, loc: storage))
'''
checkpoint_1 = torch.load('model_save_dir/models_res50_voc10/res50/pascal_voc_07_10/faster_rcnn_1_20_6003.pth')
checkpoint_org['model']['RCNN_bbox_pred.weight'] = torch.cat(
(checkpoint_1['model']['RCNN_bbox_pred.weight'], checkpoint_org['model']['RCNN_bbox_pred_new.weight']), dim=0)
checkpoint_org['model']['RCNN_cls_score.weight'] = torch.cat(
(checkpoint_1['model']['RCNN_cls_score.weight'], checkpoint_org['model']['RCNN_cls_score_new.weight']), dim=0)
checkpoint_org['model']['RCNN_bbox_pred.bias'] = torch.cat(
(checkpoint_1['model']['RCNN_bbox_pred.bias'], checkpoint_org['model']['RCNN_bbox_pred_new.bias']), dim=0)
checkpoint_org['model']['RCNN_cls_score.bias'] = torch.cat(
(checkpoint_1['model']['RCNN_cls_score.bias'], checkpoint_org['model']['RCNN_cls_score_new.bias']), dim=0)
'''
if args.first > 0:
if args.trained_residual_model:
trained_residual_model=args.trained_residual_model
if args.cuda > 0:
checkpoint_tres = torch.load(trained_residual_model)
else:
checkpoint_tres = torch.load(trained_residual_model, map_location=(lambda storage, loc: storage))
imdb_tres, roidb_tres, ratio_list_tres, ratio_index_tres = combined_roidb(args.imdb_name_last_expert)
checkpoint_org['model']['RCNN_bbox_pred.weight'] = torch.cat(
(checkpoint_org['model']['RCNN_bbox_pred.weight'], checkpoint_tres['model']['RCNN_bbox_pred.weight'][4:]),
dim=0)
checkpoint_org['model']['RCNN_cls_score.weight'] = torch.cat(
(checkpoint_org['model']['RCNN_cls_score.weight'], checkpoint_tres['model']['RCNN_cls_score.weight'][1:]),
dim=0)
checkpoint_org['model']['RCNN_bbox_pred.bias'] = torch.cat(
(checkpoint_org['model']['RCNN_bbox_pred.bias'], checkpoint_tres['model']['RCNN_bbox_pred.bias'][4:]), dim=0)
checkpoint_org['model']['RCNN_cls_score.bias'] = torch.cat(
(checkpoint_org['model']['RCNN_cls_score.bias'], checkpoint_tres['model']['RCNN_cls_score.bias'][1:]), dim=0)
else:
checkpoint_org['model']['RCNN_bbox_pred.weight']=torch.cat((checkpoint_org['model']['RCNN_bbox_pred.weight'], checkpoint_org['model']['RCNN_bbox_pred_new.weight']),dim=0)
checkpoint_org['model']['RCNN_cls_score.weight']=torch.cat((checkpoint_org['model']['RCNN_cls_score.weight'], checkpoint_org['model']['RCNN_cls_score_new.weight']),dim=0)
checkpoint_org['model']['RCNN_bbox_pred.bias'] = torch.cat((checkpoint_org['model']['RCNN_bbox_pred.bias'], checkpoint_org['model']['RCNN_bbox_pred_new.bias']), dim=0)
checkpoint_org['model']['RCNN_cls_score.bias'] = torch.cat((checkpoint_org['model']['RCNN_cls_score.bias'], checkpoint_org['model']['RCNN_cls_score_new.bias']), dim=0)
############### the parameters of base class of inc model are initialized by base model ##########################
if args.base_model:
base_model=args.base_model
if args.cuda > 0:
checkpoint_base = torch.load(base_model)
else:
checkpoint_base = torch.load(base_model, map_location=(lambda storage, loc: storage))
imdb_base, roidb_base, ratio_list_base, ratio_index_base = combined_roidb(args.imdb_name_base)
checkpoint_base['model']['RCNN_bbox_pred.weight'] = torch.cat(
(checkpoint_base['model']['RCNN_bbox_pred.weight'], checkpoint_org['model']['RCNN_bbox_pred.weight'][len(imdb_base.classes)*4:]), dim=0)
checkpoint_base['model']['RCNN_cls_score.weight'] = torch.cat(
(checkpoint_base['model']['RCNN_cls_score.weight'], checkpoint_org['model']['RCNN_cls_score.weight'][len(imdb_base.classes):]), dim=0)
checkpoint_base['model']['RCNN_bbox_pred.bias'] = torch.cat(
(checkpoint_base['model']['RCNN_bbox_pred.bias'], checkpoint_org['model']['RCNN_bbox_pred.bias'][len(imdb_base.classes)*4:]), dim=0)
checkpoint_base['model']['RCNN_cls_score.bias'] = torch.cat(
(checkpoint_base['model']['RCNN_cls_score.bias'], checkpoint_org['model']['RCNN_cls_score.bias'][len(imdb_base.classes):]), dim=0)
#fasterRCNN_base = resnet(imdb_base.classes, 50, pretrained=True, class_agnostic=args.class_agnostic)
#fasterRCNN_base.create_architecture()
#fasterRCNN_base.load_state_dict(checkpoint_base['model'])
pretrained_dict_base = {k: v for k, v in
checkpoint_base['model'].items() if 'RCNN_cls_score' in k or 'RCNN_bbox_pred' in k}
###################################################################################################################
#'''
#args.session = checkpoint_expert['session']
#args.start_epoch = checkpoint_expert['epoch']
#fasterRCNN_residual.load_state_dict(checkpoint_residual['model'])
#optimizer.load_state_dict(checkpoint_expert['optimizer'])
#lr = optimizer.param_groups[0]['lr']
#if 'pooling_mode' in checkpoint_expert.keys():
# cfg.POOLING_MODE = checkpoint_expert['pooling_mode']
pretrained_dic_org = {k: v for k, v in checkpoint_org['model'].items() if 'RCNN_cls_score_new' not in k and 'RCNN_bbox_pred_new' not in k and 'discriminator' not in k}
#pretrained_dic_org=checkpoint_org['model']
fasterRCNN_org.load_state_dict(pretrained_dic_org)
#fasterRCNN_org.load_state_dict(pretrained_dic_org)
#fasterRCNN_org.load_state_dict(checkpoint_org)
frcnn_inc_model_dict=fasterRCNN_inc.state_dict()
#pretrained_dict = {k: v for k, v in checkpoint_org['model'].items() if 'cls' not in k and 'bbox' not in k}
#pretrained_dict = {k: v for k, v in checkpoint_org['model'].items() if 'bbox' not in k} ################## split old and new cls
pretrained_dict = {k: v for k, v in checkpoint_org['model'].items()} ################## split old and new cls and bbox
'''
pretrained_dict_expert_cls = {k.split('.')[0]+'_new.'+k.split('.')[1]: v[1:] for k, v in checkpoint_expert['model'].items() if 'RCNN_cls' in k }#or 'RCNN_bbox' in k
pretrained_dict_expert_box = {k.split('.')[0] + '_new.' + k.split('.')[1]: v[4:] for k, v in
checkpoint_expert['model'].items() if 'RCNN_bbox' in k}
'''
frcnn_inc_model_dict.update(pretrained_dict)
#frcnn_inc_model_dict.update(pretrained_dict_expert_cls) ################################### expert new initialize !!!!!!!!!!!!!!!!!!!!!!!!!!
#frcnn_inc_model_dict.update(pretrained_dict_expert_box) ################################### expert new initialize !!!!!!!!!!!!!!!!!!!!!!!!!!
if args.first and args.base_model:
frcnn_inc_model_dict.update(pretrained_dict_base)
fasterRCNN_inc.load_state_dict(frcnn_inc_model_dict)
########################### freeze !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
'''
for k, v in fasterRCNN_inc.named_parameters():
if 'RCNN_cls_score' in k :#or 'RCNN_bbox_pred' in k:
print(k)
v.requires_grad = False # freeze
'''
##########################################################################
if args.mGPUs:
fasterRCNN_org = nn.DataParallel(fasterRCNN_org)
fasterRCNN_inc = nn.DataParallel(fasterRCNN_inc)
fasterRCNN_residual = nn.DataParallel(fasterRCNN_residual)
iters_per_epoch = int(train_size / args.batch_size)
if args.use_tfboard:
from tensorboardX import SummaryWriter
logger = SummaryWriter("logs")
fasterRCNN_org.eval()
#for i in fasterRCNN_org.named_parameters():
# print(i)
for epoch in range(args.start_epoch, args.max_epochs + 1):
# setting to train mode
fasterRCNN_inc.train()
fasterRCNN_residual.train()
loss_temp = 0
start = time.time()
if epoch % (args.lr_decay_step + 1) == 0:
adjust_learning_rate(optimizer, args.lr_decay_gamma)
lr *= args.lr_decay_gamma
data_iter = iter(dataloader)
for step in range(iters_per_epoch):
#for i in fasterRCNN_org.named_parameters():
# print('train:', i)
data = next(data_iter)
if data==None:
print(data)
with torch.no_grad():
im_data.resize_(data[0].size()).copy_(data[0])
im_info.resize_(data[1].size()).copy_(data[1])
gt_boxes.resize_(data[2].size()).copy_(data[2])
num_boxes.resize_(data[3].size()).copy_(data[3])
#################frcnn_org_eval######################################
rois_org, cls_prob_org, bbox_pred_org, \
rpn_loss_cls_org, rpn_loss_box_org, \
RCNN_loss_cls_org, RCNN_loss_bbox_org, \
rois_label_org,_ = fasterRCNN_org(im_data, im_info, gt_boxes, num_boxes)
scores_org = cls_prob_org.data
boxes_org = rois_org.data[:, :, 1:5]
#####################################################################
fasterRCNN_inc.zero_grad()
fasterRCNN_residual.zero_grad()
rois, cls_prob, bbox_pred, \
rpn_loss_cls, rpn_loss_box, \
RCNN_loss_cls, RCNN_loss_bbox, \
rois_label,rcnn_cls_distil_loss,rcnn_bbox_distil_loss,base_feat_distil_loss, \
rpn_conv1_distil_loss, pooled_feat_distil_loss, cos_loss, \
rpn_loss_cls_r, rpn_loss_bbox_r, RCNN_loss_cls_r, RCNN_loss_bbox_r, \
base_feat_residual_loss \
= fasterRCNN_inc(im_data, im_info, gt_boxes, num_boxes, rois_org,cls_prob_org,bbox_pred_org, rois_label_org, fasterRCNN_org, step, dataset._roidb, ratio_index,fasterRCNN_residual)
loss = rpn_loss_cls.mean() + rpn_loss_box.mean() \
+ RCNN_loss_cls.mean() + RCNN_loss_bbox.mean() \
+ rcnn_cls_distil_loss.mean() + rcnn_bbox_distil_loss.mean() + base_feat_distil_loss.mean() #+ rpn_embed_distil_loss.mean() #+ margin_loss.mean() ############### distil#+ rpn_cls_distil_loss.mean() + rpn_bbox_distil_loss.mean() \
loss += rpn_loss_cls_r.mean() + rpn_loss_bbox_r.mean() \
+ RCNN_loss_cls_r.mean() + RCNN_loss_bbox_r.mean() \
+ base_feat_residual_loss.mean() + pooled_feat_distil_loss.mean()
loss_temp += loss.item()
# backward
optimizer.zero_grad()
loss.backward()
if args.net == "vgg16":
clip_gradient(fasterRCNN_inc, 10.)
optimizer.step()
if step % args.disp_interval == 0:
end = time.time()
if step > 0:
loss_temp /= (args.disp_interval + 1)
if args.mGPUs:
loss_rpn_cls = rpn_loss_cls.mean().item()
loss_rpn_box = rpn_loss_box.mean().item()
loss_rcnn_cls = RCNN_loss_cls.mean().item()
loss_rcnn_box = RCNN_loss_bbox.mean().item()
fg_cnt = torch.sum(rois_label.data.ne(0))
bg_cnt = rois_label.data.numel() - fg_cnt
loss_rcnn_cls_distil_loss=rcnn_cls_distil_loss.mean.item()
loss_rcnn_bbox_distil_loss = rcnn_bbox_distil_loss.mean.item()
loss_base_feat_distil_loss = base_feat_distil_loss.mean.item()
else:
loss_rpn_cls = rpn_loss_cls.item()
loss_rpn_box = rpn_loss_box.item()
loss_rcnn_cls = RCNN_loss_cls.item()
loss_rcnn_box = RCNN_loss_bbox.item()
fg_cnt = torch.sum(rois_label.data.ne(0))
bg_cnt = rois_label.data.numel() - fg_cnt
loss_rcnn_cls_distil_loss = rcnn_cls_distil_loss.item() ############# distil
loss_rcnn_bbox_distil_loss = rcnn_bbox_distil_loss.item()
loss_base_feat_distil_loss = base_feat_distil_loss.item()
loss_rpn_cls_r = rpn_loss_cls_r.item()
loss_rpn_box_r = rpn_loss_bbox_r.item()
loss_rcnn_cls_r = RCNN_loss_cls_r.item()
loss_rcnn_box_r = RCNN_loss_bbox_r.item()
loss_base_feat_residual = base_feat_residual_loss.item()
loss_rpn_conv1_distil_loss = rpn_conv1_distil_loss.item()
loss_pooled_feat_distil_loss= pooled_feat_distil_loss.item()
loss_cos_loss=cos_loss.item()
#loss_margin_loss = 0#margin_loss.item()
#loss_rpn_cls_distil_loss = rpn_cls_distil_loss.item()
#loss_rpn_bbox_distil_loss = rpn_bbox_distil_loss.item()
#loss_rcnn_cls_distil_loss=0####################
print("[session %d][epoch %2d][iter %4d/%4d] loss: %.4f, lr: %.2e" \
% (args.session, epoch, step, iters_per_epoch, loss_temp, lr))
print("\t\t\tfg/bg=(%d/%d), time cost: %f" % (fg_cnt, bg_cnt, end-start))
print("\t\t\trpn_cls: %.4f, rpn_box: %.4f, rcnn_cls: %.4f, rcnn_box %.4f, \
rcnn_cls_distil_loss %.4f, rcnn_bbox_distil_loss %.4f , base_feat_distil_loss %.4f, \
rpn_cls_r: %.4f, rpn_box_r: %.4f, rcnn_cls_r: %.4f, rcnn_box_r %.4f, \
loss_base_feat_residual %.4f, \
rpn_conv1_distil_loss %.4f, pooled_feat_distil_loss %.4f, cos_loss %.4f" \
% (loss_rpn_cls, loss_rpn_box, loss_rcnn_cls, loss_rcnn_box,\
loss_rcnn_cls_distil_loss,rcnn_bbox_distil_loss,base_feat_distil_loss, \
loss_rpn_cls_r, loss_rpn_box_r, loss_rcnn_cls_r, loss_rcnn_box_r, \
loss_base_feat_residual, \
loss_rpn_conv1_distil_loss, loss_pooled_feat_distil_loss, loss_cos_loss \
) )# , margin_loss %.4f, rpn_cls_distil_loss %.4f, rpn_bbox_distil_loss %.4f#,loss_margin_loss,loss_rpn_cls_distil_loss,loss_rpn_bbox_distil_loss rpn_embed_distil_loss : %.8f rpn_embed_distil_loss.item()
if args.use_tfboard:
info = {
'loss': loss_temp,
'loss_rpn_cls': loss_rpn_cls,
'loss_rpn_box': loss_rpn_box,
'loss_rcnn_cls': loss_rcnn_cls,
'loss_rcnn_box': loss_rcnn_box,
'loss_cls_distil_loss':loss_rcnn_cls_distil_loss,
'loss_bbox_distil_loss':loss_rcnn_bbox_distil_loss,
'loss_base_feat_distil_loss': loss_base_feat_distil_loss,
#'loss_margin_loss': loss_margin_loss
}
logger.add_scalars("logs_s_{}/losses".format(args.session), info, (epoch - 1) * iters_per_epoch + step)
loss_temp = 0
start = time.time()
if epoch==args.max_epochs or "coco" in args.dataset:
save_name = os.path.join(output_dir, 'faster_rcnn_{}_{}_{}.pth'.format(args.session, epoch, step))
save_checkpoint({
'session': args.session,
'epoch': epoch,
'model': fasterRCNN_inc.module.state_dict() if args.mGPUs else fasterRCNN_inc.state_dict(),
'optimizer': optimizer.state_dict(),
'pooling_mode': cfg.POOLING_MODE,
'class_agnostic': args.class_agnostic,
}, save_name)
print('save model: {}'.format(save_name))
save_name_res = os.path.join(output_dir, 'faster_rcnn_{}_{}_{}_res.pth'.format(args.session, epoch, step))
save_checkpoint({
'session': args.session,
'epoch': epoch,
'model': fasterRCNN_residual.module.state_dict() if args.mGPUs else fasterRCNN_residual.state_dict(),
'optimizer': optimizer.state_dict(),
'pooling_mode': cfg.POOLING_MODE,
'class_agnostic': args.class_agnostic,
}, save_name_res)
print('save model: {}'.format(save_name_res))
if args.use_tfboard:
logger.close()
| StarcoderdataPython |
5056192 | <filename>reg_tests/lib/errorPlotting.py
#
# Copyright 2017 National Renewable Energy Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This library provides tools for plotting the output channels over time of a
given solution attribute for two OpenFAST solutions, with the second solution
assumed to be the baseline for comparison. There are functions for solution
file I/O, plot creation, and html creation for navigating the plots.
"""
import os
import sys
import shutil
import numpy as np
import rtestlib as rtl
from fast_io import load_output
def _validateAndExpandInputs(argv):
    """Validate the argument list and return its three components.

    Exits (via rtl) when the argument count is wrong or when either
    solution file does not exist on disk.
    """
    rtl.validateInputOrExit(argv, 3, "solution1 solution2 attribute")
    test_solution, baseline_solution, attribute = argv[0], argv[1], argv[2]
    for solution_file in (test_solution, baseline_solution):
        rtl.validateFileOrExit(solution_file)
    return (test_solution, baseline_solution, attribute)
def _parseSolution(solution):
    """Load an OpenFAST output file and return its (data, info) pair.

    Any failure is fatal: the error is reported through rtl and the
    process exits.
    """
    try:
        data, info, _ = load_output(solution)
    except Exception as e:
        rtl.exitWithError("Error: {}".format(e))
    else:
        return (data, info)
def _plotError(xseries, y1series, y2series, xlabel, title1, title2):
    """Build the two-panel Bokeh comparison plot for one output channel.

    Left panel overlays the baseline and local time series; right panel
    (sharing the x range) shows the absolute difference between them.
    Returns the (script, div) embed components for the generated grid.

    Note: `xlabel` is accepted for interface compatibility but the axes
    are labeled 'Time (s)' directly.
    """
    from bokeh.embed import components
    from bokeh.layouts import gridplot
    from bokeh.plotting import figure
    from bokeh.models.tools import HoverTool

    p1 = figure(title=title1)
    p1.title.align = 'center'
    p1.grid.grid_line_alpha=0.3
    p1.xaxis.axis_label = 'Time (s)'
    # Use legend_label consistently: the old `legend=` keyword was
    # deprecated in Bokeh 1.4 and later removed, so mixing both styles
    # (as the original did) raises on current Bokeh versions.
    p1.line(xseries, y1series, color='green', line_width=3, legend_label='Baseline')
    p1.line(xseries, y2series, color='red', line_width=1, legend_label='Local')
    p1.add_tools(HoverTool(tooltips=[('Time','$x'), ('Value', '$y')],mode='vline'))

    p2 = figure(title=title2, x_range=p1.x_range)
    p2.title.align = 'center'
    p2.grid.grid_line_alpha = 0
    p2.xaxis.axis_label = 'Time (s)'
    p2.line(xseries, abs(y2series - y1series), color='blue')
    p2.add_tools(HoverTool(tooltips=[('Time','$x'), ('Error', '$y')], mode='vline'))

    # NOTE(review): plot_width/plot_height were renamed width/height in
    # Bokeh 3.x -- confirm the pinned Bokeh version before upgrading.
    grid = gridplot([[p1, p2]], plot_width=650, plot_height=375, sizing_mode="scale_both")
    script, div = components(grid)

    return script, div
def _replace_id_div(html_string, plot):
id_start = html_string.find('id=') + 4
id_end = html_string[id_start:].find('"') + id_start
html_string = plot.join((html_string[:id_start], html_string[id_end:]))
return html_string
def _replace_id_script(html_string, plot):
id_start = html_string.find('var render_items')
id_start += html_string[id_start:].find('roots')
id_start += html_string[id_start:].find('":"') + 3
id_end = html_string[id_start:].find('"') + id_start
html_string = plot.join((html_string[:id_start], html_string[id_end:]))
return html_string
def _save_plot(script, div, path, attribute):
    # Persist the Bokeh embed components for one channel as two text
    # fragments, <attribute>_script.txt and <attribute>_div.txt, under
    # *path*. finalizePlotDirectory later splices these into the case HTML.
    div_class = ' class="col-sm-12 col-md-6 col-lg-6"'
    # "_script".join((a, ".txt")) produces "<a>_script.txt".
    file_name = "_script".join((attribute, ".txt"))
    with open(os.path.join(path, file_name), 'w') as f:
        # Re-indent the script body for the final HTML and pin its root id
        # to the attribute name so div and script stay paired.
        script = _replace_id_script(script.replace('\n', '\n    '), attribute)
        f.write(script)
    file_name = "_div".join((attribute, ".txt"))
    with open(os.path.join(path, file_name), 'w') as f:
        div = _replace_id_div(div, attribute)
        # Inject the Bootstrap column classes just before the div closes...
        ix_insert = div.find('></div>')
        div = div_class.join((div[:ix_insert], div[ix_insert:]))
        # ...and a margin style on every opening <div.
        # NOTE(review): "margin:10 auto" lacks a unit on the 10 -- probably
        # intended as "10px auto"; browsers will ignore the declaration.
        style = 'style="margin:10 auto"'
        div = div.replace("<div", " ".join(("<div", style)))
        f.write(div)
def plotOpenfastError(testSolution, baselineSolution, attribute):
    """Create and save the comparison plot for one output channel.

    Parses both solution files, extracts *attribute*'s channel, renders
    the baseline/local overlay plus the error panel, and writes the embed
    components under "<solution dir>/plots". Exits via rtl on any
    validation failure or unknown channel name.
    """
    testSolution, baselineSolution, attribute = _validateAndExpandInputs([
        testSolution, baselineSolution, attribute
    ])
    dict1, info1 = _parseSolution(testSolution)
    dict2, info2 = _parseSolution(baselineSolution)

    try:
        channel = info1['attribute_names'].index(attribute)
    except Exception as e:
        rtl.exitWithError("Error: Invalid channel name--{}".format(e))

    title1 = attribute + " (" + info1["attribute_units"][channel] + ")"
    title2 = "Max norm"
    xlabel = 'Time (s)'

    timevec = dict1[:, 0]
    # np.float was a deprecated alias for the builtin float and was removed
    # in NumPy 1.24; use the builtin directly (identical behavior).
    y1series = np.array(dict1[:, channel], dtype=float)
    y2series = np.array(dict2[:, channel], dtype=float)
    script, div = _plotError(timevec, y1series, y2series, xlabel, title1, title2)

    basePath = os.path.sep.join(testSolution.split(os.path.sep)[:-1])
    plotPath = os.path.join(basePath, "plots")
    rtl.validateDirOrMkdir(plotPath)
    _save_plot(script, div, plotPath, attribute)
def _htmlHead(title):
    # Build the document preamble: Bootstrap 3 + Bokeh 1.2 CSS/JS from CDNs
    # plus the inline styles used to highlight summary-table cells.
    # NOTE(review): the integrity attributes contain "<KEY>" placeholders
    # (dataset redaction); restore real SRI hashes before use.
    head = '<!DOCTYPE html>' + '\n'
    head += '<html>' + '\n'
    head += '<head>' + '\n'
    head += '    <title>{}</title>'.format(title) + '\n'
    head += '    <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css" rel="stylesheet" integrity="<KEY>" crossorigin="anonymous">' + '\n'
    head += '    <link href="https://cdn.pydata.org/bokeh/release/bokeh-widgets-1.2.0.min.css" rel="stylesheet" type="text/css">' + '\n'
    head += '    <link href="https://cdn.pydata.org/bokeh/release/bokeh-1.2.0.min.css" rel="stylesheet" type="text/css">' + '\n'
    head += '    <script src="https://ajax.googleapis.com/ajax/libs/jquery/1.12.4/jquery.min.js"></script>' + '\n'
    head += '    <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js" integrity="<KEY>" crossorigin="anonymous"></script>' + '\n'
    head += '    <script src="https://cdn.pydata.org/bokeh/release/bokeh-1.2.0.min.js"></script>' + '\n'
    head += '    <script src="https://cdn.pydata.org/bokeh/release/bokeh-widgets-1.2.0.min.js"></script>' + '\n'
    head += '    <script type="text/javascript"> Bokeh.set_log_level("info"); </script>' + '\n'
    # NOTE(review): the <style> lines below omit '\n', so the whole style
    # block lands on one line of the generated HTML. Still valid CSS,
    # just hard to read in the output.
    head += '    <style media="screen" type="text/css">'
    head += '        .cell-warning {'
    head += '            background-color: #efc15c;'
    head += '        }'
    head += '        .cell-highlight {'
    head += '            background-color: #f5ed86 ;'
    head += '        }'
    head += '    </style>'
    head += '</head>' + '\n'
    return head
def _htmlTail():
tail = '</html>' + '\n'
return tail
def _tableHead(columns):
    # Emit the opening <table> markup and a header row: a leading "#"
    # column followed by one <th> per entry in *columns*. The table body
    # and closing tag are appended by the callers.
    head = '    <table class="table table-bordered table-hover table-sm" style="margin: auto; width: 100%; font-size:80%">' + '\n'
    head += '      <thead>' + '\n'
    head += '        <tr>' + '\n'
    head += '          <th>#</th>' + '\n'
    for column in columns:
        head += '          <th>{}</th>'.format(column) + '\n'
    head += '        </tr>' + '\n'
    head += '      </thead>' + '\n'
    return head
def finalizePlotDirectory(test_solution, plot_list, case):
    # Merge the per-attribute div/script fragments written by _save_plot
    # into the case's existing HTML page, then delete the temporary
    # "plots" directory. *plot_list* is the ordered list of attribute names.
    base_path = os.path.sep.join(test_solution.split(os.path.sep)[:-1])
    plot_path = os.path.join(base_path, "plots")
    with open(os.path.join(base_path, '.'.join((case, 'html'))), 'r') as html:
        html = html.read()
    # Scripts will be inserted just after the last </script> already present.
    script_ix = html.rfind('</script>\n') + len('</script>\n')
    # Append each saved div fragment, then close the container markup.
    for i, plot in enumerate(plot_list):
        _path = os.path.join(plot_path, plot + '_div.txt')
        with open(_path, 'r') as f:
            div = f.read().strip().join(('      ', '\n'))
        html = ''.join((html, div))
    html = ''.join((html, '    </div>' + '\n'))
    html = ''.join((html, '    </div>' + '\n'))
    html = ''.join((html, '</body>' + '\n'))
    html = ''.join((html, _htmlTail()))
    # Concatenate all script fragments in the same order as the divs.
    for i, plot in enumerate(plot_list):
        _path = os.path.join(plot_path, f'{plot}_script.txt')
        with open(_path, "r") as f:
            _s = f.read()
        if i == 0:
            script = _s
        else:
            script = ''.join((script, _s))
    # The fragments have been consumed; remove the scratch directory.
    shutil.rmtree(plot_path, ignore_errors=True)
    script = ''.join((script, '\n'))
    html = script.join((html[:script_ix], html[script_ix:]))
    with open(os.path.join(base_path, '.'.join((case, 'html'))), 'w') as f:
        f.write(html)
def exportResultsSummary(path, results):
    """Write regression_test_summary.html under *path*.

    One table row per test case: a link to the case page plus its
    PASS/FAIL status. *results* is an iterable of
    (case_name, pass_fail_string) pairs; FAIL cells get the
    "cell-warning" highlight class.
    """
    with open(os.path.join(path, "regression_test_summary.html"), "w") as html:
        html.write( _htmlHead("Regression Test Summary") )
        html.write('<body>' + '\n')
        html.write('  <h2 class="text-center">{}</h2>'.format("Regression Test Summary") + '\n')
        html.write('  <div class="container">' + '\n')

        # Test Case - Pass/Fail
        # (the original enumerated results but never used the index)
        data = [('<a href="{0}/{0}.html">{0}</a>'.format(r[0]), r[1]) for r in results]
        table = _tableHead(['Test Case', 'Pass/Fail'])
        body = '      <tbody>' + '\n'
        for i, d in enumerate(data):
            body += '        <tr>' + '\n'
            body += '          <th scope="row">{}</th>'.format(i+1) + '\n'
            body += '          <td>{0:s}</td>'.format(d[0]) + '\n'
            fmt = '{0:s}'
            if d[1] == "FAIL":
                body += ('          <td class="cell-warning">' + fmt + '</td>').format(d[1]) + '\n'
            else:
                body += ('          <td>' + fmt + '</td>').format(d[1]) + '\n'
            body += '        </tr>' + '\n'
        body += '      </tbody>' + '\n'
        table += body
        table += '    </table>' + '\n'
        html.write(table)
        html.write('  <br>' + '\n')
        html.write('  </div>' + '\n')
        html.write('</body>' + '\n')
        html.write( _htmlTail() )
        # No explicit close(): the with-statement already closes the file
        # (the original's html.close() inside the with block was redundant).
def exportCaseSummary(path, case, results, results_max, tolerance):
    """Write <case>.html under *path*: one row per channel with its norms.

    A cell equal to its column-wide maximum (results_max[j]) gets the
    "cell-warning" class; a cell whose norm exceeds *tolerance* gets
    "cell-highlight".

    NOTE(review): each row of *results* appears to be
    (attribute, [max_norm, l2_norm, inf_norm]) -- d[1] below is iterated,
    so the norms must arrive as a single sequence. Confirm with callers.
    """
    with open(os.path.join(path, case+".html"), "w") as html:
        html.write( _htmlHead(case + " Summary") )
        html.write('<body>\n')
        html.write('  <h2 class="text-center">{}</h2>\n'.format(case + " Summary"))
        # Fixed: the banner is an <h4> but was closed with </h2>.
        html.write('  <h4 class="text-center">Maximum values for each norm are <span class="cell-warning">highlighted</span> and failing norms (norm >= {0}) are <span class="cell-highlight">highlighted</span></h4>\n'.format(tolerance))
        html.write('  <div class="container">\n')

        data = [
            ('<a href="#{0}">{0}</a>'.format(attribute), *norms)
            for attribute, *norms in results
        ]
        cols = [
            'Channel', 'Relative Max Norm',
            'Relative L2 Norm', 'Infinity Norm'
        ]
        table = _tableHead(cols)
        body = '      <tbody>' + '\n'
        for i, d in enumerate(data):
            body += '        <tr>' + '\n'
            body += '          <th scope="row">{}</th>'.format(i+1) + '\n'
            body += '          <td>{0:s}</td>'.format(d[0]) + '\n'
            fmt = '{0:0.4e}'
            for j, val in enumerate(d[1]):
                if val == results_max[j]:
                    body += ('          <td class="cell-warning">' + fmt + '</td>\n').format(val)
                elif val > tolerance:
                    body += ('          <td class="cell-highlight">' + fmt + '</td>\n').format(val)
                else:
                    body += ('          <td>' + fmt + '</td>\n').format(val)
            body += '        </tr>' + '\n'
        body += '      </tbody>' + '\n'
        table += body
        table += '    </table>' + '\n'
        html.write(table)
        html.write('  <br>' + '\n')
        html.write('  </div>' + '\n')
        html.write('</body>' + '\n')
        html.write( _htmlTail() )
| StarcoderdataPython |
62237 | <reponame>ComtecSystem-dev/Python_DB_Pool<filename>Demo/cims_db.py<gh_stars>0
# -*- coding: utf-8 -*-
################################################################################
# _____ _ #
# / ____ | | #
# | | ___ _ __ ____| |__ ___ ___ #
# | | / _ \/ '_ ` _ \_ __/ _ \/ __/ #
# | |___| (_) | | | | | | |_| /__/ (_ #
# \_____\___/|_| |_| |_/\___\___|\___\ #
# _____ _ #
# / ____| | | #
# | (___ _ _ ___| |_ ___ _ __ ___ ___ #
# \___ \| | | / __| __/ _ \ '_ ` _ \/ __| #
# ____) | |_| \__ \ || __/ | | | | \__ \ #
# |_____/ \__, |___/\__\___|_| |_| |_|___/ #
# __/ | #
# |___/ #
# #
################################################################################
# #
# Copyright (c) 2018 Comtec Systems #
# All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT #
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the #
# License for the specific language governing permissions and limitations #
# under the License. #
# #
################################################################################
import psycopg2
from psycopg2 import pool
class DBManager(object):
    # Thin Python 2 wrapper around a psycopg2 ThreadedConnectionPool for
    # PostgreSQL. NOTE(review): despite the name, self.conn holds the
    # connection *pool*, not a single connection.
    def __init__(self):
        # Connection parameters; filled in by Connect().
        self.ip = '127.0.0.1'
        self.port = '5432'
        self.dbname = ''
        self.user_id = ''
        self.user_pw = ''
        self.conn = None
        pass
    def print_dbinfo(self):
        # Debug helper: dumps the connection parameters.
        # NOTE(review): this prints the password in clear text.
        print "[IP=%s][PORT=%s][DB Name=%s][User ID=%s][User PW=%s]" % (self.ip, self.port, self.dbname, self.user_id, self.user_pw)
    def Connect(self, ip, port, dbname, user_id, user_pw):
        # Create a thread-safe pool of 1..20 connections; True on success.
        self.ip = ip
        self.port = port
        self.dbname = dbname
        self.user_id = user_id
        self.user_pw = user_pw
        self.print_dbinfo()
        #self.conn = psycopg2.connect("host='%s' port='%s' dbname='%s' user='%s' password='%s'" % (self.ip, self.port, self.dbname, self.user_id, self.user_pw))
        # NOTE(review): "<PASSWORD>" is a dataset redaction placeholder --
        # this argument was presumably user_pw; the line will not parse as-is.
        self.conn= psycopg2.pool.ThreadedConnectionPool(1
                        , 20
                        , user = user_id
                        , password = <PASSWORD>
                        , host = ip
                        , port = port
                        , database = dbname)
        if self.conn is None:
            return False
        return True
    def Get_Conn(self):
        # Borrow a connection from the pool (None if Connect never ran).
        if self.conn is None:
            return None
        return self.conn.getconn()
    def Put_Conn(self, conn):
        # Return a borrowed connection to the pool.
        if self.conn is None:
            return None
        self.conn.putconn(conn)
    def Select(self, strQuery):
        # Run a SELECT and return its cursor (None when no pool exists).
        # NOTE(review): the connection is returned to the pool *before* the
        # caller iterates the cursor -- another thread could reuse the
        # connection while the cursor is still being consumed. Verify usage.
        #print strQuery
        # Init
        conn = self.Get_Conn()
        if conn is None:return None
        cursor = conn.cursor()
        #cursor = self.conn.cursor()
        # Execute the query
        try:
            cursor.execute(strQuery)
        except psycopg2.IntegrityError :
            conn.rollback()
            print "(Select) psycopg2.IntegrityError"
        except Exception as e:
            conn.rollback()
            cursor.close()
            print "(Select) Exception : %s" % (e)
        self.Put_Conn(conn);
        return cursor
    def Execute(self, strQuery):
        # Run a single write query with commit/rollback; returns True on
        # success, False on failure, None when no pool exists.
        return_state = None
        #print "(Query Execute) %s " % strQuery
        # Init
        conn = self.Get_Conn()
        if conn is None:return None
        cursor = conn.cursor()
        #cursor = self.conn.cursor()
        # Execute the query
        try :
            cursor.execute(strQuery)
            conn.commit()
            return_state = True
        except psycopg2.IntegrityError :
            conn.rollback()
            return_state = False
            print "(Execute) psycopg2.IntegrityError"
        except Exception as e:
            conn.rollback()
            return_state = False
            print "(Execute) Exception : %s" % (e)
        self.Put_Conn(conn);
        cursor.close()
        return return_state
    def Execute_List(self, list_query):
        # Run several queries as one transaction: commit only when every
        # query succeeded, otherwise roll the whole batch back.
        return_state = True
        # Init
        conn = self.Get_Conn()
        if conn is None:return None
        cursor = conn.cursor()
        #cursor = self.conn.cursor()
        for strQuery in list_query:
            #print "(Query Execute) %s " % strQuery
            # Execute the query
            try :
                cursor.execute(strQuery)
            except psycopg2.IntegrityError :
                print "(Execute_List) psycopg2.IntegrityError"
                return_state = False;
                break;
            except Exception as e:
                print "(Execute_List) Exception : %s" % (e)
                return_state = False;
                break;
        if return_state == None or return_state==False:
            conn.rollback()
        else:
            conn.commit()
        cursor.close()
        self.Put_Conn(conn);
        return return_state
dbmanager = DBManager() | StarcoderdataPython |
1849972 | revision = '<KEY>'
down_revision = '0dec7733925d'
branch_labels = None
depends_on = None
import alembic
import sqlalchemy
def upgrade():
    # Rename users.patreon_user -> users.patreon_user_id.
    alembic.op.alter_column('users', 'patreon_user', new_column_name="patreon_user_id")
def downgrade():
    # Revert the rename: users.patreon_user_id -> users.patreon_user.
    alembic.op.alter_column('users', 'patreon_user_id', new_column_name="patreon_user")
| StarcoderdataPython |
6641054 | <filename>ghidra_scripts/RunSolve.py
#Run angr solver using given parameters
#@author jackdekleuver
#@category Concolic
#@keybinding
#@menupath Tools.Concolic Execution.Run
#@toolbar
import argparse
def run_script(server_host, server_port):
import ghidra_bridge
# load something ghidra doesn't have
import angr
from angr.engines.pcode.lifter import IRSB, PcodeBasicBlockLifter, ExitStatement, IRSB_MAX_SIZE, IRSB_MAX_INST, MAX_INSTRUCTIONS, MAX_BYTES
import claripy
import sys
import pypcode
import archinfo
import time
print("Running inside the bridge!")
# create the bridge and load the flat API/ghidra modules into the namespace
with ghidra_bridge.GhidraBridge(connect_to_host=server_host, connect_to_port=server_port, namespace=globals()) as bridge:
        class MemoryMapping():
            # Record pairing a Ghidra program with the memory address it is
            # mapped at. NOTE(review): appears unused in this script.
            def __init__(self, program, startAddress):
                self.program = program
                self.startAddress = startAddress
        # when calling an external function, we need to remember which function and library it is that we call
        # (consumed and reset by get_pcode_at_address; set by successor_func)
        next_function = ""
        next_library = ""
        class MySpace():
            # Minimal stand-in for a pypcode address space: only .name is read.
            def __init__(self, name):
                self.name = name
        class MyAddress(pypcode.Address):
            # pypcode Address that also remembers the originating Ghidra
            # address so constant-ness can be queried through the bridge.
            def __init__(self, ctx, space, offset, ghidra_address):
                super().__init__(ctx, space, offset)
                self.ghidra_address = ghidra_address
            @property
            def is_constant(self):
                # Delegate to Ghidra's notion of a constant address.
                return self.ghidra_address.isConstantAddress()
        class MyVarnode(pypcode.Varnode):
            # Adapts a Ghidra Varnode to the pypcode Varnode interface
            # expected by angr's p-code engine.
            def __init__(self, ctx, space, offset, size, ghidra_varnode):
                super().__init__(ctx, space, offset, size)
                # Build a VarnodeContext so register names can be resolved
                # from the current program's language.
                program = getCurrentProgram()
                language = program.getLanguage()
                programContext = bridge.get_ghidra_api().program.util.ProgramContextImpl(language)
                spaceContext = bridge.get_ghidra_api().program.util.ProgramContextImpl(language)
                self.vcontext = bridge.get_ghidra_api().program.util.VarnodeContext(program, programContext, spaceContext)
                self.ghidra_varnode = ghidra_varnode
            def get_register_name(self):
                return self.vcontext.getRegister(self.ghidra_varnode).getName()
            def get_space_from_const(self):
                # self.ghidra_varnode.getAddress().getAddressSpace().getName() returns const, but for some reason that won't work
                return MySpace("mem") # if the name of the address space is "const" then it expects this to return an addres space with a name of either "ram" or "mem", not sure exactly the consequences of faking this out are
            def get_addr(self):
                return MyAddress(self.ctx, self.space, self.offset, self.ghidra_varnode.getAddress())
        class GhidraPcodeBlockLifter(PcodeBasicBlockLifter):
            # Block lifter that builds angr IRSBs directly from Ghidra's raw
            # p-code instead of letting pypcode re-disassemble the bytes.
            def __init__(self, arch):
                super().__init__(arch)
            '''
            Mostly copied this whole function from PcodeBasicBlockLifter
            just changed the line that calls out to pypcode translate to
            do a direct mapping from pcode to TranslationResult instead
            '''
            def lift(self,
                irsb,
                program,
                baseaddr,
                adjusted_address,
                pcodes,
                bytes_offset = 0,
                max_bytes = None,
                max_inst = None):
                """Fill *irsb* from the Ghidra p-code ops in *pcodes*.

                baseaddr is the address in the angr memory map;
                adjusted_address is the corresponding address inside
                *program* (they differ for external libraries).
                """
                if max_bytes is None or max_bytes > MAX_BYTES:
                    max_bytes = min(len(pcodes), MAX_BYTES)
                if max_inst is None or max_inst > MAX_INSTRUCTIONS:
                    max_inst = MAX_INSTRUCTIONS
                irsb.behaviors = self.behaviors # FIXME
                # Translate
                addr = baseaddr + bytes_offset
                ##### Start of modified block ######
                pcode_array = []
                for pcode in pcodes:
                    inputs_varnodes = []
                    # convert pcode input Varnodes to pypcode Varnodes
                    for inp in pcode.inputs:
                        inputs_varnodes.append(MyVarnode(self.context, inp.getAddress().getAddressSpace(), inp.offset, inp.size, inp))
                    # convert pcode output Varnode to pypcode Varnode
                    if pcode.output is not None:
                        output_varnode = MyVarnode(self.context, pcode.output.getAddress().getAddressSpace(), pcode.output.offset, pcode.output.size, pcode.output)
                    else:
                        output_varnode = None
                    # Convert Ghidra raw Pcode to pypcode PcodeOp
                    pcode_array.append(pypcode.PcodeOp(self.context, pcode.seqnum, pypcode.OpCode(pcode.opcode), inputs_varnodes, output_varnode))
                translations = []
                addrspace = getAddressFactory().getAddress(hex(baseaddr)).getAddressSpace()
                address = pypcode.Address(self.context, addrspace, baseaddr)
                # Instruction metadata (length, mnemonic) comes from the
                # owning program at the adjusted (in-program) address.
                instruction = program.getListing().getInstructionAt(getAddressFactory().getAddress(adjusted_address))
                # Convert PcodeOps to Translations
                translation = pypcode.Translation(
                    ctx = self.context,
                    address = address,
                    length = instruction.getLength(),
                    asm_mnem = instruction.getMnemonicString(),
                    asm_body = instruction.toString().split(instruction.getMnemonicString())[1],
                    ops = pcode_array
                )
                translations.append(translation)
                ##### End modified block #####
                irsb._instructions = translations
                # Post-process block to mark exits and next block
                next_block = None
                for insn in irsb._instructions:
                    for op in insn.ops:
                        if (op.opcode in [pypcode.OpCode.BRANCH, pypcode.OpCode.CBRANCH]
                            and op.inputs[0].get_addr().is_constant):
                            print('Block contains relative p-code jump at '
                                'instruction {}:{}, which is not emulated '
                                'yet.'.format(op.seq.getTarget().getOffset(), op.seq.getTime()))
                        if op.opcode == pypcode.OpCode.CBRANCH:
                            irsb._exit_statements.append((
                                op.seq.getTarget().getOffset(), op.seq.getTime(),
                                ExitStatement(op.inputs[0].offset, 'Ijk_Boring')))
                        elif op.opcode == pypcode.OpCode.BRANCH:
                            next_block = (op.inputs[0].offset, 'Ijk_Boring')
                        elif op.opcode == pypcode.OpCode.BRANCHIND:
                            next_block = (None, 'Ijk_Boring')
                        elif op.opcode == pypcode.OpCode.CALL:
                            next_block = (op.inputs[0].offset, 'Ijk_Call')
                        elif op.opcode == pypcode.OpCode.CALLIND:
                            next_block = (None, 'Ijk_Call')
                        elif op.opcode == pypcode.OpCode.RETURN:
                            next_block = (None, 'Ijk_Ret')
                # Fall-through target when no terminating branch was seen.
                if len(irsb._instructions) > 0:
                    last_insn = irsb._instructions[-1]
                    fallthru_addr = last_insn.address.offset + last_insn.length
                else:
                    fallthru_addr = addr
                if next_block is None:
                    next_block = (fallthru_addr, 'Ijk_Boring')
                irsb.next, irsb.jumpkind = next_block
        def is_successful(state):
            # Exploration goal: the state has reached the configured sink.
            if(state.ip.args[0] == sink):
                return True
            return False
        def get_func_address(funcName):
            # Entry-point address of a named function in the current program.
            return int(getFunction(funcName).getBody().getMinAddress().toString(), 16)
        def get_pcode_at_address(address):
            # Return (pcode, owning_program, address_in_program) for the
            # instruction at *address* (a hex string). Falls back to the
            # external library program when the main program misses and the
            # next_library/next_function globals are set.
            # Fails when trying to get pcode of an external thunk-ed function
            try:
                return getCurrentProgram().getListing().getInstructionAt(getAddressFactory().getAddress(address)).getPcode(), getCurrentProgram(), address
            except AttributeError:
                # The address doesn't exist in the main program, check if globals are set
                global next_library
                global next_function
                if next_library != "" and next_function != "":
                    external_program = get_external_program(next_library)
                    functionManager = external_program.getFunctionManager()
                    # NOTE(review): if the loop finds no match, `function` is
                    # unbound and the `is None` check below raises NameError.
                    for fn in functionManager.getFunctions(True):
                        if fn.getName() == next_function:
                            function = fn
                            break
                    if function is None:
                        # couldn't find the function in external program, propagate exception
                        print("Couldn't find function {} in {}".format(next_function, next_library))
                        raise
                    functionAddress = function.getBody().getMinAddress().getOffset()
                    memory_start = int(address, 16) - (functionAddress - external_program.getImageBase().getOffset()) # find the address where this library is mapped in memory
                    address_in_program = hex(int(address, 16) - memory_start + external_program.getImageBase().getOffset())
                    print("Address {} is at {} in program {}".format(address, address_in_program, next_library))
                    # Reset the one-shot globals before returning.
                    next_library = ""
                    next_function = ""
                    return external_program.getListing().getInstructionAt(getAddressFactory().getAddress(address_in_program)).getPcode(), external_program, address_in_program
                else:
                    raise
        def successor_func(state, **run_args):
            # Custom angr successor generation: lift the current instruction's
            # p-code straight from Ghidra, falling back to angr's own pypcode
            # lifter when Ghidra cannot produce the p-code.
            currentAddress = state.ip.args[0]
            containingFunction = get_function_containing_address(hex(currentAddress))
            print("current address in state:", hex(currentAddress))
            # figure out if we are about to make a call to an external program
            if containingFunction is not None and containingFunction.isThunk():
                externalLibraryName = get_library_name(containingFunction)
                print("Preparing for external function call to {} in {}".format(get_function_name(containingFunction), externalLibraryName))
                # prepare to get the function in the external program
                global next_library
                global next_function
                next_library = externalLibraryName
                next_function = get_function_name(containingFunction)
            try:
                current_pcode, program, adjusted_address = get_pcode_at_address(hex(currentAddress))
            except AttributeError:
                print("Couldn't get pcode at address:", hex(currentAddress), "falling back to pypcode lifter")
                # fallback to original lifter for external function
                return state.project.factory.successors(state, **run_args)
            irsb = IRSB.empty_block(archinfo.ArchAMD64, currentAddress, None, None, None, None, None, None)
            block_lifter.lift(irsb, program, currentAddress, adjusted_address, current_pcode, 0, None, None)
            return state.project.factory.successors(state, irsb=irsb, **run_args)
        def get_function_containing_address(address):
            # Ghidra Function containing *address* (hex string), or None.
            return currentProgram.getFunctionManager().getFunctionContaining(getAddressFactory().getAddress(address))
        def get_library_name(function):
            # Name of the external library a thunk resolves to, or None when
            # *function* is not a thunk / not external.
            if not function.isThunk():
                print("Can't find library name for a non-Thunk function")
                return None
            thunked_function = function.getThunkedFunction(True)
            if not thunked_function.isExternal():
                print("Can't get library name for function that is not external")
                return None
            return thunked_function.getExternalLocation().getLibraryName()
        def get_function_name(function):
            return function.getName()
        def get_external_program(library_name):
            # Open (immutable) the Ghidra project program associated with the
            # named external library.
            libraryPath = currentProgram.getExternalManager().getExternalLibrary(library_name).getAssociatedProgramPath()
            libraryFile = state.getProject().getProjectData().getFile(libraryPath)
            libraryProgram = libraryFile.getImmutableDomainObject(java.lang.Object(), ghidra.framework.model.DomainFile.DEFAULT_VERSION, None)
            return libraryProgram
        def get_pcode_of_external_function(program, function_name):
            # Collect the raw p-code of every instruction in the named
            # function of *program*. NOTE(review): appears unused by the
            # execution path below.
            functionManager = program.getFunctionManager()
            # NOTE(review): if no function matches, `function` is unbound and
            # the `is None` check raises NameError instead of returning None.
            for fn in functionManager.getFunctions(True):
                if fn.getName() == function_name:
                    function = fn
                    break
            if function is None:
                return None
            firstInstruction = program.getListing().getInstructionAt(function.getBody().getMinAddress())
            lastInstruction = program.getListing().getInstructionAt(function.getBody().getMaxAddress())
            currentInstruction = firstInstruction
            pcode = []
            pcode += currentInstruction.getPcode()
            # NOTE(review): the loop appends the instruction *after* the last
            # one before the termination check fires -- possible off-by-one.
            while True:
                currentInstruction = currentInstruction.getNext()
                pcode += currentInstruction.getPcode()
                if currentInstruction == lastInstruction.getNext():
                    # Reached the end of the function
                    break
            print("Min address:", function.getBody().getMinAddress())
            print("Max address:", function.getBody().getMaxAddress())
            print("Pcodes:", pcode)
            return pcode
        def get_sink_address():
            # Target address set in the Ghidra Concolic UI; exit when unset.
            sink_addr = ghidra.concolic.ConcolicAnalyzer.getSink()
            if sink_addr is None:
                print('Please set the Sink address before running the script!')
                sys.exit(1)
            return int(sink_addr.toString(), 16)
        def get_avoid_addresses():
            # Addresses the explorer should avoid (may legitimately be empty).
            avoid_addrs = [int(address.toString(), 16) for address in ghidra.concolic.ConcolicAnalyzer.getAvoidAddresses()]
            if len(avoid_addrs) == 0:
                print('WARN: list of avoid addresses is empty')
            return avoid_addrs
        def get_source_address():
            # Starting address for symbolic execution; exit when unset.
            source_addr = ghidra.concolic.ConcolicAnalyzer.getSource()
            if source_addr is None:
                print('Please set the Source address before running the script!')
                sys.exit(1)
            return int(source_addr.toString(), 16)
        ############ Setup state ##########
        start_time = time.time()
        # Get program name from ghidra
        filename = getCurrentProgram().getExecutablePath()
        base_address = getCurrentProgram().getImageBase().getOffset()
        engine = ghidra.concolic.ConcolicAnalyzer.getEngine()
        # Both p-code engines load angr with the UberEnginePcode backend;
        # VEX-based engines use angr's default.
        if engine.name() == "PYPCODE" or engine.name() == "PCODESYM":
            project = angr.Project(filename, load_options={'main_opts':{'base_addr': base_address},'auto_load_libs':False}, engine=angr.engines.UberEnginePcode)
        else:
            project = angr.Project(filename, load_options={'main_opts':{'base_addr': base_address},'auto_load_libs':False})
        sink = get_sink_address()
        avoids = get_avoid_addresses()
        start = get_source_address()
        # Build stdin as a concatenation of symbolic/concrete byte vectors.
        stdin_args = []
        for buff in ghidra.concolic.ConcolicAnalyzer.getStdin():
            if buff.getSymbolic():
                stdin_args.append(claripy.BVS('arg' + str(len(stdin_args)), len(buff.getValue())*8))
            else:
                # process string with escape characters into a bytestring
                value = buff.getValue().encode('utf-8').decode('unicode-escape').encode('utf-8')
                stdin_args.append(claripy.BVV(value))
        stdin_arg = angr.SimFileStream(name='stdin', content=claripy.Concat(*stdin_args), has_end=False)
        # Build the call arguments (scalars or pointer arrays) the same way.
        func_args = []
        for arg in ghidra.concolic.ConcolicAnalyzer.getArgs():
            array_elems = []
            for elem in arg.getValues():
                if arg.getSymbolic():
                    array_elems.append(claripy.BVS('arg'+str(len(func_args)), len(elem)*8))
                else:
                    # process string with escape characters into a bytestring
                    value = elem.encode('utf-8').decode('unicode-escape').encode('utf-8')
                    array_elems.append(claripy.BVV(value))
            if arg.getArray():
                func_args.append([angr.PointerWrapper(e) for e in array_elems])
            else:
                func_args.append(array_elems[0])
        call_state = project.factory.call_state(start, *func_args, stdin=stdin_arg, add_options={angr.options.LAZY_SOLVES,
            angr.options.ZERO_FILL_UNCONSTRAINED_MEMORY, angr.options.ZERO_FILL_UNCONSTRAINED_REGISTERS})
        simulation = project.factory.simgr(call_state)
        block_lifter = GhidraPcodeBlockLifter(archinfo.ArchAMD64)
        ######### Do symbolic execution ########
        # PCODESYM routes block lifting through Ghidra via successor_func.
        if engine.name() == "PCODESYM":
            simulation.explore(find=is_successful, avoid=avoids, successor_func=successor_func)
        else:
            simulation.explore(find=is_successful, avoid=avoids)
        ######## Post run analysis #########
        # Concretize each argument (and stdin) for every state that reached
        # the sink; strings are truncated at the first NUL byte.
        if len(simulation.found) > 0:
            for solution_state in simulation.found:
                for i, arg in enumerate(func_args):
                    if isinstance(arg, list):
                        print("[>>] arg {}:".format(i+1))
                        for k, elem in enumerate(arg):
                            print("\t{}: {!r}".format(k+1, solution_state.solver.eval(elem.value, cast_to=bytes).split(b"\0")[0]))
                    else:
                        print("[>>] arg {}: {!r}".format(i+1, solution_state.solver.eval(arg, cast_to=bytes).split(b"\0")[0]))
                print("stdin: {}".format(solution_state.posix.dumps(0)))
        else:
            print("[>>>] no solution found :(")
        print("Script ran in {} seconds".format(time.time() - start_time))
print("Script ran in {} seconds".format(time.time() - start_time))
if __name__ == "__main__":
    # The script runs in two phases: first inside Ghidra's Jython (where the
    # `ghidra` module imports), then re-spawned in external CPython which
    # connects back over the bridge.
    in_ghidra = False
    try:
        import ghidra
        # we're in ghidra!
        in_ghidra = True
    except ModuleNotFoundError:
        # not ghidra
        pass
    if in_ghidra:
        import ghidra_bridge_server
        script_file = getSourceFile().getAbsolutePath()
        # spin up a ghidra_bridge_server and spawn the script in external python to connect back to it
        python_path = ghidra.concolic.ConcolicAnalyzer.getPython()
        ghidra_bridge_server.GhidraBridgeServer.run_script_across_ghidra_bridge(script_file, python=python_path)
    else:
        # we're being run outside ghidra! (almost certainly from spawned by run_script_across_ghidra_bridge())
        parser = argparse.ArgumentParser(
            description="Example py3 script that's expected to be called from ghidra with a bridge")
        # the script needs to handle these command-line arguments and use them to connect back to the ghidra server that spawned it
        parser.add_argument("--connect_to_host", type=str, required=False,
                            default="127.0.0.1", help="IP to connect to the ghidra_bridge server")
        parser.add_argument("--connect_to_port", type=int, required=True,
                            help="Port to connect to the ghidra_bridge server")
        args = parser.parse_args()
        run_script(server_host=args.connect_to_host,
                   server_port=args.connect_to_port)
| StarcoderdataPython |
234794 | <reponame>CarlosMatheus/Python-Web-Framework
"""
This exemplifies how a WSGI server works.
"""
# This is a function from a default python library that allows us to make a WSGI server.
from wsgiref.simple_server import make_server
from wsgi.web_server_gateway_interface import WebServerGatewayInterface
def web_app(environment, response):
    """Minimal WSGI application: serve ./templates/index.html for any path.

    :param environment: WSGI environ dict (PATH_INFO is logged for debugging)
    :param response: WSGI start_response callable
    :return: iterable of bytes making up the response body
    """
    print(environment['PATH_INFO'])
    status = '200 OK'
    headers = [('Content-type', 'text/html; charset=utf-8')]
    response(status, headers)
    # Use a context manager so the template file handle is closed promptly
    # instead of being leaked until garbage collection.
    with open('./templates/index.html', 'r') as template:
        body = template.read()
    return [body.encode()]
# Empty string means that the host will be local host
host = ''
port = 8080
# with make_server(host, port, web_app) as server:
# if not host:
# print("Serving on port %d \nVisit http://127.0.0.1:%d" % (port, port))
# else:
# print("Serving on port %d \nVisit http://%s:%d" % (port, host, port))
# print("To kill the server enter 'control + c'")
#
# # will server forever until we kill it
# server.serve_forever()
with WebServerGatewayInterface(host, port, web_app) as server:
if not host:
print("Serving on port %d \nVisit http://127.0.0.1:%d" % (port, port))
else:
print("Serving on port %d \nVisit http://%s:%d" % (port, host, port))
print("To kill the server enter 'control + c'")
# will server forever until we kill it
server.serve_forever()
| StarcoderdataPython |
8059784 | from __future__ import annotations
from ..typecheck import *
from enum import IntEnum
from ..import core
from .import dap
from ..watch import Watch
from .debugger import Debugger
from .error import Error
from ..breakpoints import (
Breakpoints,
SourceBreakpoint,
)
from .variable import (
Variable,
SourceLocation,
)
from .configuration import (
AdapterConfiguration,
ConfigurationExpanded,
TaskExpanded
)
from .transport import TransportProtocol, TransportProtocolListener
class SessionListener (Protocol):
    """Callbacks a Session uses to report activity to its owner (the UI layer)."""

    async def on_session_task_request(self, session: Session, task: TaskExpanded): ...
    async def on_session_terminal_request(self, session: Session, request: dap.RunInTerminalRequestArguments) -> dap.RunInTerminalResponse: ...

    def on_session_state_changed(self, session: Session, state: Session.State): ...
    def on_session_selected_frame(self, session: Session, frame: Optional[dap.StackFrame]): ...
    def on_session_output_event(self, session: Session, event: dap.OutputEvent): ...

    def on_session_updated_modules(self, session: Session): ...
    def on_session_updated_sources(self, session: Session): ...
    def on_session_updated_variables(self, session: Session): ...
    def on_session_updated_threads(self, session: Session): ...
class Session(TransportProtocolListener, core.Logger):
    """One live debug-adapter (DAP) session.

    Owns the transport to the adapter process, mirrors the adapter-side
    state (threads, variables, modules, sources, breakpoint results) and
    forwards interesting changes to a SessionListener. Sessions may be
    nested: adapters that spawn sub-sessions attach them as children.
    """

    class State (IntEnum):
        STARTING = 3
        STOPPED = 0
        STOPPING = 4

        # paused/running is based on selected thread
        PAUSED = 1
        RUNNING = 2

    # reasons passed to stop_forced() describing why the session ended
    stopped_reason_build_failed=0
    stopped_reason_launch_error=1
    stopped_reason_dispose=2
    stopped_reason_cancel=3
    stopped_reason_terminated_event=4
    stopped_reason_manual=5

    def __init__(self,
        adapter_configuration: AdapterConfiguration,
        configuration: ConfigurationExpanded,
        restart: Any|None,
        no_debug: bool,
        breakpoints: Breakpoints,
        watch: Watch,
        listener: SessionListener,
        transport_log: core.Logger,
        debugger: Debugger,
        parent: Session|None = None
    ) -> None:

        self.adapter_configuration = adapter_configuration
        self.configuration = configuration
        self.restart = restart
        self.no_debug = no_debug
        self.listener = listener
        self.children: list[Session] = []
        self.parent = parent
        self.debugger = debugger

        if parent:
            parent.children.append(self)

        self.transport_log = transport_log
        self.state_changed = core.Event[int]()

        # shared breakpoint model; adapter-reported results are written back
        # into it, and changes made in the UI are pushed via the on_send hooks
        self.breakpoints = breakpoints
        self.breakpoints_for_id: dict[int, SourceBreakpoint] = {}
        self.breakpoints.data.on_send.add(self.on_send_data_breakpoints)
        self.breakpoints.function.on_send.add(self.on_send_function_breakpoints)
        self.breakpoints.filters.on_send.add(self.on_send_filters)
        self.breakpoints.source.on_send.add(self.on_send_source_breakpoint)

        self.watch = watch
        self.watch.on_added.add(lambda expr: self.watch.evaluate_expression(self, expr))

        self._transport: Optional[TransportProtocol] = None

        self.launching_async: Optional[core.Future] = None
        self.capabilities = dap.Capabilities()
        self.stop_requested = False
        self.launch_request = True
        self._state = Session.State.STARTING
        self._status = 'Starting'

        self.disposeables: list[Any] = []

        # resolved once the session has fully stopped (see wait())
        self.complete: core.Future[None] = core.Future()

        self.threads_for_id: dict[int, Thread] = {}
        self.all_threads_stopped = False

        # current (thread, frame) selection; explicit selections come from
        # the user and are not overridden by automatic selection
        self.selected_explicitly = False
        self.selected_thread = None
        self.selected_frame = None

        self.threads: list[Thread] = []
        self.variables: list[Variable] = []
        self.sources: dict[int|str, dap.Source] = {}
        self.modules: dict[int|str, dap.Module] = {}
    @property
    def name(self) -> str:
        """Display name taken from the launch configuration."""
        return self.configuration.name

    @property
    def state(self) -> State:
        return self._state

    @state.setter
    def state(self, state: State) -> None:
        # only notify the listener on an actual transition
        if self._state == state:
            return

        self._state = state
        self.listener.on_session_state_changed(self, state)

    @property
    def status(self) -> str|None:
        """Short human readable status line (e.g. 'Running', 'Paused')."""
        return self._status

    def _change_status(self, status: str):
        # status text changes re-notify the listener even when the state
        # enum itself is unchanged
        self._status = status
        self.listener.on_session_state_changed(self, self._state)
    async def launch(self) -> None:
        """Start the session, reporting failures and cancellation cleanly."""
        try:
            self.launching_async = core.run(self._launch())
            await self.launching_async
        except core.Error as e:
            self.launching_async = None
            core.exception(e)
            self.error('... an error occured, ' + str(e))
            await self.stop_forced(reason=Session.stopped_reason_launch_error)
        except core.CancelledError:
            ...

        self.launching_async = None

    async def _launch(self) -> None:
        """Perform the full DAP start sequence.

        Resolves the configuration, runs the pre-debug task, spawns the
        adapter process, sends 'initialize' followed by 'launch'/'attach',
        and finally fetches the baseline thread list.
        """
        assert self.state == Session.State.STOPPED, 'debugger not in stopped state?'
        self.state = Session.State.STARTING
        self.configuration = await self.adapter_configuration.configuration_resolve(self.configuration)

        if not self.adapter_configuration.installed_version:
            raise core.Error('Debug adapter with type name "{}" is not installed. You can install it by running Debugger: Install Adapters'.format(self.adapter_configuration.type))

        if not await self.run_pre_debug_task():
            self.info('Pre debug command failed, not starting session')
            self.launching_async = None
            await self.stop_forced(reason=Session.stopped_reason_build_failed)
            return

        self._change_status('Starting')
        try:
            transport = await self.adapter_configuration.start(log=self.transport_log, configuration=self.configuration)
        except Exception as e:
            raise core.Error(f'Unable to start the adapter process: {e}')

        self._transport = TransportProtocol(
            transport,
            self,
            self.transport_log
        )

        # advertise our client capabilities and record the adapter's
        capabilities: dap.Capabilities = await self.request('initialize', {
            'clientID': 'sublime',
            'clientName': 'Sublime Text',
            'adapterID': self.configuration.type,
            'pathFormat': 'path',
            'linesStartAt1': True,
            'columnsStartAt1': True,
            'supportsVariableType': True,
            'supportsVariablePaging': False,
            'supportsRunInTerminalRequest': True,
            'supportsMemoryReferences': True,
            'locale': 'en-us'
        })
        self.capabilities = capabilities

        # remove/add any exception breakpoint filters
        self.breakpoints.filters.update(capabilities.exceptionBreakpointFilters or [])

        if self.restart:
            self.configuration['__restart'] = self.restart
        if self.no_debug:
            self.configuration['noDebug'] = True

        if self.configuration.request == 'launch':
            self.launch_request = True
            await self.request('launch', self.configuration)
        elif self.configuration.request == 'attach':
            self.launch_request = False
            await self.request('attach', self.configuration)
        else:
            raise core.Error('expected configuration to have request of either "launch" or "attach" found {}'.format(self.configuration.request))

        self.adapter_configuration.did_start_debugging(self)

        # get the baseline threads after launch/attach
        # according to https://microsoft.github.io/debug-adapter-protocol/overview
        self.refresh_threads()

        # At this point we are running?
        self._change_status('Running')
        self.state = Session.State.RUNNING
    async def request(self, command: str, arguments: Any) -> Any:
        """Send a DAP request over the transport and await its response."""
        if not self._transport:
            raise core.Error(f'Debug Session {self.status}')

        return await self._transport.send_request_asyc(command, arguments)

    async def wait(self) -> None:
        """Block until the session has fully stopped."""
        await self.complete

    async def run_pre_debug_task(self) -> bool:
        """Run the configured pre-debug task, if any. Returns success."""
        pre_debug_command = self.configuration.pre_debug_task
        if pre_debug_command:
            self._change_status('Running pre debug command')
            r = await self.run_task('Pre debug command', pre_debug_command)
            return r
        return True

    async def run_post_debug_task(self) -> bool:
        """Run the configured post-debug task, if any. Returns success."""
        post_debug_command = self.configuration.post_debug_task
        if post_debug_command:
            self._change_status('Running post debug command')
            r = await self.run_task('Post debug command', post_debug_command)
            return r
        return True

    async def run_task(self, name: str, task: TaskExpanded) -> bool:
        """Ask the listener to run *task*; failures are logged, not raised."""
        try:
            await self.listener.on_session_task_request(self, task)
            return True
        except core.CancelledError:
            self.error(f'{name}: cancelled')
            return False
        except Exception as e:
            core.exception()
            self.error(f'{name}: {e}')
            return False
def _refresh_state(self) -> None:
try:
thread = self.command_thread
if thread.stopped:
self._change_status('Paused')
self.state = Session.State.PAUSED
else:
self._change_status('Running')
self.state = Session.State.RUNNING
except core.Error as e:
self.state = Session.State.RUNNING
    async def add_breakpoints(self) -> None:
        """Push every category of breakpoint to the adapter, in parallel."""
        assert self._transport

        requests: list[Awaitable[Any]] = []

        requests.append(self.set_exception_breakpoint_filters())
        requests.append(self.set_function_breakpoints())

        for file, filebreaks in self.breakpoints.source.breakpoints_per_file().items():
            requests.append(self.set_breakpoints_for_file(file, filebreaks))

        if self.capabilities.supportsDataBreakpoints:
            requests.append(self.set_data_breakpoints())

        if requests:
            await core.wait(requests)
    async def set_exception_breakpoint_filters(self) -> None:
        """Send the enabled exception filters (and their conditions)."""
        if not self._transport:
            return
        filters: list[str] = []
        filterOptions: list[dap.ExceptionFilterOptions] = []
        for f in self.breakpoints.filters:
            if f.enabled:
                filters.append(f.dap.filter)
                filterOptions.append(dap.ExceptionFilterOptions(
                    f.dap.filter,
                    f.condition,
                ))

        await self.request('setExceptionBreakpoints', {
            'filters': filters,
            'filterOptions': filterOptions
        })

    async def set_function_breakpoints(self) -> None:
        """Send enabled function breakpoints; pair each result back up."""
        if not self._transport:
            return
        breakpoints = list(filter(lambda b: b.enabled, self.breakpoints.function))

        if not self.capabilities.supportsFunctionBreakpoints:
            # only show error message if the user tried to set a function breakpoint when they are not supported
            if breakpoints:
                self.error('This debugger does not support function breakpoints')
            return

        dap_breakpoints = list(map(lambda b: b.dap, breakpoints))

        response = await self.request('setFunctionBreakpoints', {
            'breakpoints': dap_breakpoints
        })
        results: list[dap.Breakpoint] = response['breakpoints']
        for result, b in zip(results, breakpoints):
            self.breakpoints.function.set_result(b, result)

    async def set_data_breakpoints(self) -> None:
        """Send enabled data breakpoints; pair each result back up."""
        if not self._transport:
            return
        breakpoints = list(filter(lambda b: b.enabled, self.breakpoints.data))
        dap_breakpoints = list(map(lambda b: b.dap, breakpoints))

        response = await self.request('setDataBreakpoints', {
            'breakpoints': dap_breakpoints
        })
        results: list[dap.Breakpoint] = response['breakpoints']
        for result, b in zip(results, breakpoints):
            self.breakpoints.data.set_result(b, result)

    async def set_breakpoints_for_file(self, file: str, breakpoints: list[SourceBreakpoint]) -> None:
        """Send the enabled source breakpoints of one file.

        Warns about features the adapter lacks; on failure every enabled
        breakpoint is marked with an empty (unverified) result.
        """
        if not self._transport:
            return

        enabled_breakpoints: list[SourceBreakpoint] = []
        dap_breakpoints: list[dap.SourceBreakpoint] = []

        for breakpoint in breakpoints:
            if breakpoint.dap.hitCondition and not self.capabilities.supportsHitConditionalBreakpoints:
                self.error('This debugger does not support hit condition breakpoints')
            if breakpoint.dap.logMessage and not self.capabilities.supportsLogPoints:
                self.error('This debugger does not support log points')
            if breakpoint.dap.condition and not self.capabilities.supportsConditionalBreakpoints:
                self.error('This debugger does not support conditional breakpoints')

            if breakpoint.enabled:
                enabled_breakpoints.append(breakpoint)
                dap_breakpoints.append(breakpoint.dap)

        try:
            response = await self.request('setBreakpoints', {
                'source': { 'path': file },
                'breakpoints': dap_breakpoints
            })
            results: list[dap.Breakpoint] = response['breakpoints']

            if len(results) != len(enabled_breakpoints):
                raise Error('expected #breakpoints to match results')

            for result, b in zip(results, enabled_breakpoints):
                self.breakpoints.source.set_result(b, result)
                # remember the adapter's id so later breakpoint events can
                # be matched back to this breakpoint
                if result.id:
                    self.breakpoints_for_id[result.id] = b

        except Error as e:
            for b in enabled_breakpoints:
                self.breakpoints.source.set_result(b, dap.Breakpoint())

    # hooks invoked when the shared breakpoint model changes in the UI
    def on_send_data_breakpoints(self, any: Any):
        core.run(self.set_data_breakpoints())

    def on_send_function_breakpoints(self, any: Any):
        core.run(self.set_function_breakpoints())

    def on_send_filters(self, any: Any):
        core.run(self.set_exception_breakpoint_filters())

    def on_send_source_breakpoint(self, breakpoint: SourceBreakpoint) -> None:
        file = breakpoint.file
        core.run(self.set_breakpoints_for_file(file, self.breakpoints.source.breakpoints_for_file(file)))
    async def stop(self):
        """User-requested stop.

        Prefers a polite 'terminate' for launched sessions that support it;
        a repeated stop request (or no transport yet) forces the session
        down immediately.
        """
        # this seems to be what the spec says to do in the overview
        # https://microsoft.github.io/debug-adapter-protocol/overview

        # haven't started session yet
        if self._transport is None:
            await self.stop_forced(reason=Session.stopped_reason_manual)
            return

        # If the stop is called multiple times then we call disconnect to forcefully disconnect
        if self.stop_requested:
            await self.stop_forced(reason=Session.stopped_reason_manual)
            return

        self._change_status('Stop Requested')
        self.stop_requested = True

        # first try to terminate if we can
        if self.launch_request and self.capabilities.supportsTerminateRequest:
            try:
                await self.request('terminate', {
                    'restart': False
                })
                return
            except Error as e:
                core.exception()

        # we couldn't terminate: either not a launch request or the terminate request failed
        # so we forcefully disconnect
        await self.request('disconnect', {
            'restart': False
        })

    def stop_debug_adapter_session(self):
        """Tear down adapter-side state and close the transport."""
        if self.launching_async:
            self.launching_async.cancel()

        self.breakpoints_for_id = {}
        self.watch.clear_session_data(self)
        self.breakpoints.clear_session_data()
        self.stop_requested = False

        if self._transport:
            self.adapter_configuration.did_stop_debugging(self)
            self._transport.dispose()
            self._transport = None

    async def stop_forced(self, reason: int) -> None:
        """Force the session into STOPPED, running the post-debug task."""
        if self.state == Session.State.STOPPING or self.state == Session.State.STOPPED:
            return

        self.stopped_reason = reason
        self.state = Session.State.STOPPING
        self.stop_debug_adapter_session()

        await self.run_post_debug_task()
        self._change_status('Ended')
        self.state = Session.State.STOPPED

        if not self.complete.done():
            self.complete.set_result(None)

    def dispose(self) -> None:
        """Release all resources and detach from the session hierarchy."""
        self.stop_debug_adapter_session()
        for disposeable in self.disposeables:
            disposeable.dispose()

        if self.parent:
            self.parent.children.remove(self)
            self.parent = None

        # clean up hierarchy if needed
        for child in self.children:
            child.parent = None
    async def resume(self):
        """Continue execution of the command thread."""
        body = await self.request('continue', {
            'threadId': self.command_thread.id
        })

        # some adapters aren't giving a response here
        if body:
            allThreadsContinued = body.get('allThreadsContinued', True)
        else:
            allThreadsContinued = True

        self.on_continued_event(dap.ContinuedEvent(self.command_thread.id, allThreadsContinued))

    async def pause(self):
        """Ask the adapter to pause the command thread."""
        await self.request('pause', {
            'threadId': self.command_thread.id
        })

    async def step_over(self):
        # optimistically mark the thread as running before the request
        self.on_continued_event(dap.ContinuedEvent(self.command_thread.id, False))
        await self.request('next', {
            'threadId': self.command_thread.id
        })

    async def step_in(self):
        self.on_continued_event(dap.ContinuedEvent(self.command_thread.id, False))
        await self.request('stepIn', {
            'threadId': self.command_thread.id
        })

    async def step_out(self):
        self.on_continued_event(dap.ContinuedEvent(self.command_thread.id, False))
        await self.request('stepOut', {
            'threadId': self.command_thread.id
        })
    async def evaluate(self, expression: str, context: str = 'repl'):
        """Evaluate *expression* and echo the result to the console."""
        result = await self.evaluate_expression(expression, context)
        if not result:
            raise Error('expression did not return a result')

        # variablesReference doesn't appear to be optional in the spec... but some adapters treat it as such
        event = dap.OutputEvent(result.result + '\n', 'console', variablesReference=result.variablesReference)
        self.listener.on_session_output_event(self, event)

    async def evaluate_expression(self, expression: str, context: str|None) -> dap.EvaluateResponse:
        """Send an 'evaluate' request scoped to the selected frame (if any)."""
        frameId: int|None = None
        if self.selected_frame:
            frameId = self.selected_frame.id

        response = await self.request('evaluate', {
            'expression': expression,
            'context': context,
            'frameId': frameId,
        })

        # the spec doesn't say this is optional? But it seems that some implementations throw errors instead of marking things as not verified?
        if response['result'] is None:
            raise Error('expression did not return a result')

        return response

    async def read_memory(self, memory_reference: str, count: int, offset: int) -> dap.ReadMemoryResponse:
        """Read *count* bytes at *offset* from a DAP memory reference."""
        v = await self.request('readMemory', {
            'memoryReference': memory_reference,
            'count': count,
            'offset': offset
        })
        return v

    async def stack_trace(self, thread_id: int) -> list[dap.StackFrame]:
        """Fetch the stack frames for a thread."""
        body = await self.request('stackTrace', {
            'threadId': thread_id,
        })
        return body['stackFrames']

    async def completions(self, text: str, column: int) -> list[dap.CompletionItem]:
        """Ask the adapter for REPL completions at *column* in *text*."""
        frameId = None
        if self.selected_frame:
            frameId = self.selected_frame.id

        response = await self.request('completions', {
            'frameId': frameId,
            'text': text,
            'column': column,
        })
        return response['targets']

    async def set_variable(self, variablesReference: int, name: str, value: str) -> dap.SetVariableResponse:
        """Assign a new value to a child of a variables container."""
        response = await self.request('setVariable', {
            'variablesReference': variablesReference,
            'name': name,
            'value': value,
        })
        return response

    async def data_breakpoint_info(self, variablesReference: int, name: str) -> dap.DataBreakpointInfoResponse:
        """Query data-breakpoint availability for a variable."""
        response = await self.request('dataBreakpointInfo', {
            'variablesReference': variablesReference,
            'name': name,
        })
        return response
    def log(self, type: str, value: str) -> None:
        """core.Logger hook: route log lines to the transport log or console."""
        if type == 'process':
            self.transport_log.info(f'⟹ process/stderr :: {value.strip()}')
            return
        if type == 'error':
            output = dap.OutputEvent(value + '\n', 'debugger.error')
            self.listener.on_session_output_event(self, output)
            return

        output = dap.OutputEvent(value + '\n', 'debugger.info')
        self.listener.on_session_output_event(self, output)

    def load_frame(self, frame: Optional[dap.StackFrame]):
        """Make *frame* current: refresh its scopes and watch expressions.

        A None frame clears the variables view.
        """
        self.listener.on_session_selected_frame(self, frame)
        if frame:
            core.run(self.refresh_scopes(frame))
            core.run(self.watch.evaluate(self, frame))
        else:
            self.variables.clear()
            self.listener.on_session_updated_variables(self)
    async def refresh_scopes(self, frame: dap.StackFrame):
        """Reload the variable scopes for *frame* and notify the listener."""
        body = await self.request('scopes', {
            'frameId': frame.id
        })
        scopes: list[dap.Scope] = body['scopes']
        self.variables = [Variable.from_scope(self, scope) for scope in scopes]
        self.listener.on_session_updated_variables(self)

    async def get_source(self, source: dap.Source) -> tuple[str, str|None]:
        """Fetch source content (and optional mime type) from the adapter."""
        body = await self.request('source', {
            'source': {
                'path': source.path,
                'sourceReference': source.sourceReference
            },
            'sourceReference': source.sourceReference
        })
        return body['content'], body.get('mimeType')

    async def get_variables(self, variablesReference: int, without_names: bool = False) -> list[Variable]:
        """Fetch the child variables of a variables container."""
        response = await self.request('variables', {
            'variablesReference': variablesReference
        })
        variables: list[dap.Variable] = response['variables']

        # vscode seems to remove the names from variables in output events
        if without_names:
            for v in variables:
                v.name = ''
                v.value = v.value.split('\n')[0]

        return [Variable.from_variable(self, variablesReference, v) for v in variables]
    def on_breakpoint_event(self, event: dap.BreakpointEvent):
        """Write an adapter-side breakpoint status change back into the model."""
        assert event.breakpoint.id
        b = self.breakpoints_for_id.get(event.breakpoint.id)
        if b:
            self.breakpoints.source.set_result(b, event.breakpoint)

    def on_module_event(self, event: dap.ModuleEvent):
        """Track modules loaded/changed/unloaded in the debuggee."""
        if event.reason == 'new':
            self.modules[event.module.id] = event.module

        if event.reason == 'removed':
            try:
                del self.modules[event.module.id]
            except KeyError:
                ...

        if event.reason == 'changed':
            self.modules[event.module.id] = event.module

        self.listener.on_session_updated_modules(self)

    def on_loaded_source_event(self, event: dap.LoadedSourceEvent):
        """Track loaded sources, keyed on name+path+sourceReference."""
        id = f'{event.source.name}~{event.source.path}~{event.source.sourceReference}'

        if event.reason == 'new':
            self.sources[id] = event.source

        elif event.reason == 'removed':
            try:
                del self.sources[id]
            except KeyError:
                ...

        elif event.reason == 'changed':
            self.sources[id] = event.source

        self.listener.on_session_updated_sources(self)
    # this is a bit of a weird case. Initialized will happen at some point in time
    # it depends on when the debug adapter chooses it is ready for configuration information
    # when it does happen we can then add all the breakpoints and complete the configuration
    # NOTE: some adapters appear to send the initialized event multiple times
    @core.schedule
    async def on_initialized_event(self):
        try:
            await self.add_breakpoints()
        except core.Error as e:
            self.error('there was an error adding breakpoints {}'.format(e))

        if self.capabilities.supportsConfigurationDoneRequest:
            try:
                await self.request('configurationDone', None)
            except core.Error as e:
                self.error('there was an error in configuration done {}'.format(e))

    def on_output_event(self, event: dap.OutputEvent):
        """Forward adapter output straight to the listener."""
        self.listener.on_session_output_event(self, event)

    @core.schedule
    async def on_terminated_event(self, event: dap.TerminatedEvent):
        """The adapter says the debuggee ended: force the session down."""
        await self.stop_forced(reason=Session.stopped_reason_terminated_event)

        # TODO: This needs to be handled inside debugger_sessions
        # restarting needs to be handled by creating a new session
        # if event.restart:
        #     await self.launch(self.adapter_configuration, self.configuration, event.restart)

    @core.schedule
    async def on_transport_closed(self):
        """The transport died underneath us: treat it like a terminate."""
        await self.stop_forced(reason=Session.stopped_reason_terminated_event)

    async def on_reverse_request(self, command: str, arguments: Any):
        """Handle adapter->client requests; unknown ones are offered to the adapter config."""
        if command == 'runInTerminal':
            response = await self.on_run_in_terminal(arguments)
            return response

        assert self.adapter_configuration
        response = await self.adapter_configuration.on_custom_request(self, command, arguments)

        if response is None:
            raise core.Error(f'reverse request not implemented {command}')

        return response

    async def on_run_in_terminal(self, request: dap.RunInTerminalRequestArguments) -> dap.RunInTerminalResponse:
        """Delegate the adapter's runInTerminal request to the listener."""
        try:
            return await self.listener.on_session_terminal_request(self, request)
        except core.Error as e:
            self.error(str(e))
            raise e
    @property
    def command_thread(self) -> Thread:
        """Thread that debugger commands target: the selection, else the first."""
        if self.selected_thread:
            return self.selected_thread
        if self.threads:
            return self.threads[0]

        raise core.Error('No threads to run command')

    def get_thread(self, id: int):
        """Get (or lazily create) the Thread wrapper for a DAP thread id."""
        t = self.threads_for_id.get(id)
        if t:
            return t
        else:
            t = Thread(self, id, '??', self.all_threads_stopped)
            self.threads_for_id[id] = t
            return t

    def set_selected(self, thread: Thread, frame: Optional[dap.StackFrame]):
        """Explicit user selection of a thread/frame."""
        self.select(thread, frame, explicitly=True)
        self.listener.on_session_updated_threads(self)
        self._refresh_state()
    # after a successfull launch/attach, stopped event, thread event we request all threads
    # see https://microsoft.github.io/debug-adapter-protocol/overview
    # updates all the threads from the dap model
    # @NOTE threads_for_id will retain all threads for the entire session even if they are removed
    @core.schedule
    async def refresh_threads(self):
        response = await self.request('threads', None)
        threads: list[dap.Thread] = response['threads']

        self.threads.clear()
        for thread in threads:
            t = self.get_thread(thread.id)
            t.name = thread.name
            self.threads.append(t)

        self.listener.on_session_updated_threads(self)

    def on_threads_event(self, event: dap.ThreadEvent) -> None:
        self.refresh_threads()

    def on_stopped_event(self, stopped: dap.StoppedEvent):
        """Mark thread(s) stopped and auto-select/expand the stopping thread."""
        description = stopped.description
        text = stopped.text
        reason = stopped.reason

        # build the most informative status text we can from the event
        if description and text:
            stopped_text = "Stopped: {}: {}".format(description, text)
        elif text or description or reason:
            stopped_text = "Stopped: {}".format(text or description or reason)
        else:
            stopped_text = "Stopped"

        if stopped.allThreadsStopped or False:
            self.all_threads_stopped = True

            for thread in self.threads:
                thread.clear()
                thread.stopped = True

        thread_id = stopped.threadId
        assert thread_id # not sure why this is optional...

        # @NOTE this thread might be new and not in self.threads so we must update its state explicitly
        thread = self.get_thread(thread_id)
        thread.clear()
        thread.stopped = True
        thread.stopped_reason = stopped_text

        # only auto-select when the user has not made an explicit selection
        if not self.selected_explicitly:
            self.select(thread, None, explicitly=False)
            self.expand_thread(thread)

        self.listener.on_session_updated_threads(self)
        self.refresh_threads()
        self._refresh_state()

    @core.schedule
    async def expand_thread(self, thread: Thread):
        """Auto-select the first interesting frame once *thread*'s stack arrives."""
        children = await thread.children()
        # bail if the user selected something else while we were fetching
        if children and not self.selected_frame and not self.selected_explicitly and self.selected_thread is thread:
            def first_non_subtle_frame(frames: list[dap.StackFrame]):
                for frame in frames:
                    if frame.presentationHint != 'subtle':
                        return frame
                return frames[0]

            frame = first_non_subtle_frame(children)
            self.select(thread, frame, explicitly=False)
            self.listener.on_session_updated_threads(self)
            self._refresh_state()
    def on_continued_event(self, continued: dap.ContinuedEvent):
        """Mark the continued thread(s) running and drop stale selections."""
        if continued.allThreadsContinued:
            self.all_threads_stopped = False
            for thread in self.threads:
                thread.stopped = False
                thread.stopped_reason = ''

        # @NOTE this thread might be new and not in self.threads so we must update its state explicitly
        thread = self.get_thread(continued.threadId)
        thread.stopped = False
        thread.stopped_reason = ''

        if continued.allThreadsContinued or thread is self.selected_thread:
            self.select(None, None, explicitly=False)

        self.listener.on_session_updated_threads(self)
        self._refresh_state()

    def select(self, thread: Optional[Thread], frame: Optional[dap.StackFrame], explicitly: bool):
        """Set the (thread, frame) selection and load the frame's data."""
        if frame and not thread:
            raise core.Error('Expected thread')

        self.selected_explicitly = explicitly
        self.selected_thread = thread
        self.selected_frame = frame
        self.load_frame(frame)
def on_event(self, event: str, body: Any):
if event == 'initialized':
self.on_initialized_event()
elif event == 'output':
self.on_output_event(body)
elif event == 'continued':
self.on_continued_event(body)
elif event == 'stopped':
self.on_stopped_event(body)
elif event == 'terminated':
self.on_terminated_event(body)
elif event == 'thread':
self.on_threads_event(body)
elif event == 'breakpoint':
self.on_breakpoint_event(body)
elif event == 'module':
self.on_module_event(body)
elif event == 'loadedSource':
self.on_loaded_source_event(body)
else:
core.error(f'event ignored not implemented {event}')
class Thread:
    """One debuggee thread, with a lazily fetched, cached stack trace."""

    def __init__(self, session: Session, id: int, name: str, stopped: bool):
        self.session = session
        self.id = id
        self.name = name
        self.stopped = stopped
        self.stopped_reason = ''
        # cached pending/resolved stack-trace fetch; reset by clear()
        self._children: Optional[core.Future[list[dap.StackFrame]]] = None

    def has_children(self) -> bool:
        # frames can only be fetched while the thread is paused
        return self.stopped

    def children(self) -> Awaitable[list[dap.StackFrame]]:
        """Return (starting if needed) the stack-trace fetch for this thread."""
        if not self.stopped:
            raise core.Error('Cannot get children of thread that is not stopped')

        cached = self._children
        if cached:
            return cached

        fetch = core.run(self.session.stack_trace(self.id))
        self._children = fetch
        return fetch

    def clear(self):
        """Drop the cached stack trace (e.g. after a stop/continue)."""
        self._children = None
| StarcoderdataPython |
4908056 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/10/24 9:37
# @Author : ganliang
# @File : test_tf_bpn.py
# @Desc : bpn反向传播
import unittest
from mtensorflow import tf_bpn
class TestTensorflowBpn(unittest.TestCase):
    """Smoke test for the back-propagation network example."""

    def test_bpn(self):
        """Run one cheap training epoch to verify bpn() executes end to end."""
        params = {"epochs": 1, "batch_size": 1000, "learning_rate": 0.01}
        tf_bpn.bpn(**params)


if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
5013679 | from absl import app
from absl import flags
import time
from os.path import expanduser
import os
import signal
from .mv_screenshot import mv_default
from .settings import init, change_savedir, change_jikanwari
from .daemonize import interval_run, start_daemon, log_to_stdout
FLAGS = flags.FLAGS

# command-line switches understood by the `sukusho` entry point (see main)
flags.DEFINE_bool(
    'reset',
    False,
    'reset all settings of this program',
    short_name='r')
flags.DEFINE_bool(
    'settings',
    False,
    'edit save directory',
    short_name='es')
flags.DEFINE_bool(
    'jikanwari',
    False,
    'edit jikanwari',
    short_name='ej')
flags.DEFINE_bool(
    'startdaemon',
    False,
    'daemonize program',
    short_name='start')
flags.DEFINE_bool(
    'stopdaemon',
    False,
    'stop daemon',
    short_name='kill')
# NOTE(review): both 'test' and 'exec' default to True, so a plain run takes
# a test screenshot AND a real one -- confirm this double capture is intended.
flags.DEFINE_bool(
    'test',
    True,
    'test',
    short_name='t')
flags.DEFINE_bool(
    'exec',
    True,
    'test',
    short_name='e')
def screenshot_daemon(arg=None, kws=None, pf=log_to_stdout):
    """Daemon body: move/capture a screenshot once a minute, forever.

    The arg/kws/pf parameters appear to exist only to satisfy the calling
    convention of start_daemon; none of them are used in the body --
    TODO confirm against the daemonize helpers.
    """
    while True:
        mv_default()
        time.sleep(60)
home = expanduser("~")
base = os.path.join(home, ".myscreenshot")
# the presence of this pid file marks a running daemon (checked in main)
pidpath = os.path.join(base, "python_daemon.pid")

# apply the daemon helpers by rebinding the imported names:
# start_daemon is configured with the stdout logger, and screenshot_daemon
# is wrapped by interval_run(1) -- presumably a re-run/restart interval;
# TODO confirm interval_run's exact semantics in .daemonize
start_daemon = start_daemon(log_to_stdout)
screenshot_daemon = interval_run(1)(screenshot_daemon)
def main(argv):
    """absl entry point: act on the parsed FLAGS.

    Order matters: --jikanwari and --stopdaemon return early; --reset,
    --settings, --test, --startdaemon and --exec can combine in one run.
    """
    if FLAGS.jikanwari:
        change_jikanwari()
        return 0
    if FLAGS.reset:
        init()
    if FLAGS.settings:
        change_savedir()
    if FLAGS.stopdaemon:
        if not os.path.isfile(pidpath):
            return None
        with open(pidpath, "rb") as f:
            pid = f.read().rstrip()
        # The pid file can be stale (machine rebooted, daemon crashed) or
        # corrupt; don't let that make the daemon impossible to clean up.
        try:
            os.kill(int(pid), signal.SIGKILL)
            print("sukusho-daemon is safely killed")
        except (OSError, ValueError):
            print("sukusho-daemon was not running (stale pid file removed)")
        os.remove(pidpath)
        return None
    if FLAGS.test:
        mv_default(test=True)
        if not FLAGS.startdaemon:
            print("Your Sukusho is ready!!")
    if FLAGS.startdaemon:
        if os.path.isfile(pidpath):
            print("sukusho-daemon is already started.")
            return None
        print("sukusho-daemon started!")
        start_daemon(
            screenshot_daemon,
            'screenshot_daemon',
            pidpath=os.path.join(base, "python_daemon.pid"),
            logpath=os.path.join(base, "python_daemon.log"),
            kws=None
        )
    if FLAGS.exec:
        mv_default(test=False)
def sukusho():
    """Console-script entry point: hand control to absl with main()."""
    app.run(main)


if __name__ == "__main__":
    app.run(main)
| StarcoderdataPython |
3400168 | <reponame>marcus-pham/test<filename>book/code/nba - project 5 get list of all couch.py
# already try with requests, this module not work
from bs4 import BeautifulSoup
from selenium import webdriver
# from original link nba.com
# get all coaches name
driver = webdriver.PhantomJS(executable_path = r'C:\phantomjs-2.1.1-windows\bin\phantomjs.exe')
url = 'http://www.nba.com/news/coaches/index.html?ls=iref:nba:gnav'
driver.get(url)
soup = BeautifulSoup(driver.page_source, 'lxml')
section = soup.find('section',{'id':'nbaArticleContent'})
for p in section.find_all('p'):
for a in p.find_all('a'):
# print a.text + "is coach of " + a.find_previous_sibling().text.replace(':','') + " team"
print a.text + " ---> " + a.find_previous_sibling().text.replace(':','')
driver.quit()
| StarcoderdataPython |
4903593 | <reponame>csinchok/drf-nested-serializer<filename>nested_serializers/serializers.py<gh_stars>0
from django.db import transaction
from rest_framework import serializers
from rest_framework.fields import set_value, empty
from rest_framework.compat import OrderedDict
from rest_framework.serializers import ValidationError
from rest_framework.utils import model_meta
from rest_framework.utils.field_mapping import get_nested_relation_kwargs
class NestedListSerializer(serializers.ListSerializer):
    """ListSerializer used for writable nested many=True relations.

    Children are matched by primary key: payload items carrying an "id"
    update the existing row; items without one are created during update()
    but rejected during create().
    """

    def create(self, validated_data):
        # Creating the parent: every nested item must reference an existing
        # child by id -- we refuse to cascade-create related objects here.
        return_instances = []
        for child_data in validated_data:
            if child_data.get("id", empty) is empty:
                # we don't want to descend into creating objects, so throw a validation error
                # here and inform the user to create the related object before saving the
                # instance in operation
                raise ValidationError("Nested objects must exist prior to creating this parent instance.")
            else:
                # We have an id, so let's grab this sumbitch
                ModelClass = self.child.Meta.model
                child_instance = ModelClass.objects.get(pk=child_data["id"])
                return_instances.append(self.child.update(child_instance, child_data))

        return return_instances

    def update(self, instance, validated_data):
        # instance is a qs...
        current_objects = {obj.id: obj for obj in instance}
        return_instances = []
        for child_data in validated_data:
            try:
                if child_data.get("id", empty) is empty:
                    # The id field is empty, so this is a create
                    if "id" in child_data:
                        # Need to kill this, because it's technically a read-only field
                        del child_data["id"]
                    return_instances.append(self.child.create(child_data))
                else:
                    # We have an id, so let's grab this sumbitch
                    child_instance = current_objects.pop(child_data["id"])
                    return_instances.append(self.child.update(child_instance, child_data))
            except AttributeError:
                # this is an actual object, so add it to the return instances
                # (child_data has no .get -- it is already a model instance)
                return_instances.append(child_data)
        # NOTE(review): children left in current_objects (in the db but absent
        # from the payload) are dropped from the returned list yet not deleted
        # -- confirm this orphaning behavior is intended.
        return return_instances
class NestedModelSerializer(serializers.ModelSerializer):
    """ModelSerializer with writable nested relations.

    Nested to-many fields are delegated to NestedListSerializer; nested
    to-one (ForeignKey-like) fields are created/updated inline. Creation of
    *new* nested objects during a parent ``create`` is deliberately rejected.
    """
    @classmethod
    def many_init(cls, *args, **kwargs):
        # Route many=True through our list serializer so nested writes work.
        kwargs['child'] = cls()
        return NestedListSerializer(*args, **kwargs)
    def build_nested_field(self, field_name, relation_info, nested_depth):
        """
        Create nested fields for forward and reverse relationships.

        Unlike the DRF default, nested serializers are writable
        (``read_only=False``) and to-many fields are optional.
        """
        class NestedSerializer(NestedModelSerializer):
            class Meta:
                model = relation_info.related_model
                depth = nested_depth - 1
        field_class = NestedSerializer
        field_kwargs = get_nested_relation_kwargs(relation_info)
        field_kwargs["read_only"] = False
        if field_kwargs.get("many"):
            field_kwargs["required"] = False
        return field_class, field_kwargs
    def to_internal_value(self, data):
        """Validate input; when nested, also force the pk into the result.

        DRF raises AssertionError for writes it considers unsupported on
        nested serializers — translate that into a ValidationError.
        """
        try:
            ret = super(NestedModelSerializer, self).to_internal_value(data)
        except AssertionError:
            raise ValidationError({self.__class__.__name__: "Cannot descend and create nested objects."})
        # So, in the case that this object is nested, we really really need the id.
        if getattr(self, 'parent', None):
            child_model = self.Meta.model
            pk_field_name = child_model._meta.pk.name
            pk_field = self.fields[pk_field_name]
            primitive_value = pk_field.get_value(data)
            set_value(ret, pk_field.source_attrs, primitive_value)
        # NOTE(review): this is dead code — ``errors`` is never used; it looks
        # like a leftover from copying DRF's to_internal_value.
        errors = OrderedDict()
        return ret
    def update(self, instance, validated_data):
        """Like the parent's update, but also creates/updates nested objects.

        To-many nested data is applied via the list serializer and re-linked
        after the parent save; to-one nested dicts are created (no id) or
        updated (id present) before being attached.
        """
        m2m_fields = {}
        for key, field in self.fields.items():
            if isinstance(field, serializers.BaseSerializer):
                child_instances = getattr(instance, key)
                # If this field is a serializer, we probably are dealing with a nested object
                if isinstance(validated_data.get(key), list):
                    # This will get handled in NestedListSerializer...
                    nested_data = validated_data.pop(key)
                    updated_data = field.update(child_instances.all(), nested_data)
                    m2m_fields[key] = updated_data
                elif isinstance(validated_data.get(key), (dict, OrderedDict)):
                    # Looks like we're dealing with some kind of ForeignKey
                    nested_data = validated_data.pop(key)
                    if nested_data.get("id", empty) is empty:
                        # No id, so it looks like we've got a create...
                        try:
                            del nested_data["id"]
                        except KeyError:
                            pass
                        child_instance = field.create(nested_data)
                    else:
                        # Update
                        ChildClass = field.Meta.model
                        try:
                            child_instance = ChildClass.objects.get(pk=nested_data["id"])
                        except ChildClass.DoesNotExist:
                            # Stale id: fall back to creating a fresh child.
                            child_instance = field.create(nested_data)
                        else:
                            del nested_data["id"]
                            child_instance = field.update(child_instance, nested_data)
                    validated_data[key] = child_instance
                elif validated_data.get(key, True) is None:
                    # null value passed - check if null allowed for field
                    ModelClass = self.Meta.model
                    model_field = ModelClass._meta.get_field(key)
                    if model_field.null:
                        validated_data[key] = None
        # get the instance from super
        instance = super(NestedModelSerializer, self).update(instance, validated_data)
        # updated m2m fields
        for field_name, related_instances in m2m_fields.items():
            with transaction.atomic():
                try:
                    field = getattr(instance, field_name)
                    field.clear()
                    field.add(*related_instances)
                except Exception as exc:
                    # NOTE(review): failures re-linking the relation are silently
                    # swallowed here — the atomic block rolls back, but the caller
                    # is never told. Confirm this is intentional best-effort.
                    pass
        # dump the instance
        return instance
    def create(self, validated_data):
        """Create the parent; nested objects must already exist (by id)."""
        ModelClass = self.Meta.model
        # Remove many-to-many relationships from validated_data.
        # They are not valid arguments to the default `.create()` method,
        # as they require that the instance has already been saved.
        info = model_meta.get_field_info(ModelClass)
        many_to_many = {}
        # Save off the data
        for key, field in self.fields.items():
            if isinstance(field, serializers.BaseSerializer):
                if isinstance(validated_data.get(key), list):
                    # One-to-many...
                    nested_data = validated_data.pop(key)
                    many_to_many[key] = field.create(nested_data)
                elif isinstance(validated_data.get(key), dict):
                    # ForeignKey
                    nested_data = validated_data.pop(key)
                    if nested_data.get("id", empty) is empty:
                        # we don't want to descend into creating objects, so throw a validation error
                        # here and inform the user to create the related object before saving the
                        # instance in operation
                        raise ValidationError("Nested objects must exist prior to creating this parent instance.")
                    else:
                        # Update
                        ChildClass = field.Meta.model
                        try:
                            child_instance = ChildClass.objects.get(pk=nested_data["id"])
                        except ChildClass.DoesNotExist:
                            child_instance = field.create(nested_data)
                        else:
                            del nested_data["id"]
                            child_instance = field.update(child_instance, nested_data)
                        validated_data[key] = child_instance
        # Create the base instance
        instance = ModelClass.objects.create(**validated_data)
        # Save many-to-many relationships after the instance is created.
        if many_to_many:
            for field_name, value in many_to_many.items():
                setattr(instance, field_name, value)
        return instance
| StarcoderdataPython |
9706541 | <reponame>Artanis/icecrate<gh_stars>1-10
from setuptools import setup, find_packages
def get_version():
version_file = "icecrate/_version.py"
loc = {}
with open(version_file, "r") as f_version:
code = compile(f_version.read(), "_version.py", "exec")
exec(code, {}, loc)
return loc.get("__version__")
setup(
# package information
name="icecrate",
version=get_version(),
packages=find_packages(exclude=["tests"]),
include_package_data=True,
install_requires=[
"bottle>=0.12-dev",
"redis",
"PyDispatcher",
"Whoosh",
"pyxdg"],
dependency_links=[
# get bottle 0.12 from github
"https://github.com/defnull/bottle/tarball/master#egg=bottle-0.12-dev"],
# metadata
author="<NAME>",
author_email="<EMAIL>",
license="BSD 2-clause",
url="https://github.com/Artanis/icecrate"
)
| StarcoderdataPython |
6685565 | <filename>source/main.py
import os
import pandas
from pandas.errors import ParserError
from scipy.signal import savgol_filter
import csv
class StackDiffraction:
    """Load X-ray diffraction histograms from a directory of .csv files and
    assemble them into a stacked data set.

    NOTE(review): ``GetThetaValues`` reads ``self.files`` and ``SaveCSV``
    reads ``self.data``; neither is set in ``__init__`` (see the disabled
    lines below) — the GUI caller is expected to assign them. Confirm against
    the GUI code.
    """
    def __init__(self):
        """The lines below are commented out as they are defunct in the
        GUI implementation"""
        #self.path = path
        #self.files = self.GetFiles()
        #self.data = [["2theta",*self.files],self.GetThetaValues(),*self.GetHistograms()]

    @staticmethod
    def _read_histogram_csv(filepath):
        """Read one histogram .csv whose header row position is unknown.

        The exporter writes a variable-length preamble before the header row,
        so retry with an increasing ``skiprows`` until pandas can parse the
        file, then promote the first parsed row to the column header.
        """
        rows_to_skip = 1
        while True:
            try:
                df = pandas.read_csv(filepath,
                                     skiprows=rows_to_skip,
                                     header=None)
                break
            except ParserError:
                # Ragged preamble line: skip one more row and retry.
                rows_to_skip += 1
        df.columns = df.iloc[0]
        return df[1:]

    def GetFiles(self, path, extension=".csv"):
        """Return the files in *path* matching *extension*, oldest-created first."""
        files = [f for f in os.listdir(path) if f.endswith(extension)]
        return sorted(files, key=lambda f: os.path.getctime(path + "/" + f))

    def GetThetaValues(self, path):
        """Return the 2-theta axis ("x" column) of the first file in ``self.files``."""
        df = self._read_histogram_csv(path + "/" + self.files[0])
        return df["x"].astype("float32").to_list()

    def GetHistograms(self, path, files, separation,
                      bkg_subt=False,
                      norm=False,
                      smooth=False,
                      separate=False):
        """Return a list of intensity traces, one per file in *files*.

        Optional per-trace post-processing, applied in this order:
          bkg_subt -- subtract the fitted background column (absolute value)
          norm     -- normalise to a maximum of 1
          smooth   -- Savitzky-Golay filter (window 11, polynomial order 2)
          separate -- offset each successive trace by *separation*
        """
        histogram_INT = []
        offset = 0
        for index, file in enumerate(files):
            print(f"{index}\t -- \t{file}")
            df = self._read_histogram_csv(path + "/" + file)
            if not (bkg_subt or norm or smooth or separate):
                # Fast path: raw observed intensities, untouched.
                histogram_INT.append(df["y_obs"].astype("float32").to_list())
                continue
            df["y"] = df["y_obs"].astype("float32")
            if bkg_subt:
                df["y"] = abs(df["y"] - df["y_bkg"].astype("float32"))
            if norm:
                df["y"] = df["y"] / max(df["y"])
            if smooth:
                df["y"] = savgol_filter(df["y"], 11, 2)
            if separate:
                df["y"] = df["y"] + offset
                offset += separation
            histogram_INT.append(df["y"].to_list())
        return histogram_INT

    def SaveCSV(self, filename):
        """Write ``self.data`` (header row followed by columns) to *filename*."""
        with open(filename, "w", newline="") as f:
            csvwriter = csv.writer(f, delimiter=",")
            csvwriter.writerow(self.data[0])
            # Remaining entries are columns; transpose them into CSV rows.
            csvwriter.writerows(list(zip(*self.data[1:])))
if __name__ == "__main__":
    # No runtime behavior: the example driver below was disabled when the GUI
    # front end took over instantiation; the string literal is a harmless no-op.
    """The lines below are commented out as they they are defunct in the GUI implementation."""
    #testfunc = StackDiffraction("G:\\My Drive\\SBU\\PyStackXRD\\SampleDirectory")
    #testfunc.SaveCSV()
1696373 | <filename>menu/menu_api.py<gh_stars>1-10
# General Package Import
import re
from flask import json, jsonify, request, abort
from flask.views import MethodView
# User Package Import
from utilities.errorcodes import *
from utilities.filelogger import Log
from utilities.databaselayer import DatabaseLayer
from utilities.validator import validate_input
from utilities.schema import menu_schema
from user.decorators import user_required
from settings import database
# Class Menu API:
# This class acts as an interface to resolve HTTP petitions
class Menu_API(MethodView):
decorators = [user_required]
def __init__(self):
lgr = Log('MENUAPI')
self.log = lgr.getLog()
def get(self, restaurant_id):
try:
self.log.info('Petition GET route INI')
response = jsonify({}), 500
menu = database.retrieveMenu(restaurant_id)
self.log.debug(menu)
if menu:
self.log.info('Menu acquired succesfully')
del menu['_id']
self.log.debug(menu)
response = jsonify(menu), 200
else:
error = {"code":err_ids}
response = jsonify({"error":error}), 404
self.log.info('Petition GET route END')
except:
error = {"code":err_unk}
response = jsonify({"error":error}), 500
finally:
return response
def post(self):
try:
self.log.info('Petition POST route INI')
response = jsonify({}), 500
menu = request.json
self.log.debug(menu)
if database.retrieveMenu(menu['restaurant_id']) is not None:
error = {"code":err_aex}
response = jsonify({"error":error}), 418
else:
requester = request.headers.get('X-USR-ID')
self.log.debug(requester)
user = database.retrieveUser(requester)
acc = user.get('access')
if (acc is not None) & (menu['restaurant_id'] in acc):
if validate_input(menu, menu_schema, self.log):
self.log.info('Creating Menu')
result = database.createMenu(menu)
if result:
response = jsonify({"result":"OK"}), 200
else:
error = {"error":err_dbf}
response = jsonify({"error":error}), 500
else:
error = {"code":err_frm}
response = jsonify({"error":error}), 400
else:
error = {"code":err_nau}
response = jsonify({"error":error}), 403
self.log. info('Petition POST route END')
except:
error = {"error":err_unk}
resonse = jsonify({"error":error}), 500
finally:
return response
def put(self, restaurant_id):
try:
self.log.info('Petition PUT route INI')
response = jsonify({}), 500
menu = request.json
self.log.info(menu)
m = database.retrieveMenu(restaurant_id)
self.log.info(m)
if database.retrieveMenu(restaurant_id):
requester = request.headers.get('X-USR-ID')
self.log.debug(requester)
user = database.retrieveUser(requester)
self.log.debug(user)
acc = user.get('access')
self.log.info(acc)
if (acc is not None) & (restaurant_id in acc):
if validate_input(menu, menu_schema, self.log):
if database.updateMenu(restaurant_id, menu):
response = jsonify({"result":"OK"}), 200
else:
error = {"code":err_dbf}
response = jsonify({"error":error}), 500
else:
error = {"code":err_frm}
response = jsonify({"error":error}), 400
else:
error = {"code":err_nau}
response = jsonify({"error":error}), 403
else:
error = {"code":err_ids}
response = jsonify({"error":error}), 404
self.log.info('Petition PUT route END')
except:
error = {"error":err_unk}
resonse = jsonify({"error":error}), 500
finally:
return response
def delete(self, restaurant_id):
try:
self.log.info('Petition DELETE route INI')
response = jsonify({}), 500
requester = request.headers.get('X-USR-ID')
user = database.retrieveUser(requester)
if restaurant_id in user.get('access'):
if database.deleteMenu(restaurant_id):
response = jsonify({"result":"OK"}), 200
else:
error = {"code":err_dbf}
response = jsonify({"error":error}), 500
else:
error = {"code":err_nau}
response = jsonify({"error":error}), 403
self.log.info('Petition DELETE route END')
except:
error = {"error":err_unk}
resonse = jsonify({"error":error}), 500
finally:
return response
| StarcoderdataPython |
9616036 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file './window.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """pyuic5-generated UI for the main window (fixed 318x326).

    GENERATED CODE -- regenerate from window.ui with pyuic5 rather than
    editing by hand; manual changes will be lost.
    """
    def setupUi(self, MainWindow):
        """Build the widget tree: an LCD display, a count input row, a
        generate button, and a Help menu."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(318, 326)
        MainWindow.setMinimumSize(QtCore.QSize(318, 326))
        MainWindow.setMaximumSize(QtCore.QSize(318, 326))
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.screen = QtWidgets.QLCDNumber(self.centralwidget)
        self.screen.setGeometry(QtCore.QRect(0, 0, 311, 211))
        font = QtGui.QFont()
        font.setBold(True)
        self.screen.setFont(font)
        self.screen.setObjectName("screen")
        self.layoutWidget = QtWidgets.QWidget(self.centralwidget)
        self.layoutWidget.setGeometry(QtCore.QRect(0, 210, 311, 61))
        self.layoutWidget.setObjectName("layoutWidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget)
        self.verticalLayout.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout.setObjectName("verticalLayout")
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.label = QtWidgets.QLabel(self.layoutWidget)
        font = QtGui.QFont()
        font.setBold(True)
        self.label.setFont(font)
        self.label.setObjectName("label")
        self.horizontalLayout.addWidget(self.label)
        self.input = QtWidgets.QLineEdit(self.layoutWidget)
        font = QtGui.QFont()
        font.setBold(True)
        self.input.setFont(font)
        self.input.setObjectName("input")
        self.horizontalLayout.addWidget(self.input)
        self.verticalLayout.addLayout(self.horizontalLayout)
        self.con = QtWidgets.QPushButton(self.layoutWidget)
        font = QtGui.QFont()
        font.setBold(True)
        self.con.setFont(font)
        self.con.setObjectName("con")
        self.verticalLayout.addWidget(self.con)
        MainWindow.setCentralWidget(self.centralwidget)
        self.menus = QtWidgets.QMenuBar(MainWindow)
        self.menus.setGeometry(QtCore.QRect(0, 0, 318, 22))
        self.menus.setObjectName("menus")
        self.help = QtWidgets.QMenu(self.menus)
        self.help.setObjectName("help")
        MainWindow.setMenuBar(self.menus)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.about = QtWidgets.QAction(MainWindow)
        self.about.setObjectName("about")
        self.version = QtWidgets.QAction(MainWindow)
        self.version.setObjectName("version")
        self.more = QtWidgets.QAction(MainWindow)
        self.more.setObjectName("more")
        self.help.addAction(self.about)
        self.menus.addAction(self.help.menuAction())
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Install the (Chinese) display strings; called by setupUi."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.label.setText(_translate("MainWindow", "人数:"))
        self.input.setText(_translate("MainWindow", "48"))
        self.con.setText(_translate("MainWindow", "生 成"))
        self.help.setTitle(_translate("MainWindow", "帮助"))
        self.about.setText(_translate("MainWindow", "关于"))
        self.version.setText(_translate("MainWindow", "版本信息"))
        self.more.setText(_translate("MainWindow", "批量生成"))
| StarcoderdataPython |
6602492 | from asyncio import Future, ensure_future, wait_for, TimeoutError
class CombineMessage:
    """Waits for a combined message for a store node, with a timeout fallback.

    The owner fills in the attributes, then calls :meth:`active`. If
    ``future`` resolves within ``timeout`` seconds, ``combine_message`` is
    dispatched to ``node_key``; otherwise this object unregisters itself from
    the node's reducer and dispatches ``error_message`` instead.

    NOTE(review): ``store`` is assumed to support ``in``, ``dispatch``,
    and ``get_or_create_cell`` returning an Option-like value -- confirm
    against the store implementation.
    """
    def __init__(self):
        self.message_type_list = list()  # message types still being awaited
        self.combine_message = None      # payload dispatched on success
        self.error_message = None        # payload dispatched on timeout
        self.timeout = 1.0               # seconds to wait for `future`
        self.keep_origin = False         # presumably: keep original messages -- TODO confirm
        self.future = Future()           # resolved externally when combination completes
        self.node_key = None             # target store node for dispatch
        self.store = None                # owning store (set by the creator)
    def active(self):
        """Start the wait/dispatch task on the running event loop (fire-and-forget)."""
        ensure_future(self._active())
    async def _active(self):
        try:
            await wait_for(self.future, self.timeout)
            await self.store.dispatch(self.node_key, self.combine_message)
        except TimeoutError:
            # Timed out: if the node still exists, detach this combiner from
            # its reducer and report the failure via the error message.
            if self.node_key in self.store:
                reducer_opt = await self.store.get_or_create_cell(self.node_key, None)
                if reducer_opt.is_some:
                    reducer = reducer_opt.unwrap()
                    if self in reducer.combine_message_list:
                        reducer.combine_message_list.remove(self)
                await self.store.dispatch(self.node_key, self.error_message)
| StarcoderdataPython |
4889340 | <reponame>Marghrid/Forest
from .condition_distinguisher import ConditionDistinguisher
from .regex_distinguisher import RegexDistinguisher
| StarcoderdataPython |
3320444 | """
Copyright 2019 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import pandas as pd
from copy import deepcopy
from collections import defaultdict
from gs_quant.markets.portfolio import Portfolio
from typing import Iterable
from gs_quant.target.instrument import Cash
from gs_quant.backtests.core import ValuationMethod
from gs_quant.backtests.order import OrderBase, OrderCost
from gs_quant.backtests.event import FillEvent
from gs_quant.backtests.data_handler import DataHandler
import numpy as np
import datetime as dt
class BackTest(object):
    """Accumulated state and results of a strategy backtest.

    Holds per-state portfolios and cash, scaling portfolios, cash payments,
    and per-date risk results for the supplied risk measures.
    """
    def __init__(self, strategy, states, risks):
        self._portfolio_dict = defaultdict(Portfolio)  # portfolio by state
        self._cash_dict = defaultdict(float)  # cash by state
        self._scaling_portfolios = defaultdict(list)  # list of ScalingPortfolio
        self._cash_payments = defaultdict(list)  # list of cash payments (entry, unwind)
        self._strategy = deepcopy(strategy)  # the strategy definition
        self._states = states  # list of states
        self._results = defaultdict(list)
        self._risks = tuple(risks)  # risks to calculate
        self._calc_calls = 0
        self._calculations = 0
    @property
    def cash_dict(self):
        return self._cash_dict
    @property
    def portfolio_dict(self):
        return self._portfolio_dict
    @portfolio_dict.setter
    def portfolio_dict(self, portfolio_dict):
        self._portfolio_dict = portfolio_dict
    @property
    def scaling_portfolios(self):
        return self._scaling_portfolios
    @scaling_portfolios.setter
    def scaling_portfolios(self, scaling_portfolios):
        self._scaling_portfolios = scaling_portfolios
    @property
    def cash_payments(self):
        return self._cash_payments
    @cash_payments.setter
    def cash_payments(self, cash_payments):
        self._cash_payments = cash_payments
    @property
    def states(self):
        return self._states
    @property
    def results(self):
        return self._results
    def set_results(self, date, results):
        """Replace the results stored for *date*."""
        self._results[date] = results
    @property
    def risks(self):
        return self._risks
    def add_results(self, date, results):
        """Append *results* for *date*, extending any non-empty existing results."""
        if date in self._results and len(self._results[date]):
            self._results[date] += results
        else:
            self._results[date] = results
    @property
    def calc_calls(self):
        return self._calc_calls
    @calc_calls.setter
    def calc_calls(self, calc_calls):
        self._calc_calls = calc_calls
    @property
    def calculations(self):
        return self._calculations
    @calculations.setter
    def calculations(self, calculations):
        self._calculations = calculations
    @property
    def result_summary(self):
        """DataFrame of aggregated risk values by date plus a 'Cash' column.

        Note: previously declared with an ``allow_mismatch_risk_keys``
        parameter, which is unreachable on a property; its default (True)
        is now applied directly.
        """
        allow_mismatch_risk_keys = True
        summary = pd.DataFrame({date: {risk: results[risk].aggregate(allow_mismatch_risk_keys)
                                       for risk in results.risk_measures} for date, results in self._results.items()}).T
        summary['Cash'] = pd.Series(self._cash_dict)
        return summary.fillna(0)
class ScalingPortfolio(object):
    """Container pairing a trade with the dates and risk measure used to scale it."""
    def __init__(self, trade, dates, risk, csa_term=None):
        # Inputs are stored as given; ``results`` is filled in later by the
        # backtest engine once the risk calculation has run.
        self.trade, self.dates, self.risk = trade, dates, risk
        self.csa_term = csa_term
        self.results = None
class CashPayment(object):
    """A cash flow attached to a trade; ``direction`` is +1 (inflow) or -1 (outflow)."""
    def __init__(self, trade, effective_date=None, scale_date=None, direction=1):
        self.trade = trade
        # Dates default to None and are resolved by the backtest engine.
        self.effective_date, self.scale_date = effective_date, scale_date
        self.direction = direction
class PredefinedAssetBacktest(object):
    """
    Tracks holdings, orders and index performance for a predefined-asset backtest.

    :param data_handler: holds all the data required to run the backtest
    :param performance: backtest values
    :param cash_asset: currently restricted to USD non-accrual
    :param holdings: a dictionary keyed by instruments with quantity values
    :param historical_holdings: holdings for each backtest date
    :param orders: a list of all the orders generated
    :param initial_value: the initial value of the index
    :param results: a dictionary which can be used to store intermediate results
    """
    def __init__(self, data_handler: DataHandler):
        self.data_handler = data_handler
        self.performance = pd.Series()
        self.cash_asset = Cash('USD')
        self.holdings = defaultdict(float)
        self.historical_holdings = pd.Series()
        self.historical_weights = pd.Series()
        self.orders = []
        self.initial_value = 100
        self.results = {}
    def set_start_date(self, start: dt.date):
        """Seed the index at *start*: all value held as cash at initial_value."""
        self.performance[start] = self.initial_value
        self.holdings[self.cash_asset] = self.initial_value
    def record_orders(self, orders: Iterable[OrderBase]):
        """Append generated orders to the order history."""
        self.orders.extend(orders)
    def update_fill(self, fill: FillEvent):
        """Apply a fill: pay cash for the filled units and update the position."""
        inst = fill.order.instrument
        self.holdings[self.cash_asset] -= fill.filled_price * fill.filled_units
        self.holdings[inst] += fill.filled_units
    def mark_to_market(self, state: dt.datetime, valuation_method: ValuationMethod):
        """Value all holdings at *state* and record performance and weights.

        Non-cash instruments are valued at the daily fixing, or at the mean of
        fixings inside the valuation window when one is given (NaN if the
        window contains no fixings).
        """
        # Positions smaller than epsilon are treated as closed and skipped.
        epsilon = 1e-12
        date = state.date()
        mtm = 0
        self.historical_holdings[date] = {}
        self.historical_weights[date] = {}
        for instrument, units in self.holdings.items():
            if abs(units) > epsilon:
                self.historical_holdings[date][instrument] = units
                if isinstance(instrument, Cash):
                    fixing = 1
                else:
                    tag, window = valuation_method.data_tag, valuation_method.window
                    if window:
                        start = dt.datetime.combine(state.date(), window.start)
                        end = dt.datetime.combine(state.date(), window.end)
                        fixings = self.data_handler.get_data_range(start, end, instrument, tag)
                        fixing = np.mean(fixings) if len(fixings) else np.nan
                    else:  # no time window specified, use daily fixing
                        fixing = self.data_handler.get_data(state.date(), instrument, tag)
                notional = fixing * units
                self.historical_weights[date][instrument] = notional
                mtm += notional
        self.performance[date] = mtm
        # Second pass: convert notionals into weights of total index value.
        for instrument, notional in self.historical_weights[date].items():
            self.historical_weights[date][instrument] = notional / mtm
    def get_level(self, date: dt.date) -> float:
        """Return the index level recorded for *date*."""
        return self.performance[date]
    def get_costs(self) -> pd.Series:
        """Return total execution costs per date (OrderCost orders only)."""
        costs = defaultdict(float)
        for order in self.orders:
            if isinstance(order, OrderCost):
                costs[order.execution_end_time().date()] += order.execution_quantity(self.data_handler)
        return pd.Series(costs)
    def get_orders_for_date(self, date: dt.date) -> pd.DataFrame:
        """Return a DataFrame of the orders whose execution ended on *date*."""
        return pd.DataFrame([order.to_dict(self.data_handler) for order in self.orders
                             if order.execution_end_time().date() == date])
3459966 | from typing import Text
import tensorflow as tf
import io
import json
import numpy as np
from tensorboard.backend.event_processing import event_accumulator
import warnings
from tc_utils.util import numpy_to_tensor, tensor_to_numpy
from tc_logging.logger import Logger, LogReader
class TensorBoardLogger(Logger):
    """Logger backend that writes scalars and tensors to TensorBoard event files."""
    def __init__(self, log_dir: str):
        super().__init__(log_dir=log_dir)
        self.file_writer = tf.summary.create_file_writer(self.log_path)
    def log_scalar(self, tag, scalar, step):
        """Record a single scalar value under *tag* at *step*."""
        with self.file_writer.as_default():
            tf.summary.scalar(tag, scalar, step=step)
    def log_tensor(self, tag, tensor, step):
        """Record a tensor by serializing it and logging it as a text summary."""
        as_numpy = tensor_to_numpy(tensor)
        self._log_text(tag, self._serialize_numpy_array(as_numpy), step)
    def flush(self):
        # The TensorFlow file writer flushes on its own schedule; nothing to do.
        pass
    def save_checkpoint(self, save_dir, prefix=""):
        warnings.warn("Checkpoint function not implemented for Tensorboard Logger.")
    def load_checkpoint(self, save_dir, prefix=""):
        warnings.warn("Checkpoint function not implemented for Tensorboard Logger.")
    def _log_text(self, tag, text, step):
        """Write *text* as a TensorBoard text summary under *tag*."""
        with self.file_writer.as_default():
            tf.summary.text(tag, text, step=step)
    def _serialize_numpy_array(self, np_array: np.array) -> str:
        """Serialize an ndarray to a JSON string via the .npy byte format."""
        try:
            buffer = io.BytesIO()
            np.save(buffer, np_array)
            buffer.seek(0)
            payload = json.dumps(buffer.read().decode('latin-1'))
            buffer.close()
            return payload
        except Exception as err:
            raise Exception(f'An exception occured while serializing a numpy array: \n{err}')
class TensorBoardLogReader(LogReader):
    """Reads scalars and serialized tensors back out of TensorBoard event files.

    Counterpart of TensorBoardLogger: both values are stored as tensor
    summaries, so both readers use the event accumulator's TENSORS reservoir.
    """
    def __init__(self, log_path: str, size_guide: int = 10000000) -> None:
        super().__init__(log_path=log_path)
        # Keep only tensor events (up to size_guide of them); discard the rest.
        self.size_guidance = {event_accumulator.COMPRESSED_HISTOGRAMS: 0,
                              event_accumulator.IMAGES: 0,
                              event_accumulator.AUDIO: 0,
                              event_accumulator.SCALARS: 0,
                              event_accumulator.HISTOGRAMS: 0,
                              event_accumulator.TENSORS: size_guide,}
        self.event_acc = event_accumulator.EventAccumulator(log_path, self.size_guidance)
        self.event_acc.Reload()
    def read_scalar(self, tag):
        """Return {step: scalar} for all events logged under *tag*."""
        self.event_acc.Reload()
        return_dict = {}
        for entry in self.event_acc.Tensors(tag):
            step = entry.step
            scalar = self._decode_scalar(entry.tensor_proto)
            return_dict[step] = scalar
        return return_dict
    def read_tensor(self, tag):
        """Return {step: tensor} for serialized tensors logged under *tag*."""
        self.event_acc.Reload()
        return_dict = {}
        for entry in self.event_acc.Tensors(tag):
            step = entry.step
            # Text summaries store the serialized array as the first string value.
            bitstring = entry.tensor_proto.string_val[0]
            np_array = self._deserialize_numpy_array(bitstring)
            return_dict[step] = numpy_to_tensor(np_array)
        return return_dict
    def _decode_scalar(self, val):
        """Decode a TensorProto's raw content into a numpy value of its shape."""
        tensor_bytes = val.tensor_content
        tensor_dtype = val.dtype
        tensor_shape = [x.size for x in val.tensor_shape.dim]
        tensor_array = tf.io.decode_raw(tensor_bytes, tensor_dtype)
        tensor_array = tf.reshape(tensor_array, tensor_shape)
        return tensor_array.numpy()
    def _deserialize_numpy_array(self, serialized: str) -> np.array:
        """Inverse of TensorBoardLogger._serialize_numpy_array (.npy via JSON/latin-1)."""
        try:
            memfile = io.BytesIO()
            memfile.write(json.loads(serialized).encode('latin-1'))
            memfile.seek(0)
            np_array = np.load(memfile)
            memfile.close()
            return np_array
        except Exception as err:
            raise Exception(f'An exception occured while deserializing a numpy array: \n{err}')
6439555 | <reponame>t3hi3x/hue<gh_stars>10-100
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
LOG = logging.getLogger(__name__)
class BaseRDBMSDataTable(object):
  """Lazy wrapper around a DB-API cursor that yields rows in batches.

  Mirrors the Beeswax data-table interface (ready / has_more / rows).
  """
  def __init__(self, cursor, columns, fetch_size=1000):
    self.cursor = cursor
    self.columns = columns
    self.next = None
    self.startRowOffset = 0
    # Bug fix: fetch_size was previously ignored and hard-coded to 1000.
    self.fetchSize = fetch_size

  @property
  def ready(self):
    return True

  @property
  def has_more(self):
    # Refill the local buffer from the cursor only when it is exhausted.
    if not self.next:
      self.next = list(self.cursor.fetchmany(self.fetchSize))
    return bool(self.next)

  def rows(self):
    """Generator over all rows, fetching lazily in fetchSize batches."""
    while self.has_more:
      yield self.next.pop(0)
class BaseRDBMSResult(object):
  """Beeswax-style result facade over a BaseRDBMSDataTable."""
  def __init__(self, data_table):
    # Mirror the table's interface onto this object; note that reading
    # has_more here evaluates the table's property at construction time.
    self.data_table = data_table
    self.ready = True
    self.rows = data_table.rows
    self.columns = data_table.columns
    self.start_row = data_table.startRowOffset
    self.has_more = data_table.has_more
class BaseRDMSClient(object):
  """Same API as Beeswax"""

  # Subclasses supply the concrete table/result classes and execute_statement().
  data_table_cls = None
  result_cls = None

  def __init__(self, query_server, user):
    self.query_server = query_server
    self.user = user

  def create_result(self, datatable):
    """Wrap a data table in this client's result class."""
    return self.result_cls(datatable)

  def query(self, query, statement=0):
    """Execute the *statement*-th statement of *query*."""
    statement_text = query.get_query_statement(statement)
    return self.execute_statement(statement_text)

  def explain(self, query):
    """Run EXPLAIN on the first statement, prefixing the keyword if absent."""
    q = query.get_query_statement(0)
    if not q.upper().startswith('EXPLAIN'):
      q = 'EXPLAIN ' + q
    return self.execute_statement(q)
| StarcoderdataPython |
9686604 | import itertools
import math
import numpy as np
import pytest
from hypothesis import given, strategies as st
from scipy.sparse import coo_matrix
from tmtoolkit import bow
@given(dtm=st.lists(st.integers(0, 10), min_size=2, max_size=2).flatmap(
    lambda size: st.lists(st.lists(st.integers(0, 10),
                                   min_size=size[0], max_size=size[0]),
                          min_size=size[1], max_size=size[1])
    ),
       matrix_type=st.integers(min_value=0, max_value=2))
def test_get_doc_lengths(dtm, matrix_type):
    """Property test: get_doc_lengths returns per-document token sums."""
    # matrix_type: 0 = ndarray, 1 = np.matrix, 2 = scipy COO sparse matrix.
    if matrix_type == 1:
        dtm = np.matrix(dtm)
        dtm_arr = dtm.A
    elif matrix_type == 2:
        dtm = coo_matrix(dtm)
        dtm_arr = dtm.A
    else:
        dtm = np.array(dtm)
        dtm_arr = dtm
    if dtm_arr.ndim != 2:
        # Non-2D input (e.g. empty generated list) must be rejected.
        with pytest.raises(ValueError):
            bow.bow_stats.get_doc_lengths(dtm)
    else:
        doc_lengths = bow.bow_stats.get_doc_lengths(dtm)
        assert doc_lengths.ndim == 1
        assert doc_lengths.shape == (dtm_arr.shape[0],)
        assert doc_lengths.tolist() == [sum(row) for row in dtm_arr]
@given(dtm=st.lists(st.integers(0, 10), min_size=2, max_size=2).flatmap(
    lambda size: st.lists(st.lists(st.integers(0, 10),
                                   min_size=size[0], max_size=size[0]),
                          min_size=size[1], max_size=size[1])
    ),
       matrix_type=st.integers(min_value=0, max_value=2))
def test_get_doc_frequencies(dtm, matrix_type):
    """Property test: doc frequencies are bounded by n_docs (abs) or 1 (rel)."""
    # matrix_type: 0 = ndarray, 1 = np.matrix, 2 = scipy COO sparse matrix.
    if matrix_type == 1:
        dtm = np.matrix(dtm)
        dtm_arr = dtm.A
    elif matrix_type == 2:
        dtm = coo_matrix(dtm)
        dtm_arr = dtm.A
    else:
        dtm = np.array(dtm)
        dtm_arr = dtm
    if dtm.ndim != 2:
        with pytest.raises(ValueError):
            bow.bow_stats.get_doc_frequencies(dtm)
    else:
        n_docs = dtm.shape[0]
        df_abs = bow.bow_stats.get_doc_frequencies(dtm)
        assert isinstance(df_abs, np.ndarray)
        assert df_abs.ndim == 1
        assert df_abs.shape == (dtm_arr.shape[1],)
        assert all([0 <= v <= n_docs for v in df_abs])
        df_rel = bow.bow_stats.get_doc_frequencies(dtm, proportions=True)
        assert isinstance(df_rel, np.ndarray)
        assert df_rel.ndim == 1
        assert df_rel.shape == (dtm_arr.shape[1],)
        assert all([0 <= v <= 1 for v in df_rel])
def test_get_doc_frequencies2():
    """Spot-check absolute document frequencies on a fixed 3x5 matrix."""
    doc_term_matrix = np.array([
        [0, 2, 3, 0, 0],
        [1, 2, 0, 5, 0],
        [0, 1, 0, 3, 1],
    ])
    expected = [1, 3, 1, 2, 1]  # number of documents containing each term
    observed = bow.bow_stats.get_doc_frequencies(doc_term_matrix)
    assert observed.tolist() == expected
@given(dtm=st.lists(st.integers(0, 10), min_size=2, max_size=2).flatmap(
    lambda size: st.lists(st.lists(st.integers(0, 10),
                                   min_size=size[0], max_size=size[0]),
                          min_size=size[1], max_size=size[1])
    ),
       matrix_type=st.integers(min_value=0, max_value=2),
       proportions=st.booleans())
def test_get_codoc_frequencies(dtm, matrix_type, proportions):
    """Property test: co-document frequencies cover all term pairs, bounded values."""
    # matrix_type: 0 = ndarray, 1 = np.matrix, 2 = scipy COO sparse matrix.
    if matrix_type == 1:
        dtm = np.matrix(dtm)
    elif matrix_type == 2:
        dtm = coo_matrix(dtm)
    else:
        dtm = np.array(dtm)
    if dtm.ndim != 2:
        with pytest.raises(ValueError):
            bow.bow_stats.get_codoc_frequencies(dtm, proportions=proportions)
        return
    n_docs, n_vocab = dtm.shape
    if n_vocab < 2:
        # Pairs require at least two terms in the vocabulary.
        with pytest.raises(ValueError):
            bow.bow_stats.get_codoc_frequencies(dtm, proportions=proportions)
        return
    df = bow.bow_stats.get_codoc_frequencies(dtm, proportions=proportions)
    assert isinstance(df, dict)
    # One entry per unordered term pair: C(n_vocab, 2).
    assert len(df) == math.factorial(n_vocab) / math.factorial(2) / math.factorial(n_vocab - 2)
    for w1, w2 in itertools.combinations(range(n_vocab), 2):
        n = df[(w1, w2)]
        if proportions:
            assert 0 <= n <= 1
        else:
            assert 0 <= n <= n_docs
def test_get_codoc_frequencies2():
    """Spot-check co-document frequencies on a fixed 3x5 matrix."""
    doc_term_matrix = np.array([
        [0, 2, 3, 0, 0],
        [1, 2, 0, 5, 0],
        [0, 1, 0, 3, 1],
    ])
    codoc = bow.bow_stats.get_codoc_frequencies(doc_term_matrix)
    # One entry per unordered term pair: C(5, 2).
    assert len(codoc) == math.factorial(5) / math.factorial(2) / math.factorial(3)
    # Spot-check a few pairs; key order within a pair is not guaranteed.
    assert codoc.get((0, 1), codoc.get((1, 0))) == 1
    assert codoc.get((1, 3), codoc.get((3, 1))) == 2
    assert codoc.get((0, 2), codoc.get((2, 0))) == 0
@given(dtm=st.lists(st.integers(0, 10), min_size=2, max_size=2).flatmap(
    lambda size: st.lists(st.lists(st.integers(0, 10),
                                   min_size=size[0], max_size=size[0]),
                          min_size=size[1], max_size=size[1])
    ),
       matrix_type=st.integers(min_value=0, max_value=2))
def test_get_term_frequencies(dtm, matrix_type):
    """Property test: get_term_frequencies returns per-term (column) sums."""
    # matrix_type: 0 = ndarray, 1 = np.matrix, 2 = scipy COO sparse matrix.
    if matrix_type == 1:
        dtm = np.matrix(dtm)
        dtm_arr = dtm.A
    elif matrix_type == 2:
        dtm = coo_matrix(dtm)
        dtm_arr = dtm.A
    else:
        dtm = np.array(dtm)
        dtm_arr = dtm
    if dtm.ndim != 2:
        with pytest.raises(ValueError):
            bow.bow_stats.get_term_frequencies(dtm)
    else:
        tf = bow.bow_stats.get_term_frequencies(dtm)
        assert tf.ndim == 1
        assert tf.shape == (dtm_arr.shape[1],)
        # Summing over the transpose gives the per-column totals.
        assert tf.tolist() == [sum(row) for row in dtm_arr.T]
@given(dtm=st.lists(st.integers(0, 10), min_size=2, max_size=2).flatmap(
    lambda size: st.lists(st.lists(st.integers(0, 10),
                                   min_size=size[0], max_size=size[0]),
                          min_size=size[1], max_size=size[1])
    ),
       matrix_type=st.integers(min_value=0, max_value=2))
def test_get_term_proportions(dtm, matrix_type):
    """Property test: term proportions lie in [0, 1] and sum to 1."""
    # matrix_type: 0 = ndarray, 1 = np.matrix, 2 = scipy COO sparse matrix.
    if matrix_type == 1:
        dtm = np.matrix(dtm)
        dtm_arr = dtm.A
        dtm_flat = dtm.A1
    elif matrix_type == 2:
        dtm = coo_matrix(dtm)
        dtm_arr = dtm.A
        dtm_flat = dtm.A.flatten()
    else:
        dtm = np.array(dtm)
        dtm_arr = dtm
        dtm_flat = dtm.flatten()
    if dtm.ndim != 2:
        with pytest.raises(ValueError):
            bow.bow_stats.get_term_proportions(dtm)
    else:
        if dtm.sum() == 0:
            # An all-zero matrix has no defined proportions.
            with pytest.raises(ValueError):
                bow.bow_stats.get_term_proportions(dtm)
        else:
            tp = bow.bow_stats.get_term_proportions(dtm)
            assert tp.ndim == 1
            assert tp.shape == (dtm_arr.shape[1],)
            if len(dtm_flat) > 0:
                assert np.isclose(tp.sum(), 1.0)
                assert all(0 <= v <= 1 for v in tp)
5042182 | <reponame>CGYR/gongchengmiao_BBS
import datetime
from haystack import indexes
from .models import ArticlePost
class ArticlePostIndex(indexes.SearchIndex, indexes.Indexable):
    """Haystack search index over ArticlePost; full content comes from the
    search template referenced by `use_template=True`."""
    text = indexes.CharField(document=True, use_template=True)
    #author = indexes.CharField(model_attr='author')
    #pub_date = indexes.DateTimeField(model_attr='pub_date')

    def get_model(self):
        """Return the Django model this index covers."""
        return ArticlePost

    #def index_queryset(self, using=None):  # override of index_queryset
    #    """Used when the entire index for model is updated."""
    #    return self.get_model().objects.filter(pub_date__lte=datetime.datetime.now())

    def index_queryset(self, using=None):
        """Index every article when rebuilding the whole index."""
        return self.get_model().objects.all()
3587992 | <filename>CADRE/power_dymos/power_cell_voltage.py
"""
Power discipline for CADRE: Power Cell Voltage component.
"""
from __future__ import print_function, division, absolute_import
from six.moves import range
import os
import numpy as np
from openmdao.api import ExplicitComponent
from MBI import MBI
class PowerCellVoltage(ExplicitComponent):
    """
    Compute the output voltage of the solar panels.

    A pre-trained MBI surrogate (loaded from the `filename` option) maps
    (temperature, illuminated area, current setpoint) to cell voltage for each
    of the 7 cells on each of the 12 panels; the per-panel output voltage is
    the sum over its 7 cells.
    """

    def initialize(self):
        """Declare component options: node count and surrogate data file."""
        fpath = os.path.dirname(os.path.realpath(__file__))

        self.options.declare('num_nodes', types=(int, ),
                             desc="Number of time points.")
        self.options.declare('filename', fpath + '/../data/Power/curve.dat',
                             desc="File containing surrogate model for voltage.")

    def setup(self):
        """Load surrogate data, allocate work buffers and declare I/O."""
        nn = self.options['num_nodes']
        filename = self.options['filename']

        # File layout: three grid sizes (nT, nA, nI), the three grid axes,
        # then the voltage table V flattened in Fortran order.
        dat = np.genfromtxt(filename)

        nT, nA, nI = dat[:3]
        nT = int(nT)
        nA = int(nA)
        nI = int(nI)
        T = dat[3:3 + nT]
        A = dat[3 + nT:3 + nT + nA]
        I = dat[3 + nT + nA:3 + nT + nA + nI]  # noqa: E741
        V = dat[3 + nT + nA + nI:].reshape((nT, nA, nI), order='F')

        self.MBI = MBI(V, [T, A, I], [6, 6, 15], [3, 3, 3])

        # Flat surrogate input buffer: one (T, area, I) row per
        # (node, cell, panel) triple; xV is a reshaped view of the same memory.
        self.x = np.zeros((84 * nn, 3), order='F')
        self.xV = self.x.reshape((nn, 7, 12, 3), order='F')

        # Inputs
        self.add_input('LOS', np.zeros((nn, )), units=None,
                       desc='Line of Sight over Time')
        self.add_input('temperature', np.zeros((nn, 5)), units='degK',
                       desc='Temperature of solar cells over time')
        self.add_input('exposed_area', np.zeros((nn, 7, 12)), units='m**2',
                       desc='Exposed area to sun for each solar cell over time')
        self.add_input('Isetpt', np.zeros((nn, 12)), units='A',
                       desc='Currents of the solar panels')

        # Outputs
        self.add_output('V_sol', np.zeros((nn, 12)), units='V',
                        desc='Output voltage of solar panel over time')

        # Sparsity patterns: each output element depends only on quantities at
        # its own time node (block-diagonal in time).
        rows = np.arange(nn*12)
        cols = np.tile(np.repeat(0, 12), nn) + np.repeat(np.arange(nn), 12)

        self.declare_partials('V_sol', 'LOS', rows=rows, cols=cols)

        row = np.tile(np.repeat(0, 5), 12) + np.repeat(np.arange(12), 5)
        rows = np.tile(row, nn) + np.repeat(12*np.arange(nn), 60)
        col = np.tile(np.arange(5), 12)
        cols = np.tile(col, nn) + np.repeat(5*np.arange(nn), 60)

        self.declare_partials('V_sol', 'temperature', rows=rows, cols=cols)

        row = np.tile(np.arange(12), 7)
        rows = np.tile(row, nn) + np.repeat(12*np.arange(nn), 84)
        cols = np.arange(nn*7*12)

        self.declare_partials('V_sol', 'exposed_area', rows=rows, cols=cols)

        row_col = np.arange(nn*12)

        self.declare_partials('V_sol', 'Isetpt', rows=row_col, cols=row_col)

    def setx(self, inputs):
        """Fill the surrogate input buffer from the component inputs."""
        temperature = inputs['temperature']
        LOS = inputs['LOS']
        exposed_area = inputs['exposed_area']
        Isetpt = inputs['Isetpt']

        for p in range(12):
            # panels 0-3 read temperature column 4; panels 4-11 read p % 4
            i = 4 if p < 4 else (p % 4)
            for c in range(7):
                self.xV[:, c, p, 0] = temperature[:, i]
                self.xV[:, c, p, 1] = LOS * exposed_area[:, c, p]
                self.xV[:, c, p, 2] = Isetpt[:, p]

    def compute(self, inputs, outputs):
        """
        Calculate outputs.
        """
        nn = self.options['num_nodes']

        self.setx(inputs)
        self.raw = self.MBI.evaluate(self.x)[:, 0].reshape((nn, 7, 12), order='F')

        # panel voltage = sum of its 7 cell voltages
        outputs['V_sol'] = np.zeros((nn, 12))
        for c in range(7):
            outputs['V_sol'] += self.raw[:, c, :]

    def compute_partials(self, inputs, partials):
        """
        Calculate and save derivatives. (i.e., Jacobian)
        """
        nn = self.options['num_nodes']

        exposed_area = inputs['exposed_area']
        LOS = inputs['LOS']

        # surrogate partial derivatives w.r.t. each of the 3 input columns
        raw1 = self.MBI.evaluate(self.x, 1)[:, 0].reshape((nn, 7, 12), order='F')
        raw2 = self.MBI.evaluate(self.x, 2)[:, 0].reshape((nn, 7, 12), order='F')
        raw3 = self.MBI.evaluate(self.x, 3)[:, 0].reshape((nn, 7, 12), order='F')

        # bug fix: these arrays are accumulated with '+=' below, so they must
        # start at zero; np.empty left dV_dL and dV_dI holding garbage.
        dV_dL = np.zeros((nn, 12))
        dV_dT = np.zeros((nn, 12, 5))
        dV_dA = np.zeros((nn, 7, 12))
        dV_dI = np.zeros((nn, 12))

        for p in range(12):
            i = 4 if p < 4 else (p % 4)
            for c in range(7):
                dV_dL[:, p] += raw2[:, c, p] * exposed_area[:, c, p]
                dV_dT[:, p, i] += raw1[:, c, p]
                dV_dA[:, c, p] += raw2[:, c, p] * LOS
                dV_dI[:, p] += raw3[:, c, p]

        partials['V_sol', 'LOS'] = dV_dL.flatten()
        partials['V_sol', 'temperature'] = dV_dT.flatten()
        partials['V_sol', 'exposed_area'] = dV_dA.flatten()
        partials['V_sol', 'Isetpt'] = dV_dI.flatten()
1633561 | import numpy as np
from scipy.spatial import distance
from scipy.sparse import csgraph
from matplotlib import pyplot
from matplotlib.widgets import Slider, Button, RadioButtons
import linear_utilities as lu
def rkm(X, init_W, s, plot_ax=None):
    """
    Regularized K-means for principal path, MINIMIZER.

    Alternates between assigning samples to their nearest waypoint and solving
    the regularized least-squares update for the interior waypoints, until the
    assignments stop changing. The first and last rows of init_W are treated
    as fixed boundary waypoints.

    Args:
        [ndarray float] X: data matrix
        [ndarray float] init_W: initial waypoints matrix (boundary rows fixed)
        [float] s: regularization parameter
        [matplotlib.axis.Axes] plot_ax: Axes for the 2D plot (first 2 dim of X), None to avoid plotting

    Returns:
        [ndarray float] W: final waypoints matrix
        [ndarray int] u: final sample-to-waypoint assignment labels

    References:
        [1] 'Finding Principal Paths in Data Space', M.J.Ferrarotti, W.Rocchia, S.Decherchi, [submitted]
        [2] 'Design and HPC Implementation of Unsupervised Kernel Methods in the Context of Molecular Dynamics', M.J.Ferrarotti, PhD Thesis.
    """
    #extract useful info from args
    N = X.shape[0]
    d = X.shape[1]
    NC = init_W.shape[0]-2

    #construct boundary matrix (first/last rows of the boundary contribute to
    #the regularizer right-hand side for the first/last interior waypoints)
    boundary = init_W[[0,NC+1],:]
    B=np.zeros([NC,d],float)
    B[[0,NC-1],:]=boundary

    #construct regularizer hessian (tridiagonal smoothing operator)
    AW = np.diag(np.ones(NC))+np.diag(-0.5*np.ones(NC-1),1)+np.diag(-0.5*np.ones(NC-1),-1)

    #compute initial labels (nearest waypoint in squared euclidean distance)
    XW_dst = distance.cdist(X,init_W,'sqeuclidean')
    u = XW_dst.argmin(1)

    #iterate the minimizer until labels are stable
    converged = False
    it = 0
    while(not converged):
        it = it+1
        print('iteration '+repr(it))

        #compute cardinality (samples assigned to each waypoint)
        W_card=np.zeros(NC+2,int)
        for i in range(NC+2):
            W_card[i] = np.sum(u==i)

        #compute centroid matrix (sum of samples per interior waypoint)
        C = np.ndarray([NC,d],float)
        for i in range(NC):
            C[i,:] = np.sum(X[u==i+1,:],0)

        #construct k-means hessian
        AX = np.diag(W_card[1:NC+1])

        #update waypoints: solve (AX + s*AW) W = C + 0.5*s*B, then re-attach
        #the fixed boundary waypoints
        W = np.matmul(np.linalg.pinv(AX+s*AW),C+0.5*s*B)
        W = np.vstack([boundary[0,:],W,boundary[1,:]])

        #compute new labels
        XW_dst = distance.cdist(X,W,'sqeuclidean')
        u_new = XW_dst.argmin(1)

        #check for convergence (no label changed this iteration)
        converged = not np.sum(u_new!=u)
        u=u_new

        #plot current path over the first two data dimensions
        if(plot_ax is not None):
            pyplot.sca(plot_ax)
            pyplot.ion()
            pyplot.cla()
            pyplot.title('Annealing, s='+repr(s))
            pyplot.plot(X[:,0],X[:,1],'bo')
            pyplot.plot(W[:,0],W[:,1],'-ro')
            pyplot.axis('equal')
            pyplot.pause(1.0/60)

    return W, u
def rkm_cost(X, W, s):
    """
    Regularized K-means for principal path, COST EVALUATION.

    Args:
        [ndarray float] X: data matrix
        [ndarray float] W: waypoints matrix
        [float] s: regularization parameter

    Returns:
        [float] cost_km: K-means part of the cost
        [float] cost_reg: regularizer part of the cost
    """
    # assign each sample to its nearest waypoint (squared euclidean)
    labels = distance.cdist(X, W, 'sqeuclidean').argmin(1)

    # k-means term: squared distance between each sample and its waypoint
    cost_km = 0.0
    for idx in range(X.shape[0]):
        diff = X[idx, :] - W[labels[idx], :]
        cost_km += np.dot(diff, diff)

    # regularizer term: squared length of every path segment, scaled by s
    cost_reg = 0.0
    for idx in range(W.shape[0] - 1):
        seg = W[idx + 1, :] - W[idx, :]
        cost_reg += np.dot(seg, seg)
    cost_reg *= s

    return cost_km, cost_reg
def rkm_prefilter(X, boundary_ids, Nf=200, k=5, p=1000, T=0.1, plot_ax=None):
    """
    Regularized K-means for principal path, PREFILTER.

    Picks Nf medoids, builds a k-NN penalized graph over them, finds the
    shortest medoid path between the two boundary samples, and keeps only
    the data assigned to medoids near that path.

    Args:
        [ndarray float] X: data matrix
        [ndarray int] boundary_ids: start/end waypoints as sample indices
        [int] Nf: number of filter centroids
        [int] k: number of nearest neighbor for the penalized graph
        [float] p: penalty factor for the penalized graph
        [float] T: filter threshold
        [matplotlib.axis.Axes] plot_ax: Axes for the 2D plot (first 2 dim of X), None to avoid plotting

    Returns:
        [ndarray float] X_filtered
        [ndarray int] boundary_ids_filtered
        [ndarray float] X_garbage
    """
    #pick Nf medoids with k-means++ and compute pairwise distance matrix
    med_ids = lu.initMedoids(X, Nf-2, 'kpp', boundary_ids)
    med_ids = np.hstack([boundary_ids[0],med_ids,boundary_ids[1]])
    medmed_dst = distance.cdist(X[med_ids,:],X[med_ids,:],'sqeuclidean')

    #build k-nearest-neighbor penalized matrix: all edges penalized by p,
    #except each medoid's k nearest neighbors which keep their true distance
    knn_ids = np.argsort(medmed_dst,1)
    medmed_dst_p = medmed_dst.copy()*p
    for i in range(Nf):
        for j in range(k):
            # NOTE(review): this reassignment clobbers the neighbor-count
            # parameter `k`, so range(k) changes after the first outer
            # iteration — looks unintended; confirm against the reference.
            k=knn_ids[i,j]
            medmed_dst_p[i,k] = medmed_dst[i,k]
            medmed_dst_p[k,i] = medmed_dst[k,i]
    #force a zero-cost edge between the two boundary medoids
    medmed_dst_p[0,Nf-1]=0
    medmed_dst_p[Nf-1,0]=0

    #find shortest path using dijkstra, then backtrack predecessors from the
    #end medoid (Nf-1) to the start medoid (0)
    [path_dst, path_pre] = csgraph.dijkstra(medmed_dst_p, False, 0,True)
    path=np.ndarray(0,int)
    i=Nf-1
    while(i != 0):
        path=np.hstack([i,path])
        i = path_pre[i]
    path=np.hstack([i,path])

    #filter out medoids too close to the shortest path
    #(threshold is relative to the mean pairwise medoid distance)
    T=T*np.mean(medmed_dst)
    to_filter_ids=np.ndarray(0,int)
    for i in path:
        to_filter_ids = np.hstack([np.where(medmed_dst[i,:]<T)[0], to_filter_ids])
    to_filter_ids = np.setdiff1d(to_filter_ids,path)
    to_filter_ids = np.unique(to_filter_ids)
    to_keep_ids = np.setdiff1d(np.asarray(range(Nf)),to_filter_ids)

    #keep samples whose nearest surviving medoid lies on the shortest path
    Xmed_dst = distance.cdist(X,X[med_ids[to_keep_ids],:],'sqeuclidean')
    u = med_ids[to_keep_ids][Xmed_dst.argmin(1)]
    N=X.shape[0]
    filter_mask = np.zeros(N,bool)
    for i in range(N):
        if u[i] in med_ids[path]:
            filter_mask[i]=True

    #convert boundary indices: position of each boundary sample within the
    #filtered (kept) subset
    boundary_ids_filtered = boundary_ids.copy()
    boundary_ids_filtered[0] = boundary_ids[0] - boundary_ids[0] + np.sum(filter_mask[0:boundary_ids[0]])
    boundary_ids_filtered[1] = boundary_ids[1] - boundary_ids[1] + np.sum(filter_mask[0:boundary_ids[1]])

    #plot filter figure
    if(plot_ax is not None):
        pyplot.sca(plot_ax)
        pyplot.ion()

        pyplot.plot(X[np.logical_not(filter_mask),0],X[np.logical_not(filter_mask),1],'yo',label='data filtered out')
        pyplot.plot(X[filter_mask,0],X[filter_mask,1],'bo',label='data kept')
        pyplot.plot(X[med_ids,0],X[med_ids,1],'ro',label='filter medoids')
        pyplot.plot(X[med_ids[to_filter_ids],0],X[med_ids[to_filter_ids],1],'kx',label='filter medoids dropped')
        pyplot.plot(X[med_ids[path],0],X[med_ids[path],1],'-go',label='filter shortest path')
        pyplot.plot(X[filter_mask,:][boundary_ids_filtered,0],X[filter_mask,:][boundary_ids_filtered,1],'mo',label='boundary samples')
        pyplot.legend()
        pyplot.axis('equal')

    return X[filter_mask,:], boundary_ids_filtered, X[np.logical_not(filter_mask),:]
def rkm_MS_evidence(models, s_span, X):
    """
    Regularized K-means for principal path, MODEL SELECTION, Bayesian Evidence.

    Args:
        [ndarray float] models: matrix with path models, shape N_models x N x (NC+2)
        [ndarray float] s_span: array with values of the reg parameter for each model (sorted in decreasing order, with 0 as last value)
        [ndarray float] X: data matrix

    Returns:
        [ndarray float] logE_s: array with values of log evidence for each model
    """
    if(s_span[-1]>0.0):
        raise ValueError('In order to evaluate the evidence a model with s=0 has to be provided')

    #Evaluate unregularized cost (the s=0 model is the last one by convention)
    cost_ureg=np.sum(rkm_cost(X, models[-1,:,:],s_span[-1]))

    logE_s = np.ndarray(s_span.size,float)
    for i,s in enumerate(s_span):
        N = X.shape[0]
        W = models[i,:,:]
        NC = W.shape[0]-2
        d = W.shape[1]

        #Set gamma (empirical rational) and compute lambda
        gamma = np.sqrt(N)*0.125/np.mean(distance.cdist(X,X,'euclidean'))
        lambd = s*gamma

        #Maximum Posterior cost
        cost_MP=np.sum(rkm_cost(X, W, s))

        #Find labels (nearest-waypoint assignment)
        XW_dst = distance.cdist(X,W,'sqeuclidean')
        u = XW_dst.argmin(1)

        #Compute cardinality (samples per waypoint)
        W_card=np.zeros(NC+2,int)
        for j in range(NC+2):
            W_card[j] = np.sum(u==j)

        #Construct boundary matrix
        boundary = W[[0,NC+1],:]
        B=np.zeros([NC,d],float)
        B[[0,NC-1],:]=boundary

        #Construct regularizer hessian (tridiagonal smoothing operator)
        AW = np.diag(np.ones(NC))+np.diag(-0.5*np.ones(NC-1),1)+np.diag(-0.5*np.ones(NC-1),-1)

        #Construct k-means hessian
        AX = np.diag(W_card[1:NC+1])

        #Compute global hessian
        A = AX+s*AW

        #Evaluate log-evidence (Laplace-style approximation around the MP)
        logE = -0.5*d*np.log(np.sum(np.linalg.eigvals(A)))
        logE = logE + gamma*(cost_ureg-cost_MP)
        if(lambd>0):
            logE = logE + 0.5*d*NC*np.log(lambd)
        else:
            #guard against log(0) for the unregularized (s=0) model
            logE = logE + 0.5*d*NC*np.log(lambd+np.finfo(np.float).eps)
        logE = logE - 0.125*lambd*np.trace(np.matmul(B.T,np.matmul(np.linalg.pinv(AW),B)))
        logE = logE + 0.25*lambd*np.trace(np.matmul(B.T,B))

        logE_s[i] = logE

    return logE_s
def rkm_MS_pathlen(models, s_span, X):
    """
    Regularized K-means for principal path, MODEL SELECTION, Path length.

    Args:
        [ndarray float] models: matrix with path models, shape N_models x N x (NC+2)
        [ndarray float] s_span: array with values of the reg parameter for each model (sorted in decreasing order, with 0 as last value)
        [ndarray float] X: data matrix

    Returns:
        [ndarray float] len_s: array with values of path length for each model
    """
    lengths = np.zeros(s_span.size, float)
    for idx in range(s_span.size):
        W = models[idx, :, :]
        # total length = sum of Euclidean distances between consecutive waypoints
        segment_len = np.sqrt(np.sum((W[1:, :] - W[:-1, :]) ** 2, axis=1))
        lengths[idx] = np.sum(segment_len)
    return lengths
def rkm_MS_pathvar(models, s_span, X):
    """
    Regularized K-means for principal path, MODEL SELECTION, variance on waypoints interdistance.

    Args:
        [ndarray float] models: matrix with path models, shape N_models x N x (NC+2)
        [ndarray float] s_span: array with values of the reg parameter for each model (sorted in decreasing order, with 0 as last value)
        [ndarray float] X: data matrix

    Returns:
        [ndarray float] W_dst_var: array with values of variance for each model
    """
    # for each model: variance of the consecutive-waypoint distances
    return np.array([
        np.var(np.linalg.norm(model[1:, :] - model[:-1, :], axis=1))
        for model in models
    ], dtype=float)
def rkm_MS_ksgm(models, s_span, X):
    """
    Regularized K-means for principal path, MODEL SELECTION, k-segment projection error.

    Projects every sample onto the nearest path segment (or nearest waypoint
    when no orthogonal projection falls inside a segment) and sums the squared
    projection distances.

    Args:
        [ndarray float] models: matrix with path models, shape N_models x N x (NC+2)
        [ndarray float] s_span: array with values of the reg parameter for each model (sorted in decreasing order, with 0 as last value)
        [ndarray float] X: data matrix

    Returns:
        [ndarray float] ksgm_s: array with values of k-segment projection error for each model
    """
    N = X.shape[0]
    KX = np.matmul(X,X.T)
    ksgm_s = np.zeros(models.shape[0],float)
    for i in range(models.shape[0]):
        W = models[i,:,:]
        NC = W.shape[0]
        KW = np.matmul(W,W.T)
        KXW = np.matmul(X,W.T)

        #a2: squared distance from each sample to each segment start;
        #b2: squared segment lengths; ab: projection numerators — all
        #expressed via Gram matrices
        a2 = np.tile(np.diag(KX)[:,np.newaxis],[1,NC-1]) + np.tile(np.diag(KW)[:-1],[N,1]) - 2*KXW[:,:-1]
        b2 = np.diag(KW)[:-1]+np.diag(KW)[1:]-2*np.diag(KW,1)
        ab = KXW[:,1:]-KXW[:,:-1]+np.tile(np.diag(KW)[:-1],[N,1])-np.tile(np.diag(KW,1),[N,1])
        if(np.all(b2>0)):
            dst2 = a2 - ab*ab / b2
        else:
            #guard against zero-length segments (duplicate waypoints)
            dst2 = a2 - ab*ab / (b2+np.finfo(np.float).eps)

        #a projection is valid only if it falls strictly inside the segment
        prj_mask = np.logical_and(ab>0,ab<b2)
        dst2[prj_mask==0] = np.inf
        prj_mask = np.max(prj_mask,1)

        #samples with at least one valid segment projection use the segment
        #distance; the rest fall back to the nearest waypoint
        dst2_line = np.min(dst2,1)
        dst2_vrtx = np.min(distance.cdist(X,W,'sqeuclidean'),1)
        ksgm_s[i] = np.sum(dst2_line[prj_mask])+np.sum(dst2_vrtx[prj_mask==0])

    return ksgm_s
def rkm_MS_gui(models, s_span, X, X_g=None):
    """
    Regularized K-means for principal path, MODEL SELECTION, interactive GUI.

    Shows the data (first two dimensions) with the currently selected path
    model, a slider to browse models over s_span, and buttons that run the
    four automatic model-selection criteria (evidence, k-segment projection
    error, path length elbow, waypoint-distance variance elbow).

    Args:
        [ndarray float] models: matrix with path models, shape N_models x (NC+2) x d
        [ndarray float] s_span: reg parameter per model (decreasing, 0 last)
        [ndarray float] X: data matrix
        [ndarray float] X_g: optional discarded ("garbage") samples, plotted separately

    Returns:
        [int] index into s_span of the model selected with the slider
    """
    ####
    #GUI
    ####

    #Main axis (for data)
    pyplot.ion()
    [gui, ax_data] = pyplot.subplots()
    ax_data.set_title('Interactive Model Exploration')
    pyplot.subplots_adjust(0.25, 0.25, 0.75, 0.9)

    #buttons to perform MS
    ax_MS_ev_btn = pyplot.axes([0.8, 0.85, 0.2, 0.05])
    MS_ev_btn = Button(ax_MS_ev_btn, 'MS: evidence')

    ax_MS_ksgm_btn = pyplot.axes([0.8, 0.75, 0.2, 0.05])
    MS_ksgm_btn = Button(ax_MS_ksgm_btn, 'MS: k-segment')

    ax_MS_len_btn = pyplot.axes([0.8, 0.65, 0.2, 0.05])
    MS_len_btn = Button(ax_MS_len_btn, 'MS: path len')

    ax_MS_var_btn = pyplot.axes([0.8, 0.55, 0.2, 0.05])
    MS_var_btn = Button(ax_MS_var_btn, 'MS: path var')

    #slider to select s (slider runs backwards w.r.t. s_span indexing)
    ax_s_sld = pyplot.axes([0.25, 0.1, 0.5, 0.03])
    ax_s_sld.set_title('[drag to change the value of s]')
    s_sld = Slider(ax_s_sld, 's', 0, s_span.size-1, valstep=1.0)

    ####
    #initial plot
    ####
    [X_plt, ] = ax_data.plot(X[:, 0], X[:, 1], 'bo')
    if(X_g is not None):
        [X_g_plt, ] = ax_data.plot(X_g[:, 0], X_g[:, 1], 'yo')
    s_id = 0
    [W_plt, ] = ax_data.plot(models[s_id, :, 0], models[s_id, :, 1], '-ro')
    ax_data.axis('equal')

    ####
    #event handlers
    ####

    #s slider handler: redraw the path for the newly selected model
    def s_sld_onchanged(val):
        s_id = int(s_span.size-1-val)
        W_plt.set_data(models[s_id, :, 0:2].T)
        s_sld.valtext.set_text("s={:.2f}\ns_id={:d}".format(s_span[s_id], s_id))

    #max evidence button handler
    def MS_ev_btn_onclicked(ev):
        logE_s = rkm_MS_evidence(models, s_span, X)
        s_maxE_id = np.argmax(logE_s)
        s_sld.set_val(s_span.size-1-s_maxE_id)

        [fig, (ax1, ax2)] = pyplot.subplots(2, 1)

        #plot evidence vs s
        ax1.set_title('Model Selection with max Evidence')
        ax1.set_xlabel('s')
        ax1.set_ylabel('log(E)')
        ax1.semilogx(np.flip(s_span, 0), np.flip(logE_s, 0))
        ax1.plot(s_span[s_maxE_id], logE_s[s_maxE_id], 'ro')

        #plot model selected
        ax2.plot(X[:, 0], X[:, 1], 'bo')
        if(X_g is not None):
            ax2.plot(X_g[:, 0], X_g[:, 1], 'yo')
        ax2.plot(models[s_maxE_id, :, 0], models[s_maxE_id, :, 1], '-ro')
        ax2.axis('equal')

    #k-segment projection error button handler: pick the first local minimum
    def MS_ksgm_btn_onclicked(ev):
        ksgm_s = rkm_MS_ksgm(models, s_span, X)
        i = 0
        while(i < ksgm_s.size-1 and ksgm_s[i] > ksgm_s[i+1]):
            i = i+1
        s_minksgm_id = i
        s_sld.set_val(s_span.size-1-s_minksgm_id)

        #plot k-segment projection error vs s
        [fig, (ax1, ax2)] = pyplot.subplots(2, 1)
        ax1.set_title('Model Selection with min k-segment projection error')
        ax1.set_xlabel('s')
        ax1.set_ylabel('ksgm')
        ax1.semilogx(np.flip(s_span, 0), np.flip(ksgm_s, 0))
        ax1.plot(s_span[s_minksgm_id], ksgm_s[s_minksgm_id], 'ro')

        #plot model selected
        ax2.plot(X[:, 0], X[:, 1], 'bo')
        if(X_g is not None):
            ax2.plot(X_g[:, 0], X_g[:, 1], 'yo')
        ax2.plot(models[s_minksgm_id, :, 0], models[s_minksgm_id, :, 1], '-ro')
        ax2.axis('equal')

    #elbow criteria on path length button handler
    def MS_len_btn_onclicked(ev):
        len_s = rkm_MS_pathlen(models, s_span, X)
        s_elb_id = lu.find_elbow(np.stack([s_span, len_s], -1))
        s_sld.set_val(s_span.size-1-s_elb_id)

        #plot path length vs s
        [fig, (ax1, ax2)] = pyplot.subplots(2, 1)
        ax1.set_title('Model Selection with elbow method on path length')
        ax1.set_xlabel('s')
        ax1.set_ylabel('path length')
        ax1.plot(np.flip(s_span, 0), np.flip(len_s, 0))
        ax1.plot(s_span[s_elb_id], len_s[s_elb_id], 'ro')

        #plot model selected
        ax2.plot(X[:, 0], X[:, 1], 'bo')
        if(X_g is not None):
            ax2.plot(X_g[:, 0], X_g[:, 1], 'yo')
        ax2.plot(models[s_elb_id, :, 0], models[s_elb_id, :, 1], '-ro')
        ax2.axis('equal')

    #elbow criteria on waypoints distance variance button handler
    def MS_var_btn_onclicked(ev):
        W_dst_var = rkm_MS_pathvar(models, s_span, X)
        s_elb_id = lu.find_elbow(np.stack([s_span, W_dst_var], -1))
        s_sld.set_val(s_span.size-1-s_elb_id)

        #plot waypoints distance variance vs s
        [fig, (ax1, ax2)] = pyplot.subplots(2, 1)
        ax1.set_title('Model Selection with elbow method on waypoins distance variance')
        ax1.set_xlabel('s')
        ax1.set_ylabel('W distance variance')
        ax1.plot(np.flip(s_span, 0), np.flip(W_dst_var, 0))
        ax1.plot(s_span[s_elb_id], W_dst_var[s_elb_id], 'ro')

        #plot model selected
        ax2.plot(X[:, 0], X[:, 1], 'bo')
        if(X_g is not None):
            ax2.plot(X_g[:, 0], X_g[:, 1], 'yo')
        ax2.plot(models[s_elb_id, :, 0], models[s_elb_id, :, 1], '-ro')
        ax2.axis('equal')

    ####
    #register handlers
    ####
    s_sld.on_changed(s_sld_onchanged)
    MS_ev_btn.on_clicked(MS_ev_btn_onclicked)
    MS_ksgm_btn.on_clicked(MS_ksgm_btn_onclicked)
    MS_len_btn.on_clicked(MS_len_btn_onclicked)
    MS_var_btn.on_clicked(MS_var_btn_onclicked)

    s_sld.set_val(s_span.size/2)
    pyplot.show()
    # bug fix: `raw_input` is Python-2 only; use the Python-3 builtin `input`
    input('select model with GUI then press [enter] to continue')
    return int(s_span.size-1-s_sld.val)
| StarcoderdataPython |
11289034 | # -*- coding: utf-8 -*-
import sys
sys.path.append('.')
from common import TestCase
import unittest
import fatuv as uv
from fatuv import dns
# loopback address and an arbitrary high port used by the structure tests
TEST_IPV4 = '127.0.0.1'
TEST_PORT1 = 12345
class TestDNS(TestCase):
    """Exercises the fatuv DNS helpers: async getaddrinfo and the result
    structure types (Address4, AddrInfo)."""

    def test_dns_async(self):
        """getaddrinfo must invoke the callback with a non-empty addrinfo list."""
        def got_addrinfo(request, code, addrinfo):
            self.assert_true(addrinfo)

        #def got_nameinfo(request, code, hostname, service):
        #    self.assert_equal(service, 'http')

        dns.getaddrinfo('localhost', 80, callback=got_addrinfo)
        #uv.getnameinfo('127.0.0.1', 80, callback=got_nameinfo)
        self.loop.run()

    def test_structures(self):
        """Address4 and AddrInfo must expose the values they were built with."""
        address4 = dns.Address4(TEST_IPV4, TEST_PORT1)
        self.assert_equal(address4.host, TEST_IPV4)
        self.assert_equal(address4.port, TEST_PORT1)

        #nameinfo = uv.NameInfo('localhost', 'http')
        #self.assert_equal(nameinfo.hostname, 'localhost')
        #self.assert_equal(nameinfo.service, 'http')

        addrinfo = dns.AddrInfo(0, 1, 2, None, address4)
        self.assert_equal(addrinfo.family, 0)
        self.assert_equal(addrinfo.socktype, 1)
        self.assert_equal(addrinfo.protocol, 2)
        self.assert_is(addrinfo.canonname, None)
        self.assert_equal(addrinfo.address, address4)
# allow running this test module directly from the command line
if __name__ == '__main__':
    unittest.main(verbosity=2)
12823940 | <reponame>Dataforsyningen/skraafoto_tile_public
"""TIFF read exceptions."""
class HTTPError(Exception):
    """Represents an upstream http error.

    Carries the error message and an optional upstream HTTP status code.
    """

    # non-zero process exit status for callers that abort on this error
    exit_code = 1

    def __init__(self, message, status=None):
        # pass the message to Exception so str(err) and err.args are meaningful
        super().__init__(message)
        self.message = message
        self.status = status
class HTTPRangeNotSupportedError(Exception):
    """Represents an error indicating missing upstream support for http range requests."""
    # non-zero process exit status for callers that abort on this error
    exit_code = 1
class TIFFError(Exception):
    """Raised when reading or parsing TIFF data fails."""

    # non-zero process exit status for callers that abort on this error
    exit_code = 1

    def __init__(self, message):
        # pass the message to Exception so str(err) and err.args are meaningful
        super().__init__(message)
        self.message = message
class JPEGError(Exception):
    """Raised when decoding or handling JPEG data fails."""

    # non-zero process exit status for callers that abort on this error
    exit_code = 1

    def __init__(self, message):
        # pass the message to Exception so str(err) and err.args are meaningful
        super().__init__(message)
        self.message = message
| StarcoderdataPython |
1623760 | <reponame>Softeq/PyCats
from common.facade.api import BaseRequestModel, BaseResponseModel, endpoint_factory
from common.facade.api import SKIP, pycats_dataclass
from common.facade import raw_config, logger
# TODO - find solution to set dataclass fields properly after initialization
def query_builder(city, token, units='imperial'):
    """Build the weather API query string: city, API token and unit system."""
    return 'q={}&appid={}&units={}'.format(city, token, units)
@pycats_dataclass
class DailyWeatherEndpointBuilder:
    """Facade endpoint definition for the daily-weather API 'weather' resource:
    bundles the request model, the expected-response model and a helper to
    fetch weather details for a city."""

    @pycats_dataclass
    class _DailyWeatherRequestModel(BaseRequestModel):
        # GET-only request against the 'weather' resource; no request bodies
        resource: str = 'weather'
        headers = {"Accept": "application/json"}
        post_data = None
        put_data = None
        patch_data = None
        delete_data = None
        params = None
        allowed_methods = ("get",)

    @pycats_dataclass
    class _DailyWeatherResponseModel(BaseResponseModel):
        # expected 200 JSON response; SKIP marks fields validated for presence
        # only, not for exact value
        status_code = 200
        headers = {'Content-Type': 'application/json; charset=utf-8'}
        get_data = {"coord": {"lon": SKIP, "lat": SKIP},
                    "weather": [{"id": SKIP, "main": SKIP, "description": SKIP, "icon": SKIP}],
                    "base": "stations",
                    "main": {"temp": SKIP, "feels_like": SKIP, "temp_min": SKIP, "temp_max": SKIP, "pressure": SKIP,
                             "humidity": SKIP}, "visibility": SKIP, "wind": {"speed": SKIP},
                    "clouds": {"all": SKIP}, "dt": SKIP,
                    "sys": {"type": SKIP, "id": SKIP, "country": SKIP, "sunrise": SKIP, "sunset": SKIP},
                    "timezone": SKIP, "id": SKIP, "name": SKIP, "cod": SKIP}
        post_data = None
        put_data = None
        delete_data = None
        patch_data = None
        error_data = None
        custom_checkers = []

    _DailyWeatherResponseModel.configure_validator()
    endpoint = endpoint_factory(raw_config.project_settings.web_api_url, __qualname__,  # noqa
                                _DailyWeatherRequestModel, _DailyWeatherResponseModel)

    def get_weather_details(self, city, token, units='imperial'):
        """GET the current weather for `city` and return the validated payload."""
        logger.info("Get weather details")
        self.endpoint.request_model.params = query_builder(city, token, units)
        result = self.endpoint.get()
        return result.get_data
| StarcoderdataPython |
9602759 | <reponame>emilljungberg/pyMERLIN
#!/usr/bin/env python3
from .plot import plot_3plane
from .dataIO import read_image_h5, parse_fname
import matplotlib.pyplot as plt
import argparse
import os
import h5py
import numpy as np
import SimpleITK as sitk
def create_info(matrix, voxel_size, read_points, read_gap, spokes_hi, spokes_lo, lo_scale,
                channels, volumes, tr=0, origin=None, direction=None):
    """
    Creates a numpy structured array for riesling h5 files.

    Inputs:
        - matrix: Matrix size (x,y,z)
        - voxel_size: Voxel size in mm (x,y,z)
        - read_points: Number of readout points along the spoke
        - read_gap: Deadtime gap
        - spokes_hi: Number of highres spokes
        - spokes_lo: Number of lowres spokes
        - lo_scale: Scale factor of the low res spokes
        - channels: Number of receive channels
        - volumes: Number of volumes
        - tr: Repetition time (Default=0)
        - origin: Origin of image (x,y,z) (Default: 0,0,0)
        - direction: Orientation as 9 direction cosines (Default: identity)

    Return: info (structured numpy array holding a single record)
    """
    # bug fixes: a shared mutable default list for `origin` could leak state
    # between calls, and `if not direction:` raises for numpy arrays (ambiguous
    # truth value). Also, the 'direction' field holds 9 floats, so the default
    # identity matrix must be flattened before it is stored.
    if origin is None:
        origin = [0, 0, 0]
    if direction is None:
        direction = np.eye(3).flatten()

    D = np.dtype({'names': [
        'matrix',
        'voxel_size',
        'read_points',
        'read_gap',
        'spokes_hi',
        'spokes_lo',
        'lo_scale',
        'channels',
        'volumes',
        'tr',
        'origin',
        'direction'],
        'formats': [
        ('<i8', (3,)),
        ('<f4', (3,)),
        '<i8',
        '<i8',
        '<i8',
        '<i8',
        '<f4',
        '<i8',
        '<i8',
        '<f4',
        ('<f4', (3,)),
        ('<f4', (9,))]
    })

    info = np.array([(matrix, voxel_size, read_points, read_gap, spokes_hi, spokes_lo, lo_scale,
                      channels, volumes, tr, origin, direction)], dtype=D)

    return info
def nii2h5():
    """
    Converts a nifti file to riesling format .h5 image file

    .. code:: text

        usage: nii2h5 niifile

        nii2h5 converts from nii to h5

        positional arguments:
        input       Input nii image

        optional arguments:
        -h, --help  show this help message and exit
        --out OUT   Output h5 image
    """
    parser = argparse.ArgumentParser(description='nii2h5 converts from nii to h5',
                                     usage='nii2h5 niifile')
    parser.add_argument("input", help="Input nii image")
    parser.add_argument("--out", help="Output h5 image",
                        required=False, type=str)

    args = parser.parse_args()

    print("Opening {}".format(args.input))
    img = sitk.ReadImage(args.input)
    # carry the nifti geometry over into the h5 'info' record
    origin = img.GetOrigin()
    spacing = img.GetSpacing()
    direction = img.GetDirection()
    img_data = sitk.GetArrayFromImage(img)

    # matrix/trajectory fields are zeroed: they do not apply to an image import
    info = create_info(matrix=[0, 0, 0],
                       voxel_size=list(spacing),
                       read_points=0, read_gap=0, spokes_hi=0, spokes_lo=0, lo_scale=0,
                       channels=1, volumes=1, origin=list(origin), direction=list(direction))

    output_name = None
    if args.out:
        output_name = args.out
    else:
        # default: same base name with .h5 extension
        output_name = parse_fname(args.input) + '.h5'

    # refuse to overwrite an existing file
    if os.path.isfile(output_name):
        print('{} output already exists'.format(output_name))
        return
    else:
        print('Writing to {}'.format(output_name))
        h5 = h5py.File(output_name, 'w')
        # riesling expects a leading volume axis on the image dataset
        h5.create_dataset('image', data=img_data[np.newaxis, ...])
        h5.create_dataset('info', data=info)
        h5.close()
def h52nii():
    """
    Converts riesling image .h5 file to nifti.

    .. code:: text

        usage: h52nii h5image

        h52nii converts from h5 to nii

        positional arguments:
        input       Input h5 image

        optional arguments:
        -h, --help  show this help message and exit
        --out OUT   Output image
    """
    parser = argparse.ArgumentParser(description='h52nii converts from h5 to nii',
                                     usage='h52nii h5image')
    parser.add_argument("input", help="Input h5 image")
    parser.add_argument("--out", help="Output image",
                        required=False, type=str)

    args = parser.parse_args()

    print("Opening {}".format(args.input))
    f = h5py.File(args.input, 'r')
    info = f['info'][:]
    # drop the leading volume axis riesling stores on the image dataset
    data = f['image'][0, ...]
    f.close()

    # restore image geometry from the h5 'info' record
    voxel_size = np.array(info['voxel_size'][0], dtype=float)
    origin = np.array(info['origin'][0], dtype=float)
    direction = np.array(info['direction'][0], dtype=float)

    # complex data is written as magnitude
    img = sitk.GetImageFromArray(abs(data))
    img.SetOrigin(origin)
    img.SetSpacing(voxel_size)
    img.SetDirection(direction)

    output_name = None
    if args.out:
        output_name = args.out
    else:
        # default: same base name with .nii.gz extension
        output_name = os.path.splitext(args.input)[0] + '.nii.gz'

    writer = sitk.ImageFileWriter()
    writer.SetFileName(output_name)
    print("Saving output to: {}".format(output_name))
    writer.Execute(img)
def h5viewer():
    """
    Simple static 3-plane viewer of .h5 image data. Will read .h5 files in the
    riesling format, i.e. with a dataset named ``image``.

    .. code:: text

        usage: h5viewer file.h5

        positional arguments:
        H5          File input

        optional arguments:
        -v          Volume to show (default=0)
        -e          Echo to show (default=0)
        -h, --help  Show this help message and exit
    """
    parser = argparse.ArgumentParser(
        description="h5viewer", usage='h5viewer file.h5')
    parser.add_argument("h5file", metavar="H5", help="File input", type=str)
    parser.add_argument("-v", help="Volume", type=int, default=0)
    parser.add_argument("-e", help="Echo", type=int, default=0)
    args = parser.parse_args()

    # bug fix: argparse stores "-v"/"-e" as args.v / args.e (there are no
    # --vol/--echo long options), and the old code also referenced an
    # undefined name `f` when printing/plotting.
    I, _ = read_image_h5(args.h5file, args.v, args.e)

    print("Displaying %s" % args.h5file)
    plot_3plane(abs(I), title=args.h5file, cmap='gray', vmin=None, vmax=None)
    plt.tight_layout()
    plt.show()
plt.show()
| StarcoderdataPython |
5176233 | from app.configuration import get_value
from app.helper import output_start, php, output_error
def execute():
    """Run phpunit over the configured check directory and report failures."""
    output_start('phpunit')
    check_dir = get_value('check-dir')
    argument = get_test_argument(check_dir)

    # optionally append coverage/crap4j reporting arguments
    if get_value('phpunit-coverage') == 'true':
        argument = argument+get_coverage_argument(check_dir)

    code = php('phpunit', argument)

    # non-zero phpunit exit status means at least one test failed
    if code != 0:
        output_error('Some tests failed.')
def get_test_argument(check_dir):
    """Build the phpunit CLI arguments: configuration file and JUnit log path."""
    phpunit_xml = get_value('project-dir')+get_value('phpunit-xml')
    phpunit_junit_xml = check_dir+'phpunit.xml'

    print('>>> phpunit.xml configuration: '+phpunit_xml)
    # bug fix: this line previously printed the configuration path again
    # instead of the JUnit log path
    print('>>> JUnit phpunit.xml log: '+phpunit_junit_xml)

    return '--configuration '+phpunit_xml+' --debug --log-junit '+phpunit_junit_xml
def get_coverage_argument(check_dir):
    """Return extra phpunit arguments enabling HTML/clover coverage and crap4j output."""
    base_dir = get_value('build-dir')

    print('>>> With coverage and crap index')

    coverage_parts = (
        ' --coverage-html ' + base_dir + 'coverage',
        ' --coverage-clover ' + check_dir + 'coverage.xml',
        ' --coverage-crap4j ' + check_dir + 'crap4j.xml',
    )
    return ''.join(coverage_parts)
| StarcoderdataPython |
4886452 | import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torchvision.datasets.mnist import MNIST
def get_mnist(path="./data/mnist", batch_size=256, shuffle=True, num_workers=8):
    """Return (train_loader, test_loader) for MNIST resized to 32x32 tensors.

    Downloads the dataset into `path` if missing. Only the training loader is
    shuffled; the test loader keeps the dataset order.
    """
    train_data = MNIST(path, download=True, transform=transforms.Compose([
        transforms.Resize((32, 32)),
        transforms.ToTensor()
    ]))
    test_data = MNIST(path, download=True, train=False, transform=transforms.Compose([
        transforms.Resize((32, 32)),
        transforms.ToTensor()
    ]))

    train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
    test_loader = DataLoader(test_data, batch_size=batch_size, num_workers=num_workers)

    return train_loader, test_loader
| StarcoderdataPython |
6690255 | from app.api.wechat import wechat | StarcoderdataPython |
256543 | from setuptools import setup, find_packages
from setuptools import *
# Package metadata for the dCrypy CLI, a Click-based ciphertext decoder.
setup(
    name = "dCrypy",
    version = "1.0.1",
    author = "Stealth.py",
    url = "https://github.com/Stealth-py/dCrypy",
    description = "A CLI to decode ciphertext to plaintext from the given options.",
    license = "Apache",
    packages = find_packages(),
    include_package_data = True,
    install_requires = [
        "Click",
    ],
    # expose `dcrypy` as a console command mapped to the Click entry point
    entry_points = {
        "console_scripts":[
            "dcrypy=dcrypy.cli:cli"
        ]
    },
    classifiers = [
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
    ],
)

"""
dCrypy
Stealth.py, 2021
"""
1678457 | from dataclasses import dataclass
from typing import List
from lakey_finicity.models.report.voa.voa_account_record import VoaAccountRecord
@dataclass
class VoaInstitutionRecord(object):
_unused_fields: dict # this is for forward compatibility and should be empty
id: int
name: str
accounts: List[VoaAccountRecord]
@staticmethod
def from_dict(data: dict):
data = dict(data) # don't mutate the original
id = data.pop('id')
name = data.pop('name')
accounts_raw = data.pop('accounts', None)
accounts = [VoaAccountRecord.from_dict(d) for d in accounts_raw] if accounts_raw else []
return VoaInstitutionRecord(
id=id,
name=name,
accounts=accounts,
_unused_fields=data,
)
| StarcoderdataPython |
1673189 | <filename>Script2.py<gh_stars>0
# Task 2: print "Hello World" in green using ANSI escape codes.
print(u"\u001b[32mHello World")
# reset the terminal color back to the default
print (u"\u001b[0m")
11322023 | <filename>tests/annotator/test_pos_annotator.py
#!/usr/bin/env python
"""Tests for the POSAnnotator that annotates a sentence with part of speech tags
that align with the token tier."""
from __future__ import absolute_import
import unittest
from epitator.annotator import AnnoDoc
from epitator.pos_annotator import POSAnnotator
class POSAnnotatorTest(unittest.TestCase):
    """Checks that POSAnnotator fills the 'pos' tier with one span per token,
    each carrying a Penn Treebank tag and character offsets into the text."""

    def setUp(self):
        self.annotator = POSAnnotator()

    def test_simple_sentence(self):
        # "Hi Joe." -> interjection, proper noun, sentence-final punctuation.
        self.doc = AnnoDoc("Hi Joe.")
        self.annotator.annotate(self.doc)
        self.assertEqual(len(self.doc.tiers['pos'].spans), 3)
        self.assertEqual(self.doc.tiers['pos'].spans[0].label, 'UH')
        self.assertEqual(self.doc.tiers['pos'].spans[0].start, 0)
        self.assertEqual(self.doc.tiers['pos'].spans[0].end, 2)
        self.assertEqual(self.doc.tiers['pos'].spans[1].label, 'NNP')
        self.assertEqual(self.doc.tiers['pos'].spans[1].start, 3)
        self.assertEqual(self.doc.tiers['pos'].spans[1].end, 6)
        self.assertEqual(self.doc.tiers['pos'].spans[2].label, '.')
        self.assertEqual(self.doc.tiers['pos'].spans[2].start, 6)
        self.assertEqual(self.doc.tiers['pos'].spans[2].end, 7)

    def test_initial_space(self):
        # Leading whitespace is not tokenized; offsets still index the raw text.
        self.doc = AnnoDoc(" Hi.")
        self.annotator.annotate(self.doc)
        # This is true for the default wordpunct annotator, but not e.g. the
        # SpaceAnnotator
        self.assertEqual(len(self.doc.tiers['pos'].spans), 2)
        self.assertEqual(self.doc.tiers['pos'].spans[0].label, 'UH')
        self.assertEqual(self.doc.tiers['pos'].spans[0].start, 1)
        self.assertEqual(self.doc.tiers['pos'].spans[0].end, 3)
        self.assertEqual(self.doc.tiers['pos'].spans[1].label, '.')
        self.assertEqual(self.doc.tiers['pos'].spans[1].start, 3)
        self.assertEqual(self.doc.tiers['pos'].spans[1].end, 4)

    def test_multiple_spaces_in_a_row(self):
        # NOTE(review): the space runs in this literal were reconstructed from
        # the asserted offsets (Hi@9-11, there@13-18, Joe@24-27, .@29-30) —
        # confirm against the original file.
        self.doc = AnnoDoc("         Hi  there      Joe  .")
        self.annotator.annotate(self.doc)
        # This is true for the default wordpunct annotator, but not e.g. the
        # SpaceAnnotator
        self.assertEqual(len(self.doc.tiers['pos'].spans), 4)
        self.assertEqual(self.doc.tiers['pos'].spans[0].label, 'UH')
        self.assertEqual(self.doc.tiers['pos'].spans[0].text, 'Hi')
        self.assertEqual(self.doc.tiers['pos'].spans[0].start, 9)
        self.assertEqual(self.doc.tiers['pos'].spans[0].end, 11)
        self.assertEqual(self.doc.tiers['pos'].spans[1].label, 'RB')
        self.assertEqual(self.doc.tiers['pos'].spans[1].text, 'there')
        self.assertEqual(self.doc.tiers['pos'].spans[1].start, 13)
        self.assertEqual(self.doc.tiers['pos'].spans[1].end, 18)
        self.assertEqual(self.doc.tiers['pos'].spans[2].label, 'NNP')
        self.assertEqual(self.doc.tiers['pos'].spans[2].text, 'Joe')
        self.assertEqual(self.doc.tiers['pos'].spans[2].start, 24)
        self.assertEqual(self.doc.tiers['pos'].spans[2].end, 27)
        self.assertEqual(self.doc.tiers['pos'].spans[3].label, '.')
        self.assertEqual(self.doc.tiers['pos'].spans[3].text, '.')
        self.assertEqual(self.doc.tiers['pos'].spans[3].start, 29)
        self.assertEqual(self.doc.tiers['pos'].spans[3].end, 30)


if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
4807019 | from .scan import DocScanner | StarcoderdataPython |
9758090 | <filename>bc/bc_grammar.py
from parsy import forward_declaration, regex, Parser, string, seq, letter, digit
from bc_ast import *
# Parser-combinator grammar (parsy) for the "b" language.  Parsers compose via
# parsy operators: `>>` keep-right, `<<` keep-left, `|` alternative.

# --- value-mapping helpers -------------------------------------------------
chr2int = lambda x: ord(x)
str2int = lambda x: int(x)
hexstr2int = lambda x: int(x, 16)

# --- layout tokens ---------------------------------------------------------
comment = regex(r'//[^\r\n]*').desc('comment')
whitespace = regex(r'[ \t]').desc('whitespace')
whitespaces = regex(r'[ \t]*').desc('whitespaces')
ignore = whitespaces
sep = whitespace.at_least(1)
nl = regex(r'(\r\n|\r|\n)').desc('new line')
# A lexeme is a parser followed by (discarded) trailing whitespace.
lexeme = lambda p: p << ignore

# --- punctuation tokens ----------------------------------------------------
colon = lexeme(string(':'))
comma = lexeme(string(','))
semicolon = lexeme(string(';'))
# NOTE(review): `hash` shadows the built-in of the same name at module level.
hash = string('#')
underscore = string('_')
hexprefix = string('0x')
singlequote = string("'")
doublequote = string('"')
assignement = lexeme(string('<-'))
leftbrace = lexeme(string('{'))
rightbrace = lexeme(string('}'))
leftpar = lexeme(string('('))
rightpar = lexeme(string(')'))

# --- identifiers and literals ----------------------------------------------
ident = lexeme(letter + (letter | digit | underscore).many().concat())
quotedstr = lexeme(doublequote >> regex(r'[^"]*') << doublequote).desc('quoted string')
anychar = regex(r'.').desc('any single character')
decnumber = regex(r'[1-9][0-9]*|0').map(str2int).desc('byte')
hexnumber = hexprefix >> regex(r'[1-9a-fA-F][0-9a-fA-F]*').map(hexstr2int).desc('word')
singlechar = lexeme(singlequote >> anychar << singlequote).map(chr2int).desc('char')
constnumber = lexeme(hexnumber | decnumber | singlechar)\
    .map(EXPRESSION_CONSTANT)\
    .desc("Constant expression")
conststring = lexeme(quotedstr)\
    .map(EXPRESSION_CONST_STR)\
    .desc("Constant string")

# --- operators, one parser per precedence level ----------------------------
unary_operator = lexeme(string('&') | string('!'))
binary_factor = lexeme(string('*') | string('/'))
binary_sum = lexeme(string('+') | string('-'))
binary_comp = lexeme(string('>') | string('<'))
binary_eq = lexeme(string('=') | string('!='))

# --- expression grammar: precedence climbing, lowest binding last ----------
# `expression` is forward-declared because the levels reference it recursively.
expression = forward_declaration()
expression_functioncall = seq(function_name = ident, params = ignore >> leftpar >> ignore >> nl.optional() >> ignore >> expression.sep_by(comma) << ignore << nl.optional() << ignore << rightpar << ignore).combine_dict(EXPRESSION_CALL).desc("function call")
expression_nested = string('(') >> ignore >> expression << ignore << string(')').desc('nested expression')
expression_term = (conststring | constnumber | expression_functioncall | ident | expression_nested).map(EXPRESSION_TERM).desc('term expression')
expression_unary_act = seq(unary_operator, expression).combine(EXPRESSION_UNARY).desc('unary expression')
expression_unary = expression_unary_act | expression_term
expression_factor = seq(operand1 = expression_unary, arguments = seq(binary_factor, expression_unary).many()).combine_dict(EXPRESSION_BINARY).desc('binary expression factor')
expression_sum = seq(operand1 = expression_factor, arguments = seq(binary_sum, expression_factor).many()).combine_dict(EXPRESSION_BINARY).desc('binary expression sum')
expression_comparision = seq(operand1 = expression_sum, arguments = seq(binary_comp, expression_factor).many()).combine_dict(EXPRESSION_BINARY).desc('binary expression comparision')
expression_equality = seq(operand1 = expression_comparision, arguments = seq(binary_eq, expression_comparision).many()).combine_dict(EXPRESSION_BINARY).desc('binary expression equality')
expression.become(expression_equality)

# --- statements ------------------------------------------------------------
type = lexeme(string('word') | string('byte'))
variable_declaration = seq(vartype = type, varname = ident).combine_dict(VARIABLE_DECLARATION).desc('variable declaration')
variable_assignement = seq(varname = ident << assignement, expr = expression).combine_dict(VARIABLE_ASSIGNEMENT).desc('variable assignement')
statement = forward_declaration()
code_block = (leftbrace >> nl.optional() >> ignore >> statement.many() << nl.optional() << ignore << rightbrace << nl.optional()).map(CODE_BLOCK).desc('code block')
statement_comment = comment.map(STATEMENT_COMMENT).desc('comment')
statement_expression = expression << semicolon << nl.optional()
statement_variables = (variable_declaration | variable_assignement) << semicolon << nl.optional()
statement_if = seq(expr = lexeme(string('if')) >> leftpar >> expression << rightpar << nl.optional(), code = ignore >> code_block).combine_dict(STATEMENT_IF).desc('if statement')
statement_while = seq(expr = lexeme(string('while')) >> leftpar >> expression << rightpar << nl.optional(), code = ignore >> code_block).combine_dict(STATEMENT_WHILE).desc('while statement')
statement_return = (lexeme(string('return')) >> expression << semicolon << nl.optional()).map(STATEMENT_RETURN).desc('return statement')
statement_asm = (lexeme(string('asm')) >> quotedstr << semicolon << nl.optional()).map(STATEMENT_ASM).desc('asm statement')
statement.become(ignore >> (statement_variables | statement_if | statement_while | statement_return | statement_asm | statement_expression | statement_comment) << nl.optional())

# --- top level: functions and the whole program ----------------------------
function_params = variable_declaration.sep_by(comma)
function_declaration = seq(return_type = type, function_name = ident, params = leftpar >> function_params << rightpar << semicolon.optional() << nl.optional() << ignore, \
    star = lexeme(string('***')).optional() << semicolon.optional() << nl.optional() << ignore, code = nl.optional() >> ignore >> code_block.optional()) \
    .combine_dict(FUNCTION_DECLARATION).desc("function declaration")
program = function_declaration.many().map(PROGRAM).desc("b program")
9611199 | from utils.randomGen import generateRandomString
from asyncProducerUtil.utils.connect import Connect
from services.ASG import ASG
class AutoScalingDefinition(Connect):
    """Builds scheduler-state-log rows for auto-scaling groups (Python 2 code:
    note the `print e` statement below)."""

    api_name = 'auto-scaling'
    table_name = 'scheduler_state_logs'

    def asg_def(self, asg, schedule):
        # Build one table row for an ASG + schedule pair.
        OS = asg.operating_system
        if OS is None:
            OS = 'linux'
        tenancy = asg.tenancy
        if tenancy == 'default':
            tenancy = 'shared'
        # NOTE(review): initialized as lists but overwritten with the tag's
        # string value when the tag exists — downstream presumably accepts both.
        PLATFORM = []
        BUSINESS_UNIT = []
        p = next((item for item in asg.tags if item["Key"] == "PLATFORM"), None)
        if p: PLATFORM = p['Value']
        b = next((item for item in asg.tags if item["Key"] == "BUSINESS_UNIT"), None)
        if b: BUSINESS_UNIT = b['Value']
        # daysActive may arrive as a list of day names or a pre-joined string.
        if isinstance(schedule.get('daysActive'), list):
            daysActive = ','.join(schedule.get('daysActive'))
        else:
            daysActive = schedule.get('daysActive')
        return {
            "uuid": generateRandomString(16),
            "resource_id": asg.id,
            "resource_type": "asg",
            "Region": asg.region,
            "Account": self.account,
            "InstanceType": asg.instance_type,
            "OperatingSystem": OS,
            "Tenancy": tenancy,
            "PLATFORM": PLATFORM,
            "BUSINESS_UNIT": BUSINESS_UNIT,
            "StopTime": int(schedule.get('stop_time')),
            "StartTime": int(schedule.get('start_time')),
            "instance_count": asg.num_instances,
            "schedule": daysActive,
            # Total hours are scaled by the group's instance count.
            "TotalHours": schedule.get('TotalHours') * asg.num_instances
        }

    def __init__(self, account, region):
        Connect.__init__(self, account, region)
        # self.resource = Connect.resource_connect(self, self.api_name)

    def generate_rows(self, schedules):
        """Return {table_name: [row, ...]} for every schedule; rows that fail
        to build are printed and skipped (best-effort behavior)."""
        auto_scaling_groups = []
        for s in schedules:
            asg = ASG(self.account, self.region, s['resource_id'])
            try:
                asg_table_row = self.asg_def(asg, s)
                auto_scaling_groups.append(asg_table_row)
            except Exception as e:
                print e
        return {
            self.table_name: auto_scaling_groups
        }
| StarcoderdataPython |
5010695 | <gh_stars>1-10
from collections import OrderedDict
from typing import Any, Dict, Iterable, Mapping, Optional, Sequence, Tuple
from .dataset import Dataset, build_processors, resolve_samples
from .processors import Processor
class OneShotLoader:
    """Loads and collates tab-separated samples using per-column Processors,
    configured from a definition string, column names, processor pairs, or a
    sample file with a header row (first match wins, in that order)."""

    def __init__(
        self,
        definition: Optional[str] = None,
        filepath: Optional[str] = None,
        columns: Optional[Iterable[str]] = None,
        processors: Optional[Iterable[Tuple[str, Processor]]] = None,
    ) -> None:
        if definition is not None:
            self._processors = list(build_processors(definition))
        elif columns is not None:
            self._processors = list(build_processors("\t".join(columns)))
        elif processors is not None:
            self._processors = list(processors)
        elif filepath is not None:
            # Derive the column definition from the file's header row.
            _, columns = resolve_samples(filepath)
            if columns is None:
                raise RuntimeError("OneShotLoader requires a file with column headers")
            self._processors = list(build_processors("\t".join(columns)))
        else:
            raise RuntimeError("OneShotLoader requires either a definition, columns, or filepath")

    def __call__(self, samples: Sequence[Sequence[str]]) -> Mapping[str, Any]:
        """Process samples column-wise; a batch of one is returned unbatched."""
        if len(samples) == 0:
            raise RuntimeError("OneShotLoader requires at least one sample")
        if len(samples) == 1:
            # NOTE(review): the single-sample path calls processor.load() while
            # the batched path calls the processor itself — confirm these are
            # intended to differ.
            return OrderedDict((k, v.load(r)) for (k, v), r in zip(self._processors, samples[0]))
        return self.collate_fn(
            [OrderedDict((k, v(r)) for (k, v), r in zip(self._processors, sample)) for sample in samples]
        )

    def collate_fn(self, batch: Sequence[Mapping[str, Any]]) -> Dict[str, Any]:
        # Delegate per-column collation to each processor's collate().
        return {key: self._processors[i][1].collate([v[key] for v in batch]) for i, key in enumerate(batch[0].keys())}

    @classmethod
    def from_dataset(cls, dataset: Dataset) -> "OneShotLoader":
        """Build a loader that shares a Dataset's processors."""
        return cls(processors=dataset.get_processors())
| StarcoderdataPython |
3486322 | <gh_stars>1-10
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null
from ..utils.streaming_download_manager import xopen
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
# Lazily-computed cache of formats PIL can both open and save; populated by
# list_image_compression_formats().
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
@dataclass
class Image:
    """Image feature to read image data from an image file.

    Input: The Image feature accepts as input:

    - A :obj:`str`: Absolute path to the image file (i.e. random access is allowed).
    - A :obj:`dict` with the keys:
        - path: String with relative path of the image file to the archive file.
        - bytes: Bytes of the image file.
      This is useful for archived files with sequential access.
    - An :obj:`np.ndarray`: NumPy array representing an image.
    - A :obj:`PIL.Image.Image`: PIL image object.

    Args:
        decode (:obj:`bool`, default ``True``): Whether to decode the image data. If `False`,
            returns the underlying dictionary in the format {"path": image_path, "bytes": image_bytes}.
    """

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        """Encode example into a format for Arrow.

        Args:
            value (:obj:`str`, :obj:`np.ndarray`, :obj:`PIL.Image.Image` or :obj:`dict`): Data passed as input to Image feature.

        Returns:
            :obj:`dict` with "path" and "bytes" fields
        """
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")
        if isinstance(value, str):
            # A plain string is treated as a path; bytes stay lazy.
            return {"path": value, "bytes": None}
        elif isinstance(value, np.ndarray):
            image = PIL.Image.fromarray(value.astype(np.uint8))
            return {"path": None, "bytes": image_to_bytes(image)}
        elif isinstance(value, PIL.Image.Image):
            return encode_pil_image(value)
        elif value.get("bytes") is not None or value.get("path") is not None:
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )

    def decode_example(self, value: dict) -> "PIL.Image.Image":
        """Decode example image file into image data.

        Args:
            value (obj:`str` or :obj:`dict`): a string with the absolute image file path, a dictionary with
                keys:
                - path: String with absolute or relative image file path.
                - bytes: The bytes of the image file.

        Returns:
            :obj:`PIL.Image.Image`
        """
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")
        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    # Remote path: fetch fully into memory, then open.
                    with xopen(path, "rb") as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """If in the decodable state, return the feature itself, otherwise flatten the feature into a dictionary."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        """Cast an Arrow array to the Image arrow storage type.
        The Arrow types that can be converted to the Image pyarrow storage type are:

        - pa.string() - it must contain the "path" data
        - pa.struct({"bytes": pa.binary()})
        - pa.struct({"path": pa.string()})
        - pa.struct({"bytes": pa.binary(), "path": pa.string()}) - order doesn't matter
        - pa.list(*) - it must contain the image array data

        Args:
            storage (Union[pa.StringArray, pa.StructArray, pa.ListArray]): PyArrow array to cast.

        Returns:
            pa.StructArray: Array in the Image arrow storage type, that is
                pa.struct({"bytes": pa.binary(), "path": pa.string()})
        """
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")
        if pa.types.is_string(storage.type):
            # Paths only: pair them with an all-null bytes column.
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            # Fill in whichever of "bytes"/"path" is missing with nulls.
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            # Raw pixel arrays: re-encode each to compressed image bytes.
            bytes_array = pa.array(
                [
                    image_to_bytes(PIL.Image.fromarray(np.array(arr, np.uint8))) if arr is not None else None
                    for arr in storage.to_pylist()
                ],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray, drop_paths: bool = True) -> pa.StructArray:
        """Embed image files into the Arrow array.

        Args:
            storage (pa.StructArray): PyArrow array to embed.
            drop_paths (bool, default ``True``): If True, the paths are set to None.

        Returns:
            pa.StructArray: Array in the Image arrow storage type, that is
                pa.struct({"bytes": pa.binary(), "path": pa.string()})
        """

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        # Read file contents for rows that only have a path.
        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array([None] * len(storage), type=pa.string()) if drop_paths else storage.field("path")
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    """Return (and cache in _IMAGE_COMPRESSION_FORMATS) the image formats PIL
    can both read and write."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()  # populate PIL's OPEN/SAVE plugin registries
        readable = set(PIL.Image.OPEN.keys())
        writable = set(PIL.Image.SAVE.keys())
        _IMAGE_COMPRESSION_FORMATS = list(readable & writable)
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image object to bytes using native compression if possible, otherwise use PNG compression."""
    out = BytesIO()
    native = image.format
    chosen_format = native if native in list_image_compression_formats() else "PNG"
    image.save(out, format=chosen_format)
    return out.getvalue()
def encode_pil_image(image: "PIL.Image.Image") -> dict:
    """Encode a PIL image as a {"path", "bytes"} dict, preferring its file path
    when the image was opened from a file."""
    filename = getattr(image, "filename", "")
    if filename:
        return {"path": filename, "bytes": None}
    return {"path": None, "bytes": image_to_bytes(image)}
def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]]
) -> List[dict]:
    """Encode a list of objects into a format suitable for creating an extension array of type :obj:`ImageExtensionType`.

    The element type is sniffed from the first non-null value; dicts (and any
    unrecognized type) pass through unchanged.
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            # Strings are paths; keep bytes lazy.
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            # Pixel arrays are compressed to image bytes.
            return [
                {"path": None, "bytes": image_to_bytes(PIL.Image.fromarray(obj.astype(np.uint8)))}
                if obj is not None
                else None
                for obj in objs
            ]
        elif isinstance(obj, PIL.Image.Image):
            # Wrap so None entries are passed through untouched.
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
| StarcoderdataPython |
6452424 | import copy
import six
from .error import SchemaError, Error, Invalid
class Schema(object):
    """
    A schema that validates data given to it using the specified rules.

    The ``schema`` must be a dictionary of key-value mappings. Values must
    be callable validators.

    The ``entire`` argument allows specifying a callable validator that runs on
    the entire input after every field is validated. If provided, the validator
    will always run, even if validation errors are raised beforehand. Failed
    keys will not be included in the given data.

    The ``extra_keys`` argument must be one of :attr:`.ACCEPT`, :attr:`.IGNORE`
    or :attr:`.REJECT`.

    The ``required_error`` argument specifies the error message used when a
    key is missing. :attr:`.REQUIRED_ERROR` is the default.
    """

    ACCEPT = 'ACCEPT'
    IGNORE = 'IGNORE'
    REJECT = 'REJECT'

    # Default error message for a missing required key.
    REQUIRED_ERROR = "This field is required."
    # Default error message for an unknown key when extra_keys == REJECT.
    REJECT_ERROR = "This field is unknown."

    # Sentinel returned by _run_validator() when validation failed.  The
    # original code truth-tested the validator's result, which silently
    # dropped falsy-but-valid values (0, '', False, None) from the output;
    # an identity check against this sentinel keeps them.
    _FAILED = object()

    def __init__(self, schema, entire=None, extra_keys=IGNORE, required_error=None):
        self.extra_keys = extra_keys
        self.entire = entire
        self.required_error = required_error or self.REQUIRED_ERROR
        if not isinstance(schema, dict):
            raise SchemaError("The provided schema must be a dictionary.")
        self.schema = schema
        self.validator = self._build(schema)

    def __call__(self, data):
        """
        Validates the given ``data`` dictionary and returns transformed values.

        Will raise :class:`decent.error.Invalid` if any validation errors are
        encountered.
        """
        return self.validator(copy.deepcopy(data))

    def _build(self, schema):
        """Compile ``schema`` into a single ``validator(data)`` closure."""
        extra_keys = self.extra_keys
        entire = self.entire

        # Enumerate all the keys in the schema; keys not wrapped in Optional
        # are required.
        all_keys = set(schema.keys())
        _required_keys = set([key for key in all_keys if not isinstance(key, Optional)])

        # Enumerate default key values.
        defaults = {}
        for key in all_keys:
            if isinstance(key, Marker) and key.default is not None:
                defaults[key] = key.default

        # Make sure all validators are callable.
        for key, value in six.iteritems(schema):
            if not hasattr(value, '__call__'):
                raise SchemaError("Validator {!r} for key '{!s}' is not callable.".format(value, key))

        def validator(data):
            # Sanity check.
            if not isinstance(data, dict):
                raise Invalid([Error("Data must be a dictionary.")])

            # Track which required keys are not present.
            required_keys = _required_keys.copy()

            # Fill available defaults before validating.
            missing = all_keys.copy() - set(data.keys())
            for key in missing:
                if key in defaults:
                    data[key] = defaults[key]

            errors = []
            result = {}
            for key, value in six.iteritems(data):
                # If this key is not in the schema, decide what to do with it.
                if key not in all_keys:
                    if extra_keys == self.ACCEPT:
                        # Pass through as is.
                        result[key] = value
                    elif extra_keys == self.REJECT:
                        # Reject with error.
                        errors.append(Error(self.REJECT_ERROR, [key]))
                    # IGNORE (and the branches above) all skip validation.
                    continue

                # Validate.  Sentinel comparison keeps falsy-but-valid values.
                result_value = self._run_validator(schema[key], value, errors, key)
                if result_value is not self._FAILED:
                    result[key] = result_value

                # Track required keys.
                if key in required_keys:
                    required_keys.remove(key)

            # Add an error for every missing key.
            for key in required_keys:
                errors.append(Error(self.required_error, [key]))

            # Run the validator for the entire schema; it always runs, even
            # when field errors were already collected.
            if entire:
                entire_result = self._run_validator(entire, result, errors)
                if entire_result is not self._FAILED:
                    result = entire_result

            if errors:
                raise Invalid(errors)
            return result

        return validator

    def _run_validator(self, validator, data, errors, key=None):
        """Call ``validator(data)``; on failure, collect the error(s) into
        ``errors`` (prefixed with ``key``) and return :attr:`_FAILED`."""
        try:
            return validator(data)
        except Invalid as invalid:
            for e in invalid:
                self._add_error(e, errors, key)
        except Error as e:
            self._add_error(e, errors, key)
        return self._FAILED

    def _add_error(self, error, errors, key=None):
        # Prepend the current key so nested errors carry their full path.
        if key:
            error.path.insert(0, key)
        errors.append(error)
class Marker(object):
    """Base class for schema-key markers that wrap an underlying key.

    A marker compares equal to, and hashes like, the key it wraps, so it can
    be used interchangeably with the plain key in dictionary lookups.
    """

    def __init__(self, key, default=None):
        self.key = key
        self.default = default

    def __eq__(self, other):
        # Delegate equality to the wrapped key: Marker('x') == 'x'.
        return self.key == other

    def __hash__(self):
        return hash(self.key)

    def __str__(self):
        return str(self.key)

    __repr__ = __str__
class Default(Marker):
    """Marker that attaches a default value to a schema key."""
class Optional(Marker):
    """Marker that makes a schema key optional: data validates without it."""


__all__ = ('Schema', 'Marker', 'Default', 'Optional',)
| StarcoderdataPython |
208403 | <gh_stars>0
import Tester
import json
import time
import random
import sys
# Python 2 only: reload(sys) re-exposes setdefaultencoding (removed by
# site.py) so the process-wide implicit str<->unicode codec can be forced to
# UTF-8.  This is a discouraged global hack and does not exist on Python 3.
reload(sys)
sys.setdefaultencoding('utf8')
def shuffled(iterable):
    """Return a new list with the elements of *iterable* in random order.

    Unlike random.shuffle(), the input sequence is left untouched.

    Fix: the original shuffled a bare ``range(...)`` object, which only works
    on Python 2 (where ``range`` returns a list); materializing the index
    permutation as a list preserves the Python 2 behavior exactly and also
    works on Python 3.
    """
    perm = list(range(len(iterable)))
    random.shuffle(perm)
    return [iterable[i] for i in perm]
class SearchTester(Tester.Tester):
    """Runs a submission against a list of JSON-defined test cases, shuffling
    each test's vehicle lines so submissions cannot hard-code input order.
    (Python 2 code, per the reload(sys)/setdefaultencoding prelude above.)"""

    def __init__(self, details):
        super(SearchTester, self).__init__(details)
        tests_file = details["tests"]
        with open(tests_file, "r") as data_file:
            self.__tests = json.load(data_file)
        self.__break_on_first_error = details["break on first error"]
        self.__break_on_first_wrong = details["break on first wrong"]
        self.__default_timeout = details.get("default timeout") or 5.0
        self.__personal = details.get("personal") or False
        pass

    def test(self, project_name, submission):
        """Run every test case (or the submission's personal subset), log each
        result, and optionally stop on the first error/wrong answer."""
        if submission.is_personal():
            tests = self.__tests[submission.personal_index()]
        else:
            tests = self.__tests
        for data in tests:
            timeout = self.__default_timeout
            # A test case is either {"input", "target"[, "timeout"]} or a
            # two-element [input, target] list.
            if isinstance(data, dict):
                input = data["input"]
                target_output = data["target"]
                timeout = data.get("timeout") or self.__default_timeout
            elif isinstance(data, list):
                input = data[0]
                target_output = data[1]
            oldinput = input
            #Swizzle the order of the individual cars, and turn them around
            inputsplit = input.strip().split('\n')
            # Input layout: line 2 holds the pillar count; pillar lines follow,
            # then one tab-separated line per vehicle.
            number_of_pillars = int(inputsplit[2])
            first_item_index = 3+number_of_pillars
            for i in range(first_item_index, len(inputsplit)): #shuffle vehicle direction
                inputsplit[i] = '\t'.join(shuffled(inputsplit[i].strip().split('\t')))
            inputsplit[first_item_index:] = shuffled(inputsplit[first_item_index:]) #shuffle vehicle order
            input = '\n'.join(inputsplit)+'\n'
            #print 'SearchTester oldinput vs newinput:',oldinput,input
            (stdout, stderr, extraerr) = submission.run(project_name, input, timeout = timeout)
            # Normalize submission output to plain ASCII before comparing.
            stdout = stdout.decode('utf-8','ignore').encode('ascii','replace').strip()
            target_output = target_output.strip()
            if not (stderr or extraerr):
                (result, message) = self.evaluator.evaluate(input, target_output, stdout, submission.log)
            else:
                result = 0
                if stderr:
                    #name = school_name.encode('utf8')
                    message = "Runtime error:\n%s\n\nfor input:\n%s" % (stderr.encode('utf8'),input.encode('utf8'))
                else:
                    message = extraerr
            submission.log.log_test(project_name, input, target_output, stdout, result, message)
            if self.__break_on_first_error and (stderr or extraerr):
                break
            if self.__break_on_first_wrong and result != 1.0:
                break
| StarcoderdataPython |
3589105 | """Author: <NAME>, Copyright 2019
Word Pack."""
import tensorflow as tf
from wordpack.net.multi_head_attention import MultiHeadAttention
class Transformer(tf.keras.layers.Layer):
    """A stack of multi-head self-attention blocks, each followed by a small
    ReLU feed-forward network, over a learned token embedding table.
    TF1-style: the embedding variable is created eagerly under a reusable
    variable scope."""

    def __init__(self, vocab_size, embedding_size, num_heads, hidden_sizes, output_sizes,
                 fc_hidden_sizes, fc_output_sizes, **kwargs):
        # num_heads / hidden_sizes / output_sizes are zipped to configure one
        # MultiHeadAttention per layer; fc_hidden_sizes / fc_output_sizes size
        # the per-layer Dense pair.
        super(Transformer, self).__init__(**kwargs)
        with tf.variable_scope("transformer", reuse=tf.AUTO_REUSE):
            self.embeddings_map = tf.get_variable("embeddings_map", shape=[
                vocab_size, embedding_size], dtype=tf.float32)
            # NOTE(review): original indentation was ambiguous here — the layer
            # lists are assumed to be built inside the variable scope too;
            # confirm against the original file.
            self.attention_layers = [MultiHeadAttention(a, b, c) for a, b, c in zip(
                num_heads, hidden_sizes, output_sizes)]
            self.fc_hidden_layers = [tf.keras.layers.Dense(x) for x in fc_hidden_sizes]
            self.fc_output_layers = [tf.keras.layers.Dense(x) for x in fc_output_sizes]

    def __call__(self, x):
        # Embed integer token ids, then apply each (self-attention -> FFN) block.
        x = tf.nn.embedding_lookup(self.embeddings_map, x)
        for attention, hidden, output in zip(self.attention_layers, self.fc_hidden_layers,
                self.fc_output_layers):
            x = output(tf.nn.relu(hidden(attention(x, x, x))))
        return x

    @property
    def trainable_variables(self):
        # Embedding table plus every sub-layer's trainable variables.
        layer_variables = [self.embeddings_map]
        for layer in self.attention_layers:
            layer_variables += layer.trainable_variables
        for layer in self.fc_hidden_layers:
            layer_variables += layer.trainable_variables
        for layer in self.fc_output_layers:
            layer_variables += layer.trainable_variables
        return layer_variables

    @property
    def trainable_weights(self):
        # Keras-compatible alias.
        return self.trainable_variables

    @property
    def variables(self):
        # Same collection pattern as trainable_variables, but including
        # non-trainable variables of each sub-layer.
        layer_variables = [self.embeddings_map]
        for layer in self.attention_layers:
            layer_variables += layer.variables
        for layer in self.fc_hidden_layers:
            layer_variables += layer.variables
        for layer in self.fc_output_layers:
            layer_variables += layer.variables
        return layer_variables

    @property
    def weights(self):
        # Keras-compatible alias.
        return self.variables
6441252 | <filename>30_day_leetcoding_challenge/week_2/min_stack.py<gh_stars>0
class MinStack:
    """Stack supporting push/pop/top plus O(1) retrieval of the minimum.

    Fix: the original recomputed ``min`` over the whole stack on every push
    and pop (O(n) per operation); this version keeps a parallel stack of
    running minimums so every operation is O(1).  getMin() on an empty stack
    returns None (the original returned 0 before the first push and None
    after popping to empty — an inconsistency this normalizes).
    """

    def __init__(self):
        """
        initialize your data structure here.
        """
        self.__vals = []   # the actual stack contents
        self.__mins = []   # __mins[i] == min(__vals[:i + 1])

    def push(self, x: int) -> None:
        self.__vals.append(x)
        # Running minimum including the new element.
        current_min = x if not self.__mins else min(x, self.__mins[-1])
        self.__mins.append(current_min)

    def pop(self) -> None:
        self.__vals.pop()
        self.__mins.pop()

    def top(self) -> int:
        return self.__vals[-1]

    def getMin(self) -> int:
        return self.__mins[-1] if self.__mins else None
# Your MinStack object will be instantiated and called as such:
# obj = MinStack()
# obj.push(x)
# obj.pop()
# param_3 = obj.top()
# param_4 = obj.getMin()
| StarcoderdataPython |
173686 | from polls.tests.test_rules.rule_2.bad_factories import (
PollFactory as BadPollFactory,
)
from polls.tests.test_rules.rule_2.good_factories import (
PollFactory as GoodPollFactory,
)
def test_bad_to_string_with_non_premium_question_without_author():
    # The "bad" factory builds a default poll; __str__ is expected to yield
    # the factory's hard-wired question text.
    poll = BadPollFactory.build()
    assert str(poll) == "What's Up?"
def test_good_to_string_with_non_premium_question_without_author():
    # The "good" factory lets the test pin every relevant field explicitly,
    # so the expected string is stated in the test itself.
    question = GoodPollFactory.build(
        premium=False, author=None, question__question_text="Pepsi or Coke?"
    )
    assert str(question) == "Pepsi or Coke?"
| StarcoderdataPython |
11371285 | # -*- coding: utf-8 -*-
'''
Created on 2018. 9. 12.
@author: jason96
'''
import unittest
from splunklib.client import Index
from sputil.splunk import Indexer, Searcher, ITSIManager
from sputil.base import SPINDEX
import urllib3
import time
urllib3.disable_warnings()
class SplunkIndexerTest(unittest.TestCase):
    """Smoke-tests Indexer.index() against a live Splunk instance."""

    def setUp(self):
        self.indexer = Indexer()

    def tearDown(self):
        pass

    def test_index(self):
        # Indexing a JSON event should hand back a splunklib Index object.
        json_string = '{"spindex":"test"}'
        i = self.indexer.index(SPINDEX, json_string)
        self.assertEqual(Index, type(i))
class SplunkSearcherTest(unittest.TestCase):
    """Smoke-tests Searcher.search() against a live Splunk instance."""

    def setUp(self):
        self.searcher = Searcher()

    def tearDown(self):
        pass

    def test_search(self):
        # Every result of this SPL query must come from the _internal index.
        spl = ' search index=_internal | head 10 '
        for x in self.searcher.search(spl):
            self.assertEqual('_internal', x['index'])
class ITSIManagerTest(unittest.TestCase):
    """Exercises ITSIManager against a live ITSI instance (Python 2 code:
    note the `print` statements below)."""

    def setUp(self):
        self.manager = ITSIManager()

    def test_get_kpi_base_searches(self):
        # Also cleans up any leftover 'kpitest2' search from previous runs.
        searches = self.manager.get_kpi_base_searches()
        self.assertGreater(len(searches), 0)
        for search in searches:
            if search['title'] == 'kpitest2':
                self.manager.del_kpi_base_search(title='kpitest2')

    def test_add_kpi_base_search(self):
        # NOTE(review): assertTrue(True) is vacuous — this never fails even
        # when '_key' is absent; it should probably assert '_key' in rs.
        rs = self.manager.add_kpi_base_search(title='kpitest2', desc='test')
        if '_key' in rs:
            self.assertTrue(True)

    def test_get_uuid(self):
        # ~10k generated UUIDs must all be distinct.
        uuids = []
        for x in range(1, 10000):  # @UnusedVariable
            uuids.append(self.manager.get_uuid())
        self.assertEqual(len(uuids), len(list(sorted(set(uuids)))))

    def test_get_kpi_services(self):
        for x in self.manager.get_kpi_ids():
            print x

    def test_add_kpi_base_search_metrics(self):
        metrics = []
        metric = {}
        metric['aggregate_statop'] = 'avg'
        metric['entity_statop'] = 'avg'
        metric['threshold_field'] = 'threshold_field'
        metric['title'] = 'threshold_field'
        metric['unit'] = ''
        metric['_key'] = self.manager.get_uuid()
        metrics.append(metric)
        self.manager.add_kpi_base_search_metrics('kpitest2', metrics)
        # Give the server a moment to persist before reading back.
        time.sleep(1)
        for kpi_base_search in self.manager.get_kpi_base_searches():
            if kpi_base_search['title'] == 'kpitest2':
                title = kpi_base_search['metrics'][0]['title']
                self.assertEqual(title, 'threshold_field')
                print kpi_base_search
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
3419237 | <filename>tests/conftest.py
from itertools import zip_longest
from typing import Dict, List, Optional
from arche.readers.items import CollectionItems, JobItems
from arche.rules.result import Result
import numpy as np
import pandas as pd
import pytest
# Canned Scrapy Cloud items shared across the test-suite; item 1 deliberately
# lacks "price" so schema/coverage rules have something to flag.
cloud_items = [
    {"_key": "112358/13/21/0", "price": 0, "name": "Elizabeth"},
    {"_key": "112358/13/21/1", "name": "Margaret"},
    {"_key": "112358/13/21/2", "price": 10, "name": "Yulia"},
    {"_key": "112358/13/21/3", "price": 11, "name": "Vivien"},
]

# JSON schema matching `cloud_items`: "price" is optional, extras forbidden.
default_schema = {
    "$schema": "http://json-schema.org/draft-07/schema",
    "required": ["_key", "name"],
    "type": "object",
    "properties": {
        "_key": {"type": "string"},
        "price": {"type": "integer"},
        "name": {"type": "string"},
    },
    "additionalProperties": False,
}
@pytest.fixture(scope="session")
def get_cloud_items(request):
    """The canned cloud items (session-scoped, shared)."""
    return cloud_items


@pytest.fixture(scope="session")
def get_raw_items(request):
    """The canned cloud items as a numpy object array."""
    return np.array(cloud_items)


@pytest.fixture(scope="session")
def get_schema():
    """The default JSON schema for the canned items."""
    return default_schema


@pytest.fixture(scope="function")
def get_df():
    """A tiny named DataFrame for report/plot tests."""
    df = pd.DataFrame({"first": [0.25, 0.75], "second": [0.0, 1.0]})
    df.name = "a df"
    return df
class Job:
    """Minimal stand-in for a Scrapinghub job: wraps items in a Source and
    carries a key plus (possibly empty) metadata."""

    def __init__(self, items=None, metadata=None, stats=None, key="112358/13/21"):
        self.items = Source(items, stats)
        self.key = key
        # A falsy metadata argument collapses to an empty dict.
        self.metadata = metadata or {}
class Collection:
    """Minimal stand-in for a Scrapinghub collection exposing only count()."""

    def __init__(self, count: Optional[int] = None):
        # Fixed size reported by count(); None mimics an unknown size.
        self._count = count

    def count(self) -> Optional[int]:
        """Return the item count this stub was constructed with."""
        return self._count
class Source:
    """Stand-in for a Scrapinghub job's item store: holds items plus stats."""

    def __init__(self, items=None, stats=None):
        self.items = items
        if stats:
            self._stats = stats
        else:
            # Default stats mirror the API shape: total input item count.
            if self.items:
                input_values = len(self.items)
            else:
                input_values = 0
            self._stats = {"totals": {"input_values": input_values}}

    def stats(self):
        """Return the stats dict (a method, to mimic the client API)."""
        return self._stats

    def iter(self, **kwargs):
        """Yield items, emulating the Scrapinghub ``iter`` API.

        Supported kwargs: ``start`` (an item key like "112358/13/21/2" whose
        last component is the offset), ``count`` (max items to yield) and
        ``filter`` (a [[field, [value]]]-shaped equality filter).
        """
        start = kwargs.get("start", 0)
        if start:
            # "start" arrives as a full item key; the offset is its last part.
            start = int(start.split("/")[-1])
        count = kwargs.get("count", len(self.items) - start)
        # Scrapinghub API returns all possible items even if `count` greater than possible
        if start + count > len(self.items):
            limit = len(self.items)
        else:
            limit = start + count
        if kwargs.get("filter"):
            field_name = kwargs.get("filter")[0][0]
            value = kwargs.get("filter")[0][1][0]
            filtered_items = []
            counter = 0
            for index in range(start, limit):
                # NOTE(review): `counter` counts *matches* but is compared with
                # `limit` (an index bound) — presumably intended as a cap on
                # yielded items; confirm against the real API semantics.
                if counter == limit:
                    return
                if self.items[index].get(field_name) == value:
                    filtered_items.append(self.items[index])
                    counter += 1
            for filtered_item in filtered_items:
                yield filtered_item
        else:
            for index in range(start, limit):
                yield self.items[index]
@pytest.fixture(scope="function")
def get_source():
    """A Source pre-filled with the canned cloud items."""
    return Source(items=cloud_items)


@pytest.fixture(scope="function", params=[(cloud_items, None, None)])
def get_job(request):
    """A Job built from a (items, metadata, stats) tuple supplied via params."""
    return Job(*request.param)


@pytest.fixture(scope="function")
def get_collection():
    """An empty Collection stub (count() -> None)."""
    return Collection()


@pytest.fixture(scope="function")
def get_jobs():
    """A pair of default Jobs."""
    return Job(), Job()
class ScrapinghubClient:
    """Stand-in for the scrapinghub client: get_job() returns whatever was
    passed to the constructor."""

    # NOTE(review): the default for `job` is the *fixture function* `get_job`
    # (not a Job instance) — presumably a placeholder that tests override or
    # mock before the value is actually used; confirm before relying on it.
    def __init__(self, job: Optional[Job] = get_job):
        self._job = job

    def get_job(self):
        return self._job
@pytest.fixture(scope="function")
def get_client():
    """A ScrapinghubClient stub (its get_job() returns the ctor default)."""
    return ScrapinghubClient()


@pytest.fixture(scope="function", params=[cloud_items])
def get_job_items(request, mocker):
    """JobItems whose job lookup and fetch_data are patched to canned data.

    NOTE(review): `JobItems.job` is patched with return_value=get_job — the
    fixture *function*, not a Job instance; presumably the code under test
    never unwraps it in these tests. Confirm.
    """
    mocker.patch(
        "arche.readers.items.JobItems.job", return_value=get_job, autospec=True
    )
    mocker.patch(
        "arche.readers.items.JobItems.fetch_data",
        return_value=np.array(request.param),
        autospec=True,
    )
    job_items = JobItems(key="112358/13/21", count=len(request.param))
    return job_items


@pytest.fixture(scope="function", params=[cloud_items])
def get_collection_items(request, mocker):
    """CollectionItems with the collection lookup and fetch_data patched."""
    mocker.patch(
        "arche.tools.api.get_collection", return_value=get_collection, autospec=True
    )
    mocker.patch(
        "arche.readers.items.CollectionItems.fetch_data",
        return_value=np.array(request.param),
        autospec=True,
    )
    collection_items = CollectionItems(
        key="112358/collections/s/pages", count=len(request.param)
    )
    return collection_items
def create_result(
    rule_name, messages, stats=None, err_items_count=None, items_count=None
):
    """Build a Result named *rule_name* populated with *messages*.

    Parameters
    ----------
    rule_name : str
        Name given to the Result.
    messages : dict
        Mapping of level -> iterable of message tuples; each tuple is
        splatted into Result.add_message.
    stats, err_items_count, items_count
        Optional attributes copied onto the Result when truthy.
    """
    result = Result(rule_name)
    # Fix: the original reused the name `messages` for the per-level list,
    # shadowing the parameter inside the loop.
    for level, level_messages in messages.items():
        for message in level_messages:
            result.add_message(level, *message)
    if stats:
        result.stats = stats
    if err_items_count:
        result.err_items_count = err_items_count
    if items_count:
        result.items_count = items_count
    return result
def pytest_assertrepr_compare(op, left, right):
    """Pytest hook: custom diff for failed ``Result == Result`` assertions.

    Walks both Results' attribute dicts in parallel; the `_stats` entry holds
    pandas objects, which are compared with pandas' own testing helpers so
    their rich diff output is included in the failure report.
    """
    if isinstance(left, Result) and isinstance(right, Result) and op == "==":
        assert_msgs = ["Results are equal"]
        for (left_n, left_v), (_, right_v) in zip_longest(
            left.__dict__.items(), right.__dict__.items()
        ):
            if left_n == "_stats":
                for left_stat, right_stat in zip_longest(left_v, right_v):
                    try:
                        if isinstance(left_stat, pd.DataFrame):
                            pd.testing.assert_frame_equal(left_stat, right_stat)
                        else:
                            pd.testing.assert_series_equal(left_stat, right_stat)
                    except AssertionError as e:
                        # Include pandas' detailed explanation, line by line.
                        assert_msgs.extend([f"{left_stat}", "!=", f"{right_stat}"])
                        assert_msgs.extend(str(e).split("\n"))
            elif left_v != right_v:
                assert_msgs.extend([f"{left_v}", "!=", f"{right_v}"])
        return assert_msgs
def create_named_df(data: Dict, index: List[str], name: str) -> pd.DataFrame:
    """Build a DataFrame from *data* and *index*, attaching *name* as its
    ``.name`` attribute (used by the reporting helpers)."""
    frame = pd.DataFrame(data=data, index=index)
    frame.name = name
    return frame
| StarcoderdataPython |
5080945 | from django.db import models
# Create your models here.
# News category table.
class Cate(models.Model):
    # cate_id = models.CharField(blank=False, max_length=64, verbose_name='ID', unique=True)
    cate_name = models.CharField(max_length=64, verbose_name='名字')  # category name

    def __str__(self):
        return self.cate_name

    class Meta:
        db_table = 'cate'
        verbose_name_plural = "新闻类别表"
# News table.
class New(models.Model):
    # NOTE(review): no on_delete is given for this ForeignKey — it is required
    # from Django 2.0 onward, so presumably this project runs Django < 2.
    # Confirm before upgrading.
    new_cate = models.ForeignKey(Cate, related_name="类别")  # owning category
    new_time = models.CharField(max_length=100, verbose_name="发布时间")  # publish time
    new_seenum = models.IntegerField(verbose_name="浏览次数", default=0)  # view count
    new_disnum = models.IntegerField(verbose_name="跟帖次数", default=0)  # comment count
    index_image_url = models.CharField(max_length=200, verbose_name="新闻列表图片路径", default='SOME STRING')
    # related_name defines the accessor name used when querying child rows
    # from the parent (Cate) model.
    new_title = models.CharField(blank=False, max_length=100, verbose_name="标题")  # headline
    new_source = models.TextField(verbose_name="新闻来源", max_length=20, blank=False, default="Fantasy News")
    digest = models.CharField(max_length=500, default='SOME STRING')  # news summary
    new_content = models.TextField(blank=False, verbose_name="新闻内容")  # body text

    def __str__(self):
        return self.new_title

    class Meta:
        db_table = 'new'
        verbose_name_plural = "新闻信息表"
# News-similarity table: pairs of news ids with their similarity score.
class Correlation(models.Model):
    new_id = models.CharField(max_length=100, verbose_name="新闻id")  # news id
    like_new_id = models.CharField(max_length=100, verbose_name="相似新闻id")  # similar news id
    similarity = models.CharField(max_length=100, verbose_name="相似度")  # similarity score

    def __str__(self):
        return self.new_id

    class Meta:
        db_table = "new_correlation"
        verbose_name_plural = "新闻相似度表"
| StarcoderdataPython |
12850260 | <reponame>neogeo-technologies/collab<filename>geocontrib/migrations/0014_featuretype_title_optional.py
# Generated by Django 2.2.24 on 2021-09-21 10:21
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the boolean ``title_optional`` flag (default False) to FeatureType."""

    dependencies = [
        ('geocontrib', '0013_importtask'),
    ]

    operations = [
        migrations.AddField(
            model_name='featuretype',
            name='title_optional',
            field=models.BooleanField(default=False, verbose_name='Titre optionnel'),
        ),
    ]
| StarcoderdataPython |
3583515 | import json
from typing import Tuple
import pigpio
direction_type = int
velocity_type = int
motor_command_type = Tuple[direction_type, velocity_type]
drive_command_type = Tuple[motor_command_type, motor_command_type]


class DifferentialDrive(object):
    """Two-motor differential drive controlled through pigpio PWM pins.

    Pin numbers and per-motor direction levels are loaded from two JSON
    files at construction time.
    """

    def __init__(self, pin_setup: str, drive_config: str):
        self.load_pins(pin_setup)
        self.load_config(drive_config)
        self.setup_gpio()

    def load_pins(self, pin_setup: str):
        """Read the pin-name -> GPIO-number mapping from a JSON file."""
        with open(pin_setup, 'r') as fp:
            self.pins = json.load(fp)

    def load_config(self, drive_config: str):
        """Read the per-motor direction/level configuration from a JSON file."""
        with open(drive_config, 'r') as fp:
            self.config = json.load(fp)

    def setup_gpio(self):
        """Connect to the pigpio daemon and mark every configured pin as output."""
        self.pi = pigpio.pi()
        for gpio_pin in self.pins.values():
            self.pi.set_mode(gpio_pin, pigpio.OUTPUT)

    def drive(self, command: drive_command_type):
        """Apply a (left, right) pair of (direction, velocity) motor commands."""
        left_command, right_command = command
        self.send_motor_command(left_command, motor='left')
        self.send_motor_command(right_command, motor='right')

    def send_motor_command(self, command: motor_command_type, motor: str):
        """Drive one motor; silently ignores unknown motor names."""
        if motor not in ('left', 'right'):
            return
        enable_key = '{}_motor_enable'.format(motor)
        a_key = '{}_motor_A'.format(motor)
        b_key = '{}_motor_B'.format(motor)
        direction, velocity = command
        direction_name = self.direction_to_str(direction)
        # PWM on the enable pin sets speed; A/B levels set rotation direction.
        self.pi.set_PWM_dutycycle(self.pins[enable_key], velocity)
        self.pi.write(self.pins[a_key], self.config[motor][direction_name][a_key])
        self.pi.write(self.pins[b_key], self.config[motor][direction_name][b_key])

    @staticmethod
    def direction_to_str(direction: direction_type):
        """Map +1/-1 to 'forward'/'backward'; anything else is 'stationary'."""
        if direction == 1:
            return 'forward'
        if direction == -1:
            return 'backward'
        return 'stationary'
| StarcoderdataPython |
4897865 | <gh_stars>1-10
import inspect
# Py2/py3 compatibility shims: `basestring`/`long` only exist on Python 2,
# and inspect.getargspec was removed in Python 3.11.
try:  # Python 2
    _STRING_TYPES = (basestring,)  # noqa: F821
    _INT_TYPES = (int, long)  # noqa: F821
    _getargspec = inspect.getargspec
except NameError:  # Python 3
    _STRING_TYPES = (str, bytes)
    _INT_TYPES = (int,)
    _getargspec = inspect.getfullargspec


def serialize(x):
    """Recursively convert *x* into JSON-serializable primitives.

    Scalars pass through unchanged; dicts/lists/tuples are serialized
    element-wise (tuples become lists). Objects with a ``serialize`` method
    delegate to it; otherwise the object's ``__init__`` argument names are
    used to pick which attributes to serialize into a dict.
    """
    if isinstance(x, _STRING_TYPES + (bool, float, type(None)) + _INT_TYPES):
        return x
    if isinstance(x, dict):
        return {k: serialize(v) for (k, v) in x.items()}
    if isinstance(x, (list, tuple)):
        return [serialize(i) for i in x]
    method = getattr(x, 'serialize', None)
    if method:
        return serialize(method())
    keys = _getargspec(x.__init__).args[1:]
    return {k: serialize(getattr(x, k)) for k in keys}
| StarcoderdataPython |
11201932 | <filename>finmarketpy/backtest/backtestengine.py<gh_stars>1-10
__author__ = 'saeedamen'
#
# Copyright 2016 Cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
"""
Backtest
Conducts backtest for strategies trading assets. Assumes we have an input of total returns. Reports historical return statistics
and returns time series.
"""
import numpy
from findatapy.util import LoggerManager
class Backtest:
    """Computes the P&L of a trading strategy from asset total returns and a
    signal matrix, and caches return series/statistics for later retrieval.

    Fixes vs. original:
    - get_pnl_desc referenced the never-assigned attribute
      ``_ret_stats_signals``; the statistics are stored in ``_ret_stats_pnl``.
    - an unrecognized ``br.portfolio_combination`` value no longer leaves
      ``portfolio`` unbound (it falls back to 'mean', matching the default
      branch used when the attribute is absent).
    """

    def __init__(self):
        self.logger = LoggerManager().getLogger(__name__)
        self._pnl = None
        self._portfolio = None

    def calculate_trading_PnL(self, br, asset_a_df, signal_df):
        """Calculate P&L of a trading strategy and cache series/statistics.

        Parameters
        ----------
        br : BacktestRequest
            Parameters for the backtest specifying start date, finish date,
            transaction costs etc.
        asset_a_df : pandas.DataFrame
            Asset prices to be traded
        signal_df : pandas.DataFrame
            Signals for the trading strategy
        """
        calculations = Calculations()

        # make sure the dates of both traded asset and signal are aligned properly
        asset_df, signal_df = asset_a_df.align(signal_df, join='left', axis='index')

        # only allow signals to change on the days when we can trade assets
        signal_df = signal_df.mask(numpy.isnan(asset_df.values))  # fill asset holidays with NaN signals
        signal_df = signal_df.fillna(method='ffill')              # fill these down
        asset_df = asset_df.fillna(method='ffill')                # fill down asset holidays

        returns_df = calculations.calculate_returns(asset_df)
        tc = br.spot_tc_bp

        signal_cols = signal_df.columns.values
        returns_cols = returns_df.columns.values

        pnl_cols = []

        for i in range(0, len(returns_cols)):
            pnl_cols.append(returns_cols[i] + " / " + signal_cols[i])

        # do we have a vol target for individual signals?
        if hasattr(br, 'signal_vol_adjust'):
            if br.signal_vol_adjust is True:
                risk_engine = RiskEngine()

                if not (hasattr(br, 'signal_vol_resample_type')):
                    br.signal_vol_resample_type = 'mean'

                if not (hasattr(br, 'signal_vol_resample_freq')):
                    br.signal_vol_resample_freq = None

                leverage_df = risk_engine.calculate_leverage_factor(
                    returns_df, br.signal_vol_target, br.signal_vol_max_leverage,
                    br.signal_vol_periods, br.signal_vol_obs_in_year,
                    br.signal_vol_rebalance_freq, br.signal_vol_resample_freq,
                    br.signal_vol_resample_type)

                signal_df = pandas.DataFrame(
                    signal_df.values * leverage_df.values, index=signal_df.index, columns=signal_df.columns)

                # leverage of each individual signal (before portfolio vol target)
                self._individual_leverage = leverage_df

        _pnl = calculations.calculate_signal_returns_with_tc_matrix(signal_df, returns_df, tc=tc)
        _pnl.columns = pnl_cols

        # portfolio is a combination of the underlying signals: sum or mean
        # (mean is the default, and also the fallback for unknown values,
        # which previously left `portfolio` unbound)
        if hasattr(br, 'portfolio_combination') and br.portfolio_combination == 'sum':
            portfolio = pandas.DataFrame(data=_pnl.sum(axis=1), index=_pnl.index, columns=['Portfolio'])
        else:
            portfolio = pandas.DataFrame(data=_pnl.mean(axis=1), index=_pnl.index, columns=['Portfolio'])

        portfolio_leverage_df = pandas.DataFrame(data=numpy.ones(len(_pnl.index)), index=_pnl.index,
                                                 columns=['Portfolio'])

        # should we apply vol target on a portfolio level basis?
        if hasattr(br, 'portfolio_vol_adjust'):
            if br.portfolio_vol_adjust is True:
                risk_engine = RiskEngine()

                portfolio, portfolio_leverage_df = risk_engine.calculate_vol_adjusted_returns(portfolio, br=br)

        self._portfolio = portfolio
        self._signal = signal_df                            # individual signals (before portfolio leverage)
        self._portfolio_leverage = portfolio_leverage_df    # leverage on portfolio

        # multiply portfolio leverage * individual signals to get final position signals
        length_cols = len(signal_df.columns)
        leverage_matrix = numpy.repeat(portfolio_leverage_df.values.flatten()[numpy.newaxis, :], length_cols, 0)

        # final portfolio signals (including signal & portfolio leverage)
        self._portfolio_signal = pandas.DataFrame(
            data=numpy.multiply(numpy.transpose(leverage_matrix), signal_df.values),
            index=signal_df.index, columns=signal_df.columns)

        # scale the position signals consistently with the combination above
        if not (hasattr(br, 'portfolio_combination')) or br.portfolio_combination != 'sum':
            self._portfolio_signal = self._portfolio_signal / float(length_cols)

        self._pnl = _pnl    # individual signals P&L

        # TODO FIX very slow - hence only calculate on demand (see get_pnl_trades)
        self._pnl_trades = None
        # self._pnl_trades = calculations.calculate_individual_trade_gains(signal_df, _pnl)

        self._ret_stats_pnl = RetStats()
        self._ret_stats_pnl.calculate_ret_stats(self._pnl, br.ann_factor)

        self._portfolio.columns = ['Port']
        self._ret_stats_portfolio = RetStats()
        self._ret_stats_portfolio.calculate_ret_stats(self._portfolio, br.ann_factor)

        self._cumpnl = calculations.create_mult_index(self._pnl)                # individual signals cumulative P&L
        self._cumpnl.columns = pnl_cols

        self._cumportfolio = calculations.create_mult_index(self._portfolio)    # portfolio cumulative P&L
        self._cumportfolio.columns = ['Port']

    def get_backtest_output(self):
        # placeholder kept for interface compatibility
        return

    def get_pnl(self):
        """P&L returns of the individual signals (pandas.DataFrame)."""
        return self._pnl

    def get_pnl_trades(self):
        """P&L of each individual trade per signal (computed lazily, as the
        calculation is slow)."""
        if self._pnl_trades is None:
            calculations = Calculations()
            self._pnl_trades = calculations.calculate_individual_trade_gains(self._signal, self._pnl)

        return self._pnl_trades

    def get_pnl_desc(self):
        """Summary statistics of the individual signals' P&L as strings.

        Fix: originally read ``self._ret_stats_signals``, which is never
        assigned anywhere; the stats live in ``self._ret_stats_pnl``.
        """
        return self._ret_stats_pnl.summary()

    def get_pnl_ret_stats(self):
        """RetStats of the individual signals' P&L (queryable object)."""
        return self._ret_stats_pnl

    def get_cumpnl(self):
        """Cumulative P&L index of the individual signals."""
        return self._cumpnl

    def get_cumportfolio(self):
        """Cumulative P&L index of the portfolio."""
        return self._cumportfolio

    def get_portfolio_pnl(self):
        """Raw (non-cumulative) portfolio returns."""
        return self._portfolio

    def get_portfolio_pnl_desc(self):
        """Summary statistics of the portfolio P&L as strings."""
        return self._ret_stats_portfolio.summary()

    def get_portfolio_pnl_ret_stats(self):
        """RetStats of the portfolio P&L (queryable object)."""
        return self._ret_stats_portfolio

    def get_individual_leverage(self):
        """Historical leverage applied to each asset (signal vol targeting)."""
        return self._individual_leverage

    def get_porfolio_leverage(self):
        """Leverage applied at the portfolio level.

        (Misspelled name kept for backward compatibility with callers.)
        """
        return self._portfolio_leverage

    def get_porfolio_signal(self):
        """Final position signals: individual signals * portfolio leverage —
        what would actually be traded.

        (Misspelled name kept for backward compatibility with callers.)
        """
        return self._portfolio_signal

    def get_signal(self):
        """Signals with individual leverage, before portfolio leverage."""
        return self._signal
########################################################################################################################
"""
TradingModel
Abstract class which wraps around Backtest, providing conveninent functions for analaysis. Implement your own
subclasses of this for your own strategy. See strategyfxcta_example.py for a simple implementation of a FX trend following
strategy.
"""
import abc
import pandas
import datetime
from chartpy import Chart, Style, ChartConstants
from finmarketpy.economics import TechParams
from findatapy.timeseries import Calculations, RetStats, Filter
class TradingModel(object):
    """Abstract wrapper around Backtest with convenience analysis/plot helpers.

    Subclasses implement load_parameters, load_assets and construct_signal
    (see strategyfxcta_example.py for a simple FX trend-following example).

    Fixes vs. original:
    - ``get_strategy_leverage`` was defined twice; the first (dead, shadowed)
      definition is removed — effective behavior (returning the *group*
      leverage) is preserved — and an explicitly named
      ``get_strategy_group_leverage`` accessor is added.
    - ``plot_strategy_group_benchmark_pnl`` used the non-existent attribute
      ``style.SCALE_FACTOR`` (the error was silently swallowed by the bare
      except); it now uses ``str(style.scale_factor)`` as create_style does.
    - the removed pandas ``.ix`` indexer is replaced with ``.iloc``/``.loc``.
    - the abstract ``construct_signal`` signature now includes ``br``,
      matching how construct_individual_strategy actually calls it.
    - bare ``except:`` clauses are narrowed to ``except Exception:``.
    """

    #### Default parameters for outputting of results from trading model
    SAVE_FIGURES = True
    DEFAULT_PLOT_ENGINE = ChartConstants().chartfactory_default_engine
    SCALE_FACTOR = ChartConstants().chartfactory_scale_factor
    DUMP_CSV = ''
    DUMP_PATH = datetime.date.today().strftime("%Y%m%d") + ' '

    chart = Chart(engine=DEFAULT_PLOT_ENGINE)
    logger = LoggerManager().getLogger(__name__)

    def __init__(self):
        pass

    # to be implemented by every trading strategy
    @abc.abstractmethod
    def load_parameters(self):
        """Fill parameters for the backtest, such as start/end dates and
        transaction costs. To be implemented by subclass."""
        return

    @abc.abstractmethod
    def load_assets(self):
        """Load time series for the assets to be traded and for the data used
        to generate signals. To be implemented by subclass."""
        return

    @abc.abstractmethod
    def construct_signal(self, spot_df, spot_df2, tech_params, br):
        """Construct the trading signal from pre-loaded time series.

        Parameters
        ----------
        spot_df : pandas.DataFrame
            Market time series for generating signals
        spot_df2 : pandas.DataFrame
            Secondary market time series (can be of different frequency)
        tech_params : TechParams
            Parameters for generating signals
        br : BacktestRequest
            Backtest parameters (added to the signature to match how this
            method is actually invoked by construct_individual_strategy)
        """
        return

    ####### Generic functions for every backtest
    def construct_strategy(self, br=None):
        """Construct the returns for all the strategies which have been
        specified: parameters come from load_parameters (or ``self.br``),
        market data from load_assets."""
        calculations = Calculations()

        # get the parameters for backtesting
        if hasattr(self, 'br'):
            br = self.br
        elif br is None:
            br = self.load_parameters()

        # get market data for backtest
        asset_df, spot_df, spot_df2, basket_dict = self.load_assets()

        if hasattr(br, 'tech_params'):
            tech_params = br.tech_params
        else:
            tech_params = TechParams()

        cumresults = pandas.DataFrame(index=asset_df.index)
        portleverage = pandas.DataFrame(index=asset_df.index)

        from collections import OrderedDict
        ret_statsresults = OrderedDict()

        # each portfolio key calculate returns - can put parts of the portfolio in the key
        for key in basket_dict.keys():
            asset_cut_df = asset_df[[x + '.close' for x in basket_dict[key]]]
            spot_cut_df = spot_df[[x + '.close' for x in basket_dict[key]]]

            self.logger.info("Calculating " + key)

            results, backtest = self.construct_individual_strategy(br, spot_cut_df, spot_df2, asset_cut_df,
                                                                   tech_params, key)

            cumresults[results.columns[0]] = results
            portleverage[results.columns[0]] = backtest.get_porfolio_leverage()
            ret_statsresults[key] = backtest.get_portfolio_pnl_ret_stats()

            # for a key, designated as the final strategy save that as the "strategy"
            if key == self.FINAL_STRATEGY:
                self._strategy_pnl = results
                self._strategy_pnl_ret_stats = backtest.get_portfolio_pnl_ret_stats()
                self._strategy_leverage = backtest.get_porfolio_leverage()
                self._strategy_signal = backtest.get_porfolio_signal()
                self._strategy_pnl_trades = backtest.get_pnl_trades()

        # get benchmark for comparison
        benchmark = self.construct_strategy_benchmark()

        cumresults_benchmark = self.compare_strategy_vs_benchmark(br, cumresults, benchmark)

        self._strategy_group_benchmark_ret_stats = ret_statsresults

        if hasattr(self, '_benchmark_ret_stats'):
            ret_statslist = ret_statsresults
            ret_statslist['Benchmark'] = (self._benchmark_ret_stats)
            self._strategy_group_benchmark_ret_stats = ret_statslist

        # calculate annualised returns
        years = calculations.average_by_annualised_year(calculations.calculate_returns(cumresults_benchmark))

        self._strategy_group_pnl = cumresults
        self._strategy_group_pnl_ret_stats = ret_statsresults
        self._strategy_group_benchmark_pnl = cumresults_benchmark
        self._strategy_group_leverage = portleverage
        self._strategy_group_benchmark_annualised_pnl = years

    def construct_individual_strategy(self, br, spot_df, spot_df2, asset_df, tech_params, key):
        """Combine the signal with asset returns to find the returns of an
        individual strategy.

        Returns
        -------
        (pandas.DataFrame, Backtest)
            Cumulative portfolio index (column renamed to *key*, plus stats
            when br.calc_stats) and the populated Backtest object.
        """
        backtest = Backtest()

        signal_df = self.construct_signal(spot_df, spot_df2, tech_params, br)   # get trading signal
        backtest.calculate_trading_PnL(br, asset_df, signal_df)                 # calculate P&L

        cumpnl = backtest.get_cumpnl()

        if br.write_csv: cumpnl.to_csv(self.DUMP_CSV + key + ".csv")

        cumportfolio = backtest.get_cumportfolio()

        if br.calc_stats:
            cumportfolio.columns = [key + ' ' + str(backtest.get_portfolio_pnl_desc()[0])]
        else:
            cumportfolio.columns = [key]

        return cumportfolio, backtest

    def compare_strategy_vs_benchmark(self, br, strategy_df, benchmark_df):
        """Append a (possibly vol-adjusted) benchmark to the strategy time
        series when br.include_benchmark is set; otherwise return the
        strategy series unchanged."""
        include_benchmark = False
        calc_stats = False

        if hasattr(br, 'include_benchmark'): include_benchmark = br.include_benchmark
        if hasattr(br, 'calc_stats'): calc_stats = br.calc_stats

        if include_benchmark:
            ret_stats = RetStats()
            risk_engine = RiskEngine()
            time_filter = Filter()  # renamed from `filter` (shadowed the builtin)
            calculations = Calculations()

            # align strategy time series with that of benchmark
            strategy_df, benchmark_df = strategy_df.align(benchmark_df, join='left', axis=0)

            # if necessary apply vol target to benchmark (to make it comparable with strategy)
            if hasattr(br, 'portfolio_vol_adjust'):
                if br.portfolio_vol_adjust is True:
                    benchmark_df = risk_engine.calculate_vol_adjusted_index_from_prices(benchmark_df, br=br)

            # only calculate return statistics if this has been specified (note when different
            # frequencies of data might underrepresent vol)
            if calc_stats:
                benchmark_df = benchmark_df.fillna(method='ffill')
                ret_stats.calculate_ret_stats_from_prices(benchmark_df, br.ann_factor)
                benchmark_df.columns = ret_stats.summary()

            # realign strategy & benchmark
            strategy_benchmark_df = strategy_df.join(benchmark_df, how='inner')
            strategy_benchmark_df = strategy_benchmark_df.fillna(method='ffill')

            strategy_benchmark_df = time_filter.filter_time_series_by_date(br.plot_start, br.finish_date,
                                                                           strategy_benchmark_df)
            strategy_benchmark_df = calculations.create_mult_index_from_prices(strategy_benchmark_df)

            self._benchmark_pnl = benchmark_df
            self._benchmark_ret_stats = ret_stats

            return strategy_benchmark_df

        return strategy_df

    def get_strategy_name(self):
        return self.FINAL_STRATEGY

    def get_individual_leverage(self):
        return self._individual_leverage

    def get_strategy_group_pnl_trades(self):
        return self._strategy_pnl_trades

    def get_strategy_pnl(self):
        return self._strategy_pnl

    def get_strategy_pnl_ret_stats(self):
        return self._strategy_pnl_ret_stats

    def get_strategy_group_benchmark_pnl(self):
        return self._strategy_group_benchmark_pnl

    def get_strategy_group_benchmark_ret_stats(self):
        return self._strategy_group_benchmark_ret_stats

    def get_strategy_leverage(self):
        # NOTE: this method was historically defined twice; the surviving
        # (second) definition returned the *group* leverage, so that
        # behavior is preserved here. See get_strategy_group_leverage.
        return self._strategy_group_leverage

    def get_strategy_group_leverage(self):
        """Explicitly named accessor for the per-portfolio-key leverage."""
        return self._strategy_group_leverage

    def get_strategy_signal(self):
        return self._strategy_signal

    def get_benchmark(self):
        return self._benchmark_pnl

    def get_benchmark_ret_stats(self):
        return self._benchmark_ret_stats

    def get_strategy_group_benchmark_annualised_pnl(self):
        return self._strategy_group_benchmark_annualised_pnl

    #### Plotting
    def reduce_plot(self, data_frame):
        """Reduce the frequency of a time series to every business day so it
        can be plotted more easily; on any failure the input is returned
        unchanged (best effort)."""
        try:
            # make plots on every business day (will downsample intraday data)
            # NOTE(review): on modern pandas resample('B') returns a Resampler
            # with no implicit aggregation — confirm the intended aggregation
            # (old pandas defaulted to mean) if upgrading.
            data_frame = data_frame.resample('B')
            data_frame = data_frame.fillna(method='pad')
            return data_frame
        except Exception:
            return data_frame

    ##### Quick helper functions to plot aspects of the strategy such as P&L, leverage etc.
    def plot_individual_leverage(self):

        style = self.create_style("Leverage", "Individual Leverage")

        try:
            self.chart.plot(self.reduce_plot(self._individual_leverage), chart_type='line', style=style)
        except Exception:
            pass

    def plot_strategy_group_pnl_trades(self):

        style = self.create_style("(bp)", "Individual Trade PnL")

        # zero when there isn't a trade exit
        # strategy_pnl_trades = self._strategy_pnl_trades * 100 * 100
        # strategy_pnl_trades = strategy_pnl_trades.dropna()

        # note only works with single large basket trade
        try:
            strategy_pnl_trades = self._strategy_pnl_trades.fillna(0) * 100 * 100
            self.chart.plot(self.reduce_plot(strategy_pnl_trades), chart_type='line', style=style)
        except Exception:
            pass

    def plot_strategy_pnl(self):

        style = self.create_style("", "Strategy PnL")

        try:
            self.chart.plot(self.reduce_plot(self._strategy_pnl), chart_type='line', style=style)
        except Exception:
            pass

    def plot_strategy_signal_proportion(self, strip=None):

        signal = self._strategy_signal

        # count number of long, short and flat periods in our sample
        long = signal[signal > 0].count()
        short = signal[signal < 0].count()
        flat = signal[signal == 0].count()

        keys = long.index

        # how many trades have there been (ignore size of the trades)
        trades = abs(signal - signal.shift(-1))
        trades = trades[trades > 0].count()

        df_trades = pandas.DataFrame(index=keys, columns=['Trades'], data=trades)

        df = pandas.DataFrame(index=keys, columns=['Long', 'Short', 'Flat'])

        df['Long'] = long
        df['Short'] = short
        df['Flat'] = flat

        if strip is not None: keys = [k.replace(strip, '') for k in keys]

        df.index = keys
        df_trades.index = keys
        # df = df.sort_index()

        style = self.create_style("", "")

        try:
            style.file_output = self.DUMP_PATH + self.FINAL_STRATEGY + ' (Strategy signal proportion).png'
            style.html_file_output = self.DUMP_PATH + self.FINAL_STRATEGY + ' (Strategy signal proportion).html'
            self.chart.plot(self.reduce_plot(df), chart_type='bar', style=style)

            style.file_output = self.DUMP_PATH + self.FINAL_STRATEGY + ' (Strategy trade no).png'
            style.html_file_output = self.DUMP_PATH + self.FINAL_STRATEGY + ' (Strategy trade no).html'
            self.chart.plot(self.reduce_plot(df_trades), chart_type='bar', style=style)
        except Exception:
            pass

    def plot_strategy_leverage(self):

        style = self.create_style("Leverage", "Strategy Leverage")

        try:
            self.chart.plot(self.reduce_plot(self._strategy_leverage), chart_type='line', style=style)
        except Exception:
            pass

    def plot_strategy_group_benchmark_pnl(self, strip=None):

        style = self.create_style("", "Group Benchmark PnL - cumulative")

        strat_list = self._strategy_group_benchmark_pnl.columns  # .sort_values()

        for line in strat_list:
            self.logger.info(line)

        # plot cumulative line of returns
        self.chart.plot(self.reduce_plot(self._strategy_group_benchmark_pnl), style=style)

        # needs write stats flag turned on
        try:
            keys = self._strategy_group_benchmark_ret_stats.keys()
            ir = []

            for key in keys: ir.append(self._strategy_group_benchmark_ret_stats[key].inforatio()[0])

            if strip is not None: keys = [k.replace(strip, '') for k in keys]

            ret_stats = pandas.DataFrame(index=keys, data=ir, columns=['IR'])
            # ret_stats = ret_stats.sort_index()

            # fix: originally used the non-existent attribute style.SCALE_FACTOR,
            # which raised and was swallowed by the except; use scale_factor as
            # create_style does
            style.file_output = self.DUMP_PATH + self.FINAL_STRATEGY + \
                ' (Group Benchmark PnL - IR) ' + str(style.scale_factor) + '.png'
            style.html_file_output = self.DUMP_PATH + self.FINAL_STRATEGY + \
                ' (Group Benchmark PnL - IR) ' + str(style.scale_factor) + '.html'
            style.display_brand_label = False

            self.chart.plot(ret_stats, chart_type='bar', style=style)
        except Exception:
            pass

    def plot_strategy_group_benchmark_annualised_pnl(self, cols=None):
        # TODO - unfinished, needs checking!
        if cols is None: cols = self._strategy_group_benchmark_annualised_pnl.columns

        style = self.create_style("", "Group Benchmark Annualised PnL")
        style.color = ['red', 'blue', 'purple', 'gray', 'yellow', 'green', 'pink']

        self.chart.plot(self.reduce_plot(self._strategy_group_benchmark_annualised_pnl[cols]),
                        chart_type='line', style=style)

    def plot_strategy_group_leverage(self):

        style = self.create_style("Leverage", "Group Leverage")

        self.chart.plot(self.reduce_plot(self._strategy_group_leverage), chart_type='line', style=style)

    def plot_strategy_signals(self, date=None, strip=None):

        ######## plot signals
        strategy_signal = self._strategy_signal
        strategy_signal = 100 * (strategy_signal)

        if date is None:
            # last available observation (.ix was removed from pandas)
            last_day = strategy_signal.iloc[-1].transpose().to_frame()
        else:
            last_day = strategy_signal.loc[date].transpose().to_frame()

        if strip is not None:
            last_day.index = [x.replace(strip, '') for x in last_day.index]

        style = self.create_style("positions (% portfolio notional)", "Positions")

        self.chart.plot(last_day, chart_type='bar', style=style)

    def create_style(self, title, file_add):
        """Build a chartpy Style for this strategy's plots; wires up file
        outputs when SAVE_FIGURES is set and the engine writes static files."""
        style = Style()

        style.title = self.FINAL_STRATEGY + " " + title
        style.display_legend = True
        style.scale_factor = self.SCALE_FACTOR

        if self.DEFAULT_PLOT_ENGINE not in ['plotly', 'cufflinks'] and self.SAVE_FIGURES:
            style.file_output = self.DUMP_PATH + self.FINAL_STRATEGY + \
                ' (' + file_add + ') ' + str(style.scale_factor) + '.png'
            style.html_file_output = self.DUMP_PATH + self.FINAL_STRATEGY + \
                ' (' + file_add + ') ' + str(style.scale_factor) + '.html'

        try:
            style.silent_display = self.SILENT_DISPLAY
        except AttributeError:
            # SILENT_DISPLAY is optional on subclasses
            pass

        return style
#######################################################################################################################
"""
RiskEngine
Adjusts signal weighting according to risk constraints (volatility targeting)
"""
class RiskEngine(object):
    """Adjusts signal weighting according to risk constraints (volatility targeting)."""

    def calculate_vol_adjusted_index_from_prices(self, prices_df, br):
        """Adjust an index of prices for a vol target.

        Parameters
        ----------
        prices_df : pandas.DataFrame
            Asset prices to be traded
        br : BacktestRequest
            Parameters for the backtest specifying start date, finish data,
            transaction costs etc.

        Returns
        -------
        pandas.DataFrame containing vol adjusted index
        """
        calculations = Calculations()

        returns_df, leverage_df = self.calculate_vol_adjusted_returns(prices_df, br, returns=False)

        return calculations.create_mult_index(returns_df)

    def calculate_vol_adjusted_returns(self, returns_df, br, returns=True):
        """Adjust returns for a vol target.

        Parameters
        ----------
        returns_df : pandas.DataFrame
            Asset returns (or prices if returns=False) to be traded
        br : BacktestRequest
            Parameters for the backtest specifying start date, finish data,
            transaction costs etc.
        returns : bool
            True if returns_df already contains returns, False for prices

        Returns
        -------
        (pandas.DataFrame, pandas.DataFrame)
            Vol-adjusted returns (net of transaction costs) and the leverage
            series applied.
        """
        calculations = Calculations()

        if not returns:
            returns_df = calculations.calculate_returns(returns_df)

        # default the optional resampling parameters on the request
        if not hasattr(br, 'portfolio_vol_resample_type'):
            br.portfolio_vol_resample_type = 'mean'

        if not hasattr(br, 'portfolio_vol_resample_freq'):
            br.portfolio_vol_resample_freq = None

        leverage_df = self.calculate_leverage_factor(returns_df,
                                                     br.portfolio_vol_target, br.portfolio_vol_max_leverage,
                                                     br.portfolio_vol_periods, br.portfolio_vol_obs_in_year,
                                                     br.portfolio_vol_rebalance_freq, br.portfolio_vol_resample_freq,
                                                     br.portfolio_vol_resample_type)

        vol_returns_df = calculations.calculate_signal_returns_with_tc_matrix(leverage_df, returns_df, tc=br.spot_tc_bp)
        vol_returns_df.columns = returns_df.columns

        return vol_returns_df, leverage_df

    def calculate_leverage_factor(self, returns_df, vol_target, vol_max_leverage, vol_periods=60, vol_obs_in_year=252,
                                  vol_rebalance_freq='BM', data_resample_freq=None, data_resample_type='mean',
                                  returns=True, period_shift=0):
        """Calculate the time series of leverage for a specified vol target.

        Parameters
        ----------
        returns_df : pandas.DataFrame
            Asset returns (or prices if returns=False)
        vol_target : float
            vol target for assets
        vol_max_leverage : float
            maximum leverage allowed
        vol_periods : int
            number of periods to calculate volatility
        vol_obs_in_year : int
            number of observations in the year
        vol_rebalance_freq : str
            how often to rebalance
        data_resample_freq : str
            do we need to resample the underlying data first? (eg. have we
            got intraday data?) -- NOT implemented yet
        data_resample_type : str
            resampling method used when downsampling the leverage series
        returns : bool
            is this a returns time series or prices?
        period_shift : int
            should we delay the signal by a number of periods?

        Returns
        -------
        pandas.DataFrame
        """
        calculations = Calculations()
        # avoid shadowing the builtin 'filter'
        filt = Filter()

        if data_resample_freq is not None:
            return
            # TODO not implemented yet

        if not returns:
            returns_df = calculations.calculate_returns(returns_df)

        roll_vol_df = calculations.rolling_volatility(returns_df,
                                                      periods=vol_periods, obs_in_year=vol_obs_in_year).shift(
            period_shift)

        # calculate the leverage as function of vol target (with max lev constraint)
        lev_df = vol_target / roll_vol_df
        lev_df[lev_df > vol_max_leverage] = vol_max_leverage

        lev_df = filt.resample_time_series_frequency(lev_df, vol_rebalance_freq, data_resample_type)

        returns_df, lev_df = returns_df.align(lev_df, join='left', axis=0)

        lev_df = lev_df.fillna(method='ffill')
        # .ix was removed in pandas 1.0; the slice here is positional, so use .iloc.
        # Ignore the first elements before the vol window kicks in.
        lev_df.iloc[0:vol_periods] = numpy.nan

        return lev_df
8141765 | <filename>plio/io/io_json.py
import json
import numpy as np
import datetime
class NumpyEncoder(json.JSONEncoder):
    """JSON encoder that additionally serializes numpy arrays and dates."""

    def default(self, obj):
        # numpy arrays become plain (nested) lists
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        # dates (and datetimes, which subclass date) become ISO-8601 strings
        if isinstance(obj, datetime.date):
            return obj.isoformat()
        # defer to the base class, which raises TypeError for unknown types
        return json.JSONEncoder.default(self, obj)
def read_json(inputfile):
    """
    Read the input json file into a python dictionary.

    Parameters
    ==========
    inputfile : str
        PATH to the file on disk

    Returns
    =======
    dict or None
        The parsed JSON document, or None if the file cannot be opened.
    """
    # The open() call must be inside the try block: in the original code it
    # was outside, so a missing file raised instead of returning None.
    try:
        with open(inputfile, 'r') as f:
            return json.load(f)
    except IOError:  # pragma: no cover
        return None
def write_json(outdata, outputfile):
    """
    Write a Python dictionary as a plain-text JSON file

    Parameters
    ==========
    outdata : dict
        The data structure to be serialized

    outputfile : str
        The file to write the data to.

    Raises
    ======
    IOError
        If the data cannot be serialized or written.
    """
    # Bug fix: the original called json.dumps(outdata, outputfile), passing the
    # file path as the positional 'skipkeys' argument, which silently dropped
    # unserializable keys instead of writing the path-independent JSON.
    try:
        with open(outputfile, 'w') as f:
            f.write(json.dumps(outdata))
    except Exception as exc:  # pragma: no cover
        raise IOError('Unable to write data to {}'.format(outputfile)) from exc
| StarcoderdataPython |
301909 | <filename>agency/urls.py
from django.urls import path
from . import views
from django.conf import settings
from django.conf.urls.static import static
# URL routes for the agency app: the site root maps to the home view.
urlpatterns = [
    path('', views.home, name='home'),
]
# Append static-file routes (NOTE(review): Django's static() helper is meant
# for development serving; confirm production uses a real static server).
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| StarcoderdataPython |
3259975 | <gh_stars>1000+
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for full license information.
import numpy as np
def target_function(w, b):
    """Forward pass of the tutorial: x = 2w + 3b, y = 2b + 1, z = x * y."""
    x, y = 2 * w + 3 * b, 2 * b + 1
    return x, y, x * y
def single_variable(w, b, t):
    """Drive z(w, b) toward target t by adjusting b only.

    Uses the fixed divisor 63 as an approximate derivative, so each step is a
    crude Newton update (demonstration code; prints every iteration).
    """
    print("\nsingle variable: b ----- ")
    error = 1e-5
    x, y, z = target_function(w, b)
    delta_z = z - t
    print("w=%f,b=%f,z=%f,delta_z=%f" % (w, b, z, delta_z))
    while abs(delta_z) >= error:
        delta_b = delta_z / 63
        print("delta_b=%f" % delta_b)
        b = b - delta_b
        x, y, z = target_function(w, b)
        delta_z = z - t
        print("w=%f,b=%f,z=%f,delta_z=%f" % (w, b, z, delta_z))
    print("done!")
    print("final b=%f" % b)
def single_variable_new(w, b, t):
    """Drive z(w, b) toward target t by adjusting b only.

    Unlike single_variable, the derivative dz/db = 2x + 3y is recomputed at
    every step, giving a proper Newton update.
    """
    print("\nsingle variable new: b ----- ")
    error = 1e-5
    x, y, z = target_function(w, b)
    delta_z = z - t
    print("w=%f,b=%f,z=%f,delta_z=%f" % (w, b, z, delta_z))
    while abs(delta_z) >= error:
        factor_b = 2 * x + 3 * y
        delta_b = delta_z / factor_b
        print("factor_b=%f, delta_b=%f" % (factor_b, delta_b))
        b = b - delta_b
        x, y, z = target_function(w, b)
        delta_z = z - t
        print("w=%f,b=%f,z=%f,delta_z=%f" % (w, b, z, delta_z))
    print("done!")
    print("final b=%f" % b)
# this version has a bug
def double_variable(w, b, t):
    """Drive z(w, b) toward target t by adjusting both w and b.

    Deliberately flawed teaching variant: the derivatives are frozen at their
    initial values (63 and 18) instead of being recomputed each step.
    """
    print("\ndouble variable: w, b -----")
    error = 1e-5
    x, y, z = target_function(w, b)
    delta_z = z - t
    print("w=%f,b=%f,z=%f,delta_z=%f" % (w, b, z, delta_z))
    while abs(delta_z) >= error:
        # split the correction evenly between the two variables
        delta_b = delta_z / 63 / 2
        delta_w = delta_z / 18 / 2
        print("delta_b=%f, delta_w=%f" % (delta_b, delta_w))
        b = b - delta_b
        w = w - delta_w
        x, y, z = target_function(w, b)
        delta_z = z - t
        print("w=%f,b=%f,z=%f,delta_z=%f" % (w, b, z, delta_z))
    print("done!")
    print("final b=%f" % b)
    print("final w=%f" % w)
# this is correct version
def double_variable_new(w, b, t):
    """Drive z(w, b) toward target t by adjusting both w and b.

    Correct variant: the partial derivatives are recomputed each iteration
    via calculate_wb_factor, and the correction is split between w and b.
    """
    print("\ndouble variable new: w, b -----")
    error = 1e-5
    x, y, z = target_function(w, b)
    delta_z = z - t
    print("w=%f,b=%f,z=%f,delta_z=%f" % (w, b, z, delta_z))
    while abs(delta_z) >= error:
        factor_b, factor_w = calculate_wb_factor(x, y)
        delta_b = delta_z / factor_b / 2
        delta_w = delta_z / factor_w / 2
        print("factor_b=%f, factor_w=%f, delta_b=%f, delta_w=%f" % (factor_b, factor_w, delta_b, delta_w))
        b = b - delta_b
        w = w - delta_w
        x, y, z = target_function(w, b)
        delta_z = z - t
        print("w=%f,b=%f,z=%f,delta_z=%f" % (w, b, z, delta_z))
    print("done!")
    print("final b=%f" % b)
    print("final w=%f" % w)
def calculate_wb_factor(x, y):
    """Return (dz/db, dz/dw) for z = x*y where x = 2w + 3b and y = 2b + 1."""
    # dz/db = 2x + 3y (chain rule through both x and y); dz/dw = 2y.
    return 2 * x + 3 * y, 2 * y
if __name__ == '__main__':
    # Demo: drive z = (2w+3b)(2b+1) toward t = 150 from the starting point
    # w=3, b=4 using each of the four solver variants above.
    w = 3
    b = 4
    t = 150
    single_variable(w,b,t)
    single_variable_new(w,b,t)
    double_variable(w,b,t)
    double_variable_new(w,b,t)
| StarcoderdataPython |
6409350 | #!/usr/bin/env python
import requests
import bs4
import pandas as pd
import argparse
import datetime
import time
import sys
from io import StringIO
from finviz.screener import Screener
from junit_xml import TestSuite, TestCase
from scrap_utils import *
# -----------------------------------------------------------------
# hand crafted scrapper
# -----------------------------------------------------------------
finviz_url = 'https://finviz.com/screener.ashx?'
scrap_delay = 1
def get_stock_table(tab, filter, page):
    """Fetch one finviz screener result page and parse its stock table."""
    # finviz pages 20 rows at a time; 'r' is the 1-based row offset.
    page_url = finviz_url + tab + filter + '&r=' + str((page - 1) * 20 + 1)
    print('getting page', page, 'url:', page_url)
    html = get_url(page_url)
    soup = bs4.BeautifulSoup(html, 'lxml')
    # NOTE(review): the stock table is assumed to be the 17th <table> on the
    # page — fragile if finviz changes its layout.
    stock_table = soup.find_all('table')[16]
    return pd.read_html(str(stock_table), header=0, index_col=1)[0]
def scrap_finviz(filter, tab_list = None):
    """Scrape every page of the finviz screener for the given filter.

    Each page is fetched once per tab in *tab_list* (screener views showing
    different column sets); the tabs are joined column-wise and the pages
    row-wise into a single DataFrame.
    """
    # get the front page
    front_page = get_url(finviz_url + filter)
    # get the last page number from the pagination links
    soup = bs4.BeautifulSoup(front_page, 'lxml')
    screener_pages = soup.find_all('a', {'class' : 'screener-pages'})
    last_page = int(screener_pages[-1].text)
    print('total pages:', last_page)
    # default tabs: the finviz 'v=' view codes for the different column sets
    if tab_list is None:
        tab_list = ['v=111&', 'v=121&', 'v=131&', 'v=141&', 'v=161&', 'v=171&',]
    df_pages = []
    for i in range(1,last_page+1):
        df_tabs = []
        for tab in tab_list:
            # throttle requests to avoid being rate-limited
            time.sleep(scrap_delay)
            df_tabs.append(get_stock_table(tab,filter,i))
        df_pages.append(pd.concat(df_tabs, axis=1))
    df_merged = pd.concat(df_pages)
    return df_merged
def main():
    """Command-line entry point: scrape finviz and write a dated CSV."""
    parser = argparse.ArgumentParser(description='scrap finviz screener')
    parser.add_argument('-output', type=str, help='output file')
    parser.add_argument('-output_prefix', type=str, default='../stock_data/raw_daily_finviz/finviz_', help='prefix of the output file')
    # NOTE(review): argparse type=bool treats any non-empty string as True;
    # '-use_bs4_scrapper False' still enables it — confirm intended usage.
    parser.add_argument('-use_bs4_scrapper', type=bool, default=True, help='Use my old bs4 scraper')
    parser.add_argument('-date', type=str, default=str(datetime.date.today()), help='Specify the date')
    parser.add_argument('-filter', type=str, action='append', help='filters apply to the screener')
    parser.add_argument('-tab', type=str, action='append', help='tabs to the scrap')
    parser.add_argument('-delay', type=int, help='delay in sec between each URL request')
    parser.add_argument('-drop_col', type=str, action='append', default=[], help='remove columns')
    args = parser.parse_args()
    # default filters: split market caps above/below micro to stay under
    # finviz's result limits
    if args.filter is None:
        args.filter = ['f=cap_microover', 'f=cap_microunder']
    if args.delay is not None:
        global scrap_delay
        scrap_delay = args.delay
    # check is the market closed today
    if is_market_close(args.date):
        print('The market is closed today')
        return
    if args.output is None:
        filename = args.output_prefix + args.date + '.csv'
    else:
        filename = args.output
    # scrap the data
    if args.use_bs4_scrapper:
        # use my old code
        df_filters = []
        for filter in args.filter:
            df_filters.append(scrap_finviz(filter, args.tab))
        df = pd.concat(df_filters)
    else:
        # use the finviz package
        stock_list = Screener(filters=args.filter)
        df = pd.read_csv(StringIO(stock_list.to_csv()))
    # drop duplicate tickers/columns, tag every row with the date, and save
    df = df.loc[~df.index.duplicated(), ~df.columns.duplicated()]
    df.drop(columns=['No.']+args.drop_col, inplace=True)
    df.insert(0, 'Date', args.date, True)
    df.to_csv(filename)


if __name__ == "__main__":
    status = main()
    sys.exit(0 if status is None else status)
| StarcoderdataPython |
11364983 | <filename>tests/scanner/test_data/fake_lien_scanner_data.py
# Copyright 2018 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lien data to be used in the unit tests."""
from google.cloud.forseti.common.gcp_type import lien
from google.cloud.forseti.common.gcp_type import organization
from google.cloud.forseti.common.gcp_type import project
from google.cloud.forseti.common.gcp_type import resource
from google.cloud.forseti.scanner.audit import lien_rules_engine
ORGANIZATION = organization.Organization(
'234',
display_name='Organization 234',
full_name='organization/234/',
data='fake_org_data_234',
)
PROJECT = project.Project(
'p1',
project_number=11223344,
display_name='Project with lien',
parent=ORGANIZATION,
full_name='organization/234/project/p1/',
data='fake_project_data_2341',
)
_LIEN_JSON = """{
"name": "liens/l1",
"parent": "projects/p1",
"restrictions": ["resourcemanager.projects.delete"],
"origin": "testing",
"createTime": "2018-09-05T14:45:46.534Z"
}
"""
LIEN = lien.Lien.from_json(PROJECT, _LIEN_JSON)
VIOLATIONS = [lien_rules_engine.RuleViolation(
resource_id='p1',
resource_name='Project with lien',
resource_type=resource.ResourceType.PROJECT,
full_name='organization/234/project/p1/',
rule_index=0,
rule_name='Lien test rule',
violation_type='LIEN_VIOLATION',
resource_data='',
)]
| StarcoderdataPython |
3554471 | <reponame>johan--/commcare-hq
from django.core.management.base import BaseCommand
from corehq.apps.custom_data_fields import models as cdm
from corehq.apps.locations.models import Location
from corehq.apps.domain.models import Domain
class Command(BaseCommand):
"""
Create a CustomDataFieldsDefinition based on existing custom location
information on each domain
"""
help = ''
def handle(self, *args, **options):
for domain in Domain.get_all_names():
fields_definition = cdm.CustomDataFieldsDefinition.get_or_create(
domain,
'LocationFields'
)
had_fields = bool(fields_definition.fields)
existing_field_slugs = set([field.slug for field in fields_definition.fields])
for location in Location.by_domain(domain):
location_data = location.metadata
for key in location_data.keys():
if (key and key not in existing_field_slugs
and not cdm.is_system_key(key)):
existing_field_slugs.add(key)
fields_definition.fields.append(cdm.CustomDataField(
slug=key,
label=key,
is_required=False,
))
for field in fields_definition.fields:
if cdm.is_system_key(field.slug):
fields_definition.fields.remove(field)
# Only save a definition for domains which use custom location data
if fields_definition.fields or had_fields:
fields_definition.save()
print 'finished domain "{}"'.format(domain)
| StarcoderdataPython |
94834 | import os
import shutil
# Destination folder names (user-facing strings intentionally in Japanese)
dir_name1 = "画像フォルダ"      # images
dir_name2 = "音声フォルダ"      # audio
dir_name3 = "映像フォルダ"      # video
dir_name4 = "テキストフォルダ"  # text

# Minimum number of directory entries required before tidying starts
data_number = 3

if len(os.listdir()) >= data_number:
    # Snapshot the entries before creating the destination folders so the
    # folders themselves are not treated as files to sort.
    data_files = os.listdir()

    # exist_ok=True fixes the original bug where one pre-existing folder
    # raised FileExistsError and aborted creation of the remaining three,
    # making the moves below fail.
    for folder in (dir_name1, dir_name2, dir_name3, dir_name4):
        os.makedirs(folder, exist_ok=True)

    # Map file extensions to their destination folder.
    destinations = {
        (".png", ".jpg", ".bmp"): dir_name1,
        (".mp3", ".wav"): dir_name2,
        (".mp4", ".mov"): dir_name3,
        (".txt", ".csv"): dir_name4,
    }

    for files in data_files:
        for extensions, folder in destinations.items():
            if files.endswith(extensions):
                shutil.move(files, folder + "/" + files)
                break
else:
    print("整頓するほどのデータが存在しません")
1759243 |
import re
import lxml.html
from link_crawler import link_crawler
FIELDS = ('area', 'population', 'iso', 'country', 'capital', 'continent', 'tld', 'currency_code', 'currency_name', 'phone', 'postal_code_format', 'postal_code_regex', 'languages', 'neighbours')
def scrape_callback(url, html):
    """Parse a scraped country page and print its field values.

    Only URLs containing '/view/' (country detail pages) are parsed; index
    and search pages are ignored.
    """
    if re.search('/view/',url):
        tree = lxml.html.fromstring(html)
        # one <td class="w2p_fw"> per field, addressed via its table-row id
        row = [tree.cssselect('table > tr#places_{}__row > td.w2p_fw'.format(field))[0].text_content() for field in FIELDS]
        print(url, row)


if __name__ == '__main__':
    # Crawl the example site, following only links that match the 'view' URL
    # pattern, with a 1-second delay between requests.
    link_crawler('http://example.webscraping.com', r'[0-9a-zA-Z./:]*/view/[0-9a-zA-Z./:]*', delay=1,
                 num_retries=1, user_agent='BadCrawel', scrape_callback=scrape_callback)
8122775 | from harborclient import base
class RepositoryManager(base.Manager):
    """Client-side helpers for the Harbor /repositories endpoints."""

    def get(self, id):
        """Get a Repository."""
        endpoint = "/repositories/%s" % id
        return self._get(endpoint)

    def list(self, project):
        """Get repositories accompany with relevant project and repo name."""
        endpoint = "/repositories?project_id=%s" % project
        return self._list(endpoint)

    def list_tags(self, repo_name):
        """Get the tag of the repository."""
        endpoint = "/repositories/%s/tags" % repo_name
        return self.api.client.get(endpoint)

    def get_manifests(self, repo_name, tag):
        """Get manifests of a relevant repository."""
        endpoint = ("/repositories/%(repo_name)s/tags/%(tag)s/manifest"
                    % {"repo_name": repo_name, "tag": tag})
        return self.api.client.get(endpoint)

    def get_top(self, count):
        """Get public repositories which are accessed most."""
        endpoint = "/repositories/top?count=%s" % count
        return self._list(endpoint)
| StarcoderdataPython |
11232766 | <gh_stars>0
def mytest():
    """Print a marker identifying this test module."""
    message = 'test_module_3_hello'
    print(message)
| StarcoderdataPython |
230618 | <filename>v_m_b/image/generateManifest.py
# downloading region
import io
import logging
import sys
from pathlib import PurePath, Path
import aiofiles
from PIL import Image
IMG_JPG = 'JPEG'
IMG_TIF = 'TIFF'
JPG_EXT = 'JPG'
TIF_EXT = 'TIF'

# Maps the image format reported by the decoder (e.g. PIL's Image.format) to
# the list of file-name extensions accepted by BUDA for that format.
# (The original annotated this as ': {}', which evaluates a throwaway dict
# literal as the annotation — fixed to a proper type.)
BUDA_supported_file_exts: dict = {IMG_JPG: [IMG_JPG, JPG_EXT], IMG_TIF: [IMG_TIF, TIF_EXT]}


def is_BUDA_Matching_file_ext(file_name: str, image_data_format: str) -> bool:
    """
    Return True when *file_name*'s extension is an accepted spelling of
    *image_data_format* (e.g. 'x.jpg' or 'x.jpeg' for JPEG data).

    :param file_name: name of file to test (provides the suffix)
    :param image_data_format: from internal image data - what the file thinks it is
    :return: False for empty/None arguments or unsupported formats
    """
    if not image_data_format or not file_name:
        return False
    # unsupported format (e.g. PNG): nothing can match
    if image_data_format not in BUDA_supported_file_exts:
        return False
    # '.jpg' -> 'JPG': strip the dot and compare case-insensitively
    file_suffix = Path(file_name).suffix.upper()[1:]
    return any(ext.upper() == file_suffix
               for ext in BUDA_supported_file_exts[image_data_format])
async def generateManifest_a(ig_container: PurePath, image_list: list) -> list:
    """
    this actually generates the manifest. See example in the repo. The example corresponds to W22084, image group I0886.
    Asynchronous variant: files are read with aiofiles; a failure on one image
    is logged and the remaining images are still processed.
    :param ig_container: path of parent of image group
    :param image_list: list of image names
    :returns: list of internal data for each file in image_list
    """
    res: list = []
    image_file_name: str
    for image_file_name in image_list:
        try:
            image_path: Path = Path(ig_container, image_file_name)
            # the entry is appended before filling, so a failed image still
            # appears in the manifest with just its filename
            imgdata = {"filename": image_file_name}
            res.append(imgdata)
            # extracted from fillData
            async with aiofiles.open(image_path, "rb") as image_file:
                image_buffer: bytes = await image_file.read()
                bio: io.BytesIO = io.BytesIO(image_buffer)
                fillDataWithBlobImage(bio, imgdata)
        except:
            # log and continue with the next image
            si = sys.exc_info()
            logging.error(f"processing {image_file_name} async file processing {si[0]} {si[1]} ")
    return res
def generateManifest_s(ig_container: PurePath, image_list: []) -> []:
    """
    Synchronously build the manifest for an image group. See example in the
    repo (corresponds to W22084, image group I0886).

    A failure on one image is logged and processing continues; the failed
    image's entry keeps only its filename.

    :param ig_container: path of parent of image group
    :param image_list: list of image names
    :returns: list of internal data for each file in image_list
    """
    res = []
    for image_file_name in image_list:
        entry = {"filename": image_file_name}
        res.append(entry)
        img_path = Path(ig_container, image_file_name)
        with open(str(img_path), "rb") as image_file:
            raw = image_file.read()
        try:
            fillDataWithBlobImage(io.BytesIO(raw), entry)
        except:
            exc = sys.exc_info()
            logging.error(f"processing {image_file_name} sync file processing {exc[0]} {exc[1]} ")
    return res
def fillDataWithBlobImage(blob: io.BytesIO, data: dict):
    """
    Populate *data* with the width/height (and related info) of the image
    held in *blob*.

    The blob is the binary payload returned by s3; it is decoded in memory —
    do not round-trip it through the file system. This could be coded in a
    faster way, but the faster way doesn't work with group4 tiff:
    https://github.com/python-pillow/Pillow/issues/3756

    For pilmode, see
    https://pillow.readthedocs.io/en/5.1.x/handbook/concepts.html#concept-modes
    They are different from the Java ones:
    https://docs.oracle.com/javase/8/docs/api/java/awt/image/BufferedImage.html
    but they should be enough. Note that there's no 16 bit.
    """
    size = blob.getbuffer().nbytes
    im = Image.open(blob)
    data["width"] = im.width
    data["height"] = im.height
    # record the decoder's format only when the file extension disagrees
    # with it (volume_manifest_builder #52)
    if not is_BUDA_Matching_file_ext(data["filename"], im.format):
        data["format"] = im.format
    # debian PIL casts dpi to floats, and debian JSON can't dump them
    if 'dpi' in im.info:
        data["dpi"] = [int(x) for x in im.info['dpi']]
    else:
        data["dpi"] = []
    # only record sizes of more than 1MB
    if size > 1000000:
        data["size"] = size
# end region
| StarcoderdataPython |
11235410 | <reponame>TrongNghiaRyt/SE347-project-Django-Website
from django.contrib import admin
from .models import *
class Answers(admin.TabularInline):
    # Inline editor so a question's answers can be edited on its admin page.
    model = Answer


admin.site.site_header = 'VehiTest admin'
class QuestionAndAnswers(admin.ModelAdmin):
    # Question admin: answers are edited inline, list is filterable by kind.
    inlines = [Answers]
    list_display = ('id', 'kind', 'content')
    list_filter = ('kind',)
    fields = (
        'content',
        'pic',
        'kind',
    )
class kind(admin.TabularInline):
    # Inline editor for the question types attached to an exam.
    model = QuestionType


class Examination(admin.ModelAdmin):
    # Exam admin: question types are edited inline.
    inlines = [kind]
    fieldsets = (
        (None, {
            "fields": (
                'name',
                'exam_times',
                'question_number',
                'question_correct',
            ),
        }),
    )


# Register the models with their customized admin classes.
admin.site.register(Question, QuestionAndAnswers)
admin.site.register(exam, Examination)
admin.site.register(Answer)
| StarcoderdataPython |
3510359 | # Generated by Django 2.0.7 on 2018-09-25 14:20
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Runs after the app's initial migration so the user model exists.
    dependencies = [
        ('profiles_api', '0001_initial'),
    ]

    operations = [
        # UserProfileDetail extends the user model one-to-one: the user FK is
        # also the primary key, and a single 'job' field is added.
        migrations.CreateModel(
            name='UserProfileDetail',
            fields=[
                ('id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
                ('job', models.CharField(max_length=200)),
            ],
        ),
    ]
| StarcoderdataPython |
320476 | """
Test CPS and PUF data file contents.
"""
import os
import pytest
import numpy as np
def unique_recid(data, dataname):
    """
    Test that RECID values are unique.

    Raises ValueError listing every duplicated RECID and its count.
    """
    unique, counts = np.unique(data['RECID'], return_counts=True)
    # np.unique returns the ids in sorted order, so the report is sorted too
    dup_msgs = ['\nRECID={} has COUNT={}'.format(rid, cnt)
                for rid, cnt in zip(unique, counts) if cnt > 1]
    if dup_msgs:
        title = 'The following {} RECIDs have COUNTS greater than one:'
        raise ValueError(title.format(dataname) + ''.join(dup_msgs))
def min_max(data, meta, dataname):
    """
    Test that variables stay inside the [min, max] range declared in the
    metadata; variables not available in this dataset are skipped.
    """
    for var, info in meta.items():
        # skip variables not present in this dataset
        if dataname not in info['availability']:
            continue
        min_value = info['range']['min']
        max_value = info['range']['max']
        m = '{}-{} contains values less than min value'.format(dataname,
                                                               var)
        assert np.all(data[var] >= min_value), m
        m = '{}-{} contains values greater than max value'.format(dataname,
                                                                  var)
        assert np.all(data[var] <= max_value), m
def relationships(data, dataname):
    """
    Test the relationships between variables.
    Note (1): we have weakened the XTOT == sum of nu18, n1820, n21 assertion
    for the PUF because in PUF data the value of XTOT is capped by IRS-SOI.
    Note (2): we have weakened the n24 <= nu18 assertion for the PUF because
    the only way to ensure it held true would be to create extreamly small
    bins during the tax unit matching process, which had the potential to
    reduce the overall match accuracy.
    """
    eq_str = '{}-{} not equal to {}'
    less_than_str = '{}-{} not less than or equal to {}'
    tol = 0.020001
    # each total must equal the sum of its primary (p) and spouse (s) splits
    eq_vars = [('e00200', ['e00200p', 'e00200s']),
               ('e00900', ['e00900p', 'e00900s']),
               ('e02100', ['e02100p', 'e02100s'])]
    for lhs, rhs in eq_vars:
        if not np.allclose(data[lhs], data[rhs].sum(axis=1), atol=tol):
            raise ValueError(eq_str.format(dataname, lhs, rhs))
    nsums = data[['nu18', 'n1820', 'n21']].sum(axis=1)
    if dataname == 'CPS':
        m = eq_str.format(dataname, 'XTOT', 'sum of nu18, n1820, n21')
        assert np.all(data['XTOT'] == nsums), m
    else:
        # see Note (1) in docstring
        m = less_than_str.format(dataname, 'XTOT', 'sum of nu18, n1820, n21')
        assert np.all(data['XTOT'] <= nsums), m
    m = less_than_str.format(dataname, 'n24', 'nu18')
    if dataname == 'CPS':
        assert np.all(data['n24'] <= data['nu18']), m
    else:
        # see Note (2) in docstring
        # NOTE(review): 14941 pins the current PUF vintage; the count must be
        # updated whenever the underlying PUF match changes.
        m = 'Number of records where n24 > nu18 has changed'
        assert (data['n24'] > data['nu18']).sum() == 14941, m
        subdata = data[data['n24'] > data['nu18']]
        m = 'n24 > nu18 + 3'
        assert np.all(subdata['n24'] <= subdata['nu18'] + 3), m
    # qualified dividends cannot exceed total dividends
    m = less_than_str.format(dataname, 'e00650', 'e00600')
    assert np.all(data['e00600'] >= data['e00650']), m
    # taxable pensions cannot exceed total pensions
    m = less_than_str.format(dataname, 'e01700', 'e01500')
    assert np.all(data['e01500'] >= data['e01700']), m
    # NOTE(review): the two pencon checks reduce to e00200p >= 0 and
    # e00200s >= 0 — confirm whether wages+contributions was meant to be
    # compared against wages instead.
    m = less_than_str.format(dataname, 'pencon_p', 'e00200p+pencon_p')
    assert np.all((data['e00200p'] + data['pencon_p']) >= data['pencon_p']), m
    m = less_than_str.format(dataname, 'pencon_s', 'e00200s+pencon_s')
    assert np.all((data['e00200s'] + data['pencon_s']) >= data['pencon_s']), m
def variable_check(test_path, data, dataname):
    """
    Test aggregate values (sum, min, max) of every column in *data* against
    the '{dataname}_agg_expected.txt' file in *test_path*.

    On any difference, unexpected column, or missing column, the actual
    aggregates are written to '{dataname}_agg_actual.txt' and a ValueError
    describing all problems is raised.
    """
    expected_file_name = '{}_agg_expected.txt'.format(dataname)
    efile_path = os.path.join(test_path, expected_file_name)
    with open(efile_path, 'r') as efile:
        expected_txt = efile.readlines()
    # parse the expected file (header line skipped): VARIABLE SUM MIN MAX
    expected_sum = dict()
    expected_min = dict()
    expected_max = dict()
    for line in expected_txt[1:]:
        txt = line.rstrip()
        split = txt.split()
        assert len(split) == 4
        var = split[0]
        expected_sum[var] = int(split[1])
        expected_min[var] = int(split[2])
        expected_max[var] = int(split[3])
    # loop through each column in the dataset and check sum, min, max
    actual_txt = '{:20}{:>15}{:>15}{:>15}\n'.format('VARIABLE',
                                                    'SUM', 'MIN', 'MAX')
    var_inform = '{:20}{:15d}{:15d}{:15d}\n'
    diffs = False
    diff_list_str = ''  # string to hold all of the variables with errors
    new_vars = False
    new_var_list_str = ''  # string to hold all of the unexpected variables
    for var in sorted(data.columns):
        # avoid shadowing the sum/min/max builtins
        var_sum = int(data[var].sum())
        var_min = int(data[var].min())
        var_max = int(data[var].max())
        actual_txt += var_inform.format(var, var_sum, var_min, var_max)
        try:
            var_diff = (var_sum != expected_sum[var] or
                        var_min != expected_min[var] or
                        var_max != expected_max[var])
            if var_diff:
                diffs = True
                diff_list_str += var + '\n'
        except KeyError:
            # the variable is not in the expected file at all
            new_vars = True
            new_var_list_str += var + '\n'
    # check for any missing variables
    missing_vars = False
    missing_vars_set = set(expected_sum.keys()) - set(data.columns)
    if missing_vars_set:
        missing_vars = True
        missing_vars_str = '\n'.join(v for v in missing_vars_set)
    # if there is an error, write the actual file and raise
    if diffs or new_vars or missing_vars:
        # bug fix: the original used 'dataname.upper' (no call), which put a
        # bound-method repr in the message instead of the dataset name
        msg = '{}\n'.format(dataname.upper())
        actual_file_name = '{}_agg_actual.txt'.format(dataname)
        actual_file_path = os.path.join(test_path, actual_file_name)
        with open(actual_file_path, 'w') as afile:
            afile.write(actual_txt)
        # modify error message based on which errors are raised
        if diffs:
            diff_msg = 'Aggregate results differ for following variables:\n'
            diff_msg += diff_list_str
            msg += diff_msg + '\n'
        if new_vars:
            new_msg = 'The following unexpected variables were discovered:\n'
            new_msg += new_var_list_str
            msg += new_msg + '\n'
        if missing_vars:
            msg += 'The following expected variables are missing in the data:'
            msg += '\n' + missing_vars_str + '\n\n'
        msg += 'If new results OK, copy {} to {}'.format(actual_file_name,
                                                         expected_file_name)
        raise ValueError(msg)
def check_cps_benefits(data):
    """
    Test benefit variables in CPS data.

    For each benefit the weighted statistics of the '{name}_ben' column are
    compared against hard-coded expected values: the overall minimum must be
    zero, and the minimum/maximum/average of the positive amounts must match
    the tables below. All mismatches are accumulated and raised together.
    """
    bnames = ['mcare', 'mcaid', 'ssi', 'snap', 'wic',
              'tanf', 'housing', 'vet', 'other']
    expect_minben = 0
    # specify expected benefit statistics in CPS data
    expect_ben_stat = dict()
    # .. maximum value per filing unit for benefit
    expect_ben_stat['max'] = {
        'mcare': 92976,  # <--- implies a filing unit with 8 beneficiaries
        'mcaid': 98440,  # <--- implies a filing unit with 14 beneficiaries
        'ssi': 64378,
        'snap': 26569,
        'wic': 4972,
        'tanf': 159407,  # <--- SEEMS ABSURD ($13,284/month)
        'housing': 53253,
        'vet': 169920,  # <--- HIGH ($14,160/month) military pension or what?
        'other': 40211
    }
    # .. minimum value per filing unit for positive benefit
    expect_ben_stat['min'] = {
        'mcare': 11622,  # <--- the actuarial value of Medicare insurance
        'mcaid': 7031,  # <--- the actuarial value of Medicaid insurance
        'ssi': 1,  # <--- SEEMS LOW
        'snap': 9,  # <--- SEEMS LOW
        'wic': 241,
        'tanf': 1,  # <--- SEEMS LOW
        'housing': 1265,
        'vet': 9890,  # <--- is this actuarial value of VA hospital costs?
        'other': 3
    }
    # .. mean value per filing unit of positive benefit
    expect_ben_stat['avg'] = {
        'mcare': 14928,
        'mcaid': 13192,
        'ssi': 7913,
        'snap': 2907,
        'wic': 748,
        'tanf': 9117,
        'housing': 7048,
        'vet': 29912,
        'other': 4706
    }
    # compare actual and expected benefit statistics
    error_msg = ''
    # s006 is the sampling weight scaled by 100
    wgt = data['s006'] * 0.01
    for bname in bnames:
        col = '{}_ben'.format(bname)
        assert col in data.columns
        ben = data[col]
        minben = ben.min()
        maxben = ben.max()
        pos = ben > 0
        minpben = ben[pos].min()
        avgben = (ben[pos] * wgt[pos]).sum() / wgt[pos].sum()
        if not np.allclose([minben], [0], rtol=0, atol=0.1):
            msg = '\nCPS {}_ben minben={} != 0'
            error_msg += msg.format(bname, minben)
        exp_minpben = expect_ben_stat['min'][bname]
        if not np.allclose([minpben], [exp_minpben], rtol=0, atol=0.1):
            msg = '\nCPS {}_ben minpben={} != {}'
            error_msg += msg.format(bname, minpben, exp_minpben)
        exp_maxben = expect_ben_stat['max'][bname]
        if not np.allclose([maxben], [exp_maxben], rtol=0, atol=0.1):
            msg = '\nCPS {}_ben maxben={} != {}'
            error_msg += msg.format(bname, maxben, exp_maxben)
        expect_avgben = expect_ben_stat['avg'][bname]
        if not np.allclose([avgben], [expect_avgben], rtol=0, atol=0.5):
            msg = '\nCPS {}_ben avgben={:.2f} != {:.2f}'
            error_msg += msg.format(bname, avgben, expect_avgben)
        # check that mc??? benefits are actuarial values of health insurance
        # (the maximum must be an integer multiple of the per-person value)
        if bname == 'mcare' or bname == 'mcaid':
            ratio = float(maxben) / minpben
            expect_ratio = round(ratio)
            if not np.allclose([ratio], [expect_ratio], rtol=0, atol=0.001):
                msg = '\nCPS {}_ben ratio={:.6f} != {:.0f}'
                error_msg += msg.format(bname, ratio, expect_ratio)
    if error_msg:
        raise ValueError(error_msg)
@pytest.mark.requires_pufcsv
def test_pufcsv_data(puf, metadata, test_path):
    """
    Test PUF data: unique ids, metadata ranges, cross-variable
    relationships, and aggregate sums/mins/maxes.
    """
    unique_recid(puf, 'PUF')
    min_max(puf, metadata, 'puf')
    relationships(puf, 'PUF')
    variable_check(test_path, puf, 'puf')
def test_cpscsv_data(cps, metadata, test_path):
    """
    Test CPS data: same checks as the PUF plus the benefit-variable
    statistics (benefits exist only in the CPS file).
    """
    unique_recid(cps, 'CPS')
    min_max(cps, metadata, 'cps')
    relationships(cps, 'CPS')
    variable_check(test_path, cps, 'cps')
    check_cps_benefits(cps)
| StarcoderdataPython |
3280209 | '''
This script includes two functionalities: chain2vcf and chain2bed
chain2vcf: Converts a chain file to a VCF file.
This conversion is not capble of handling translocations and inversions. Users should specify
a list of "chain ids" that contain only indels.
Example:
# This output VCF is used to convert h38 coordinates to T2T coordinates.
python chain_utils.py chain2vcf -i hg38.t2t-chm13-v1.0.over.chain -c 1-23 -o main_chain-h38.t2t_chm13.vcf
chain2bed: Converts a chain file to a BED file.
The conversion only supports one chain per contig (we currently don't support translocations).
Users need to provide a list of chain ids for conversion.
The output BED file will contain the "chain block" regions.
Example:
python chain_utils.py chain2bed -i hg38.t2t-chm13-v1.0.over.chain -c 1-23 -o hg38.t2t-chm13-v1.0.conf_regions.bed
'''
import argparse
import sys
def parse_args():
    """Parse the command line and dispatch to chain2vcf or chain2bed."""
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers()
    #subparsers = parser.add_subparsers(dest='subparser')
    # chain2vcf: convert indel-only chains into a VCF
    parser_c2v = subparsers.add_parser('chain2vcf')
    parser_c2v.add_argument(
        '-i', '--in_chain', required=True,
        help='Path to the chain file to be converted.'
    )
    parser_c2v.add_argument(
        '-c', '--chain_ids', required=True,
        help='Chain ids to be converted, separated by commas and hyphens. E.g., "1-9", "1,5-8".'
    )
    parser_c2v.add_argument(
        '-o', '--out_vcf',
        help='Path to the output VCF file.'
    )
    parser_c2v.set_defaults(func=chain2vcf)
    # chain2bed: emit the chain-block regions as a BED file
    parser_c2b = subparsers.add_parser('chain2bed')
    parser_c2b.add_argument(
        '-i', '--in_chain', required=True,
        help='Path to the chain file to be converted.'
    )
    parser_c2b.add_argument(
        '-c', '--chain_ids', required=True,
        help='Chain ids to be converted, separated by commas and hyphens. E.g., "1-9", "1,5-8".'
    )
    parser_c2b.add_argument(
        '-o', '--out_bed',
        help='Path to the output BED file.'
    )
    parser_c2b.set_defaults(func=chain2bed)
    args = parser.parse_args()
    # each subcommand registered its handler via set_defaults(func=...)
    args.func(args)
#kwargs = vars(parser.parse_args())
#globals()[kwargs.pop('subparser')](**kwargs)
class Chain_Fields():
    # Column indices of a UCSC chain header line:
    # chain <score> <tName> <tSize> <tStrand> <tStart> <tEnd>
    #       <qName> <qSize> <qStrand> <qStart> <qEnd> <id>
    HEADER = 0
    SCORE = 1
    TNAME = 2
    TSIZE = 3
    TSTRAND = 4
    TSTART = 5
    TEND = 6
    QNAME = 7
    QSIZE = 8
    QSTRAND = 9
    QSTART = 10
    QEND = 11
    CID = 12
    # Column indices of a chain alignment-block line: <size> <dt> <dq>
    ALN_SIZE = 0
    ALN_DT = 1
    ALN_DQ = 2
class Chain2Bed():
def __init__(self, out_bed=sys.stdout, chain_hdr='', CF=None):
assert(chain_hdr[CF.HEADER] == 'chain')
assert(chain_hdr[CF.TNAME] == chain_hdr[CF.QNAME])
self.out_bed = out_bed
self.CF = CF
self.contig = chain_hdr[self.CF.QNAME]
self.qstart = int(chain_hdr[self.CF.QSTART])
self.tstart = int(chain_hdr[self.CF.TSTART])
self.bed_records = []
def add(self, chain_aln):
if len(self.bed_records) == 0 or self.tstart != self.bed_records[-1][1]:
self.bed_records.append([self.tstart, self.tstart + int(chain_aln[self.CF.ALN_SIZE])])
else:
self.bed_records[-1][1] = self.tstart + int(chain_aln[self.CF.ALN_SIZE])
self.tstart += int(chain_aln[self.CF.ALN_SIZE]) + int(chain_aln[self.CF.ALN_DT])
self.qstart += int(chain_aln[self.CF.ALN_SIZE]) + int(chain_aln[self.CF.ALN_DQ])
def write(self):
for record in self.bed_records:
print(f'{self.contig}\t{record[0]}\t{record[1]}', file=self.out_bed)
class Chain2Vcf():
    """Streams one chain's indels out as VCF records while blocks are added.

    Records use placeholder bases (runs of 'A') for REF/ALT rather than
    real sequence; positions are one-based per the VCF convention.
    Records are printed eagerly (in __init__ for any start-coordinate
    offset, and in add() for each gap) instead of being buffered.
    """
    def __init__(self, out_vcf=sys.stdout, chain_hdr='', CF=None):
        """Parse a chain header line and emit a record for any start offset."""
        assert(chain_hdr[CF.HEADER] == 'chain')
        # assert(chain_hdr[CF.TNAME] == chain_hdr[CF.QNAME])
        self.out_vcf = out_vcf
        self.CF = CF
        self.contig = chain_hdr[self.CF.QNAME]
        # Add one b/c VCF format is one-based.
        self.qstart = int(chain_hdr[self.CF.QSTART]) + 1
        self.tstart = int(chain_hdr[self.CF.TSTART]) + 1
        if chain_hdr[self.CF.TNAME] == chain_hdr[self.CF.QNAME]:
            # INS w.r.t the query sequence.
            if self.qstart < self.tstart:
                ref = 'A'
                qry = 'A' * (self.tstart - self.qstart + 1)
                print(f'{self.contig}\t{self.qstart}\t.\t{ref}\t{qry}\t.\tAUTO\tTPOS={self.tstart}',
                      file=self.out_vcf)
            # DEL
            elif self.qstart > self.tstart:
                ref = 'A' * (self.qstart - self.tstart + 1)
                qry = 'A'
                print(f'{self.contig}\t{self.qstart}\t.\t{ref}\t{qry}\t.\tAUTO\tTPOS={self.tstart}',
                      file=self.out_vcf)
        else:
            # Different contig names: compare the overall spanned lengths
            # instead of the start coordinates.
            # NOTE(review): this branch is unreachable from chain2vcf(),
            # which exits on a contig-name mismatch — confirm whether it
            # is still needed.
            len_t = int(chain_hdr[self.CF.TEND]) - int(chain_hdr[self.CF.TSTART])
            len_q = int(chain_hdr[self.CF.QEND]) - int(chain_hdr[self.CF.QSTART])
            if len_t > len_q:
                ref = 'A'
                qry = 'A' * (len_t - len_q + 1)
                print(f'{self.contig}\t{self.qstart}\t.\t{ref}\t{qry}\t.\tAUTO\tTPOS={self.tstart}',
                      file=self.out_vcf)
            elif len_q > len_t:
                ref = 'A' * (len_q - len_t + 1)
                qry = 'A'
                print(f'{self.contig}\t{self.qstart - len_q}\t.\t{ref}\t{qry}\t.\tAUTO\tTPOS={self.tstart}',
                      file=self.out_vcf)
    def add(self, chain_aln):
        """Consume an alignment-data line ("size dt dq"); print a record for any gap.

        The REF column is filled from the query-side gap (seq_q) and the
        ALT column from the target-side gap (seq_t).  Both coordinates
        are advanced past the aligned block first, so printed positions
        sit at the start of the gap.
        """
        self.tstart += int(chain_aln[self.CF.ALN_SIZE])
        self.qstart += int(chain_aln[self.CF.ALN_SIZE])
        len_t = int(chain_aln[self.CF.ALN_DT])
        len_q = int(chain_aln[self.CF.ALN_DQ])
        if (len_t > len_q):
            # Insertion wrt the query seq
            len_t -= len_q
            len_q = 0
            seq_t = 'A' * (len_t + 1)
            seq_q = 'A' * (len_q + 1)
            print(f'{self.contig}\t{self.qstart}\t.\t{seq_q}\t{seq_t}\t.\tAUTO\t' +
                  f'TPOS={self.tstart};ALN_DT={int(chain_aln[self.CF.ALN_DT])};ALN_DQ={int(chain_aln[self.CF.ALN_DQ])}',
                  file=self.out_vcf)
        elif (len_t < len_q):
            # Deletion wrt the query seq
            len_q -= len_t
            len_t = 0
            seq_t = 'A' * (len_t + 1)
            seq_q = 'A' * (len_q + 1)
            print(f'{self.contig}\t{self.qstart}\t.\t{seq_q}\t{seq_t}\t.\tAUTO\t' +
                  f'TPOS={self.tstart};ALN_DT={int(chain_aln[self.CF.ALN_DT])};ALN_DQ={int(chain_aln[self.CF.ALN_DQ])}',
                  file=self.out_vcf)
        # Finally skip the gaps themselves.
        self.tstart += int(chain_aln[self.CF.ALN_DT])
        self.qstart += int(chain_aln[self.CF.ALN_DQ])
def parse_chain_id(chain_ids):
    """Expand a chain-id specification into a list of individual chain ids.

    Args:
        chain_ids: A string of chain ids separated by commas, where each
            comma-separated token is either a single id (e.g. "5") or an
            inclusive numeric range (e.g. "1-9").

    Returns:
        A list of chain-id strings in the order they appear in the spec.

    Empty tokens (e.g. from a trailing comma, which previously produced a
    bogus '' id) are skipped; surrounding whitespace is stripped so that
    "1, 2" works as expected.  Tokens with more than one hyphen are
    ignored, matching the original behavior.
    """
    list_chain = []
    for token in chain_ids.split(','):
        token = token.strip()
        if not token:
            # Skip empty tokens such as those produced by "1,2,".
            continue
        parts = token.split('-')
        if len(parts) == 1:
            list_chain.append(parts[0])
        elif len(parts) == 2:
            # Inclusive numeric range, e.g. "3-5" -> ["3", "4", "5"].
            list_chain.extend(str(i) for i in range(int(parts[0]), int(parts[1]) + 1))
    return list_chain
def get_contig_name(fn, chain_ids, CF):
dict_cid_contig = {}
with open(fn, 'r') as f:
for line in f:
line = line.split()
if len(line) == 13:
if line[CF.CID] in chain_ids:
dict_cid_contig[line[CF.CID]] = line[CF.TNAME]
return dict_cid_contig
def write_vcf_hdr(f_out, chain_ids, dict_cid_contig):
    """Write a minimal VCF 4.3 header to *f_out*.

    Args:
        f_out: Writable text stream receiving the header lines.
        chain_ids: Unused; kept for interface compatibility with callers.
        dict_cid_contig: Mapping of chain id -> contig name; one
            ``##contig`` line is emitted per distinct contig.

    Fixes: the previous version leaked a debug ``print`` of the mapping
    to stdout and emitted duplicate ``##contig`` lines when several chain
    ids mapped to the same contig.
    """
    print('##fileformat=VCFv4.3', file=f_out)
    print('##FILTER=<ID=AUTO,Description="Generated automatically.">', file=f_out)
    # dict.fromkeys() dedupes while preserving first-seen order.
    for contig in dict.fromkeys(dict_cid_contig.values()):
        print(f'##contig=<ID={contig}>', file=f_out)
    print('##INFO=<ID=TPOS,Number=A,Type=Integer,Description="Variant position on SEQ_T.">',
          file=f_out)
    print('##INFO=<ID=ALN_DT,Number=A,Type=Integer,Description="Length of gap on SEQ_T.">',
          file=f_out)
    print('##INFO=<ID=ALN_DQ,Number=A,Type=Integer,Description="Length of gap on SEQ_Q.">',
          file=f_out)
    print('#CHROM POS ID REF ALT QUAL FILTER INFO', file=f_out)
def chain2bed(args):
    """Convert a chain file to a BED file of chain-block regions.

    Only chains whose id is listed in ``args.chain_ids`` and whose target
    and query contig names match are converted (translocations are not
    supported).  Output goes to ``args.out_bed``, or stdout when unset.

    Fixes: the contig-name check now happens *before* constructing
    Chain2Bed (whose TNAME == QNAME assertion would otherwise fire
    first), the input file is closed via ``with``, an opened output file
    is closed, and the unused ``current_id`` variable is gone.
    """
    print('chain2bed', file=sys.stderr)
    # Constants.
    CF = Chain_Fields()
    chain_ids = parse_chain_id(args.chain_ids)
    print('Chain IDs to be processed', chain_ids, file=sys.stderr)
    f_out = open(args.out_bed, 'w') if args.out_bed else sys.stdout
    try:
        with open(args.in_chain, 'r') as f:
            chain = None
            for line in f:
                line = line.split()
                if len(line) == 13:
                    # Chain header: track this chain only when it is
                    # selected and maps a contig onto itself.
                    chain = None
                    if (line[CF.CID] in chain_ids and
                            line[CF.TNAME] == line[CF.QNAME]):
                        chain = Chain2Bed(out_bed=f_out, chain_hdr=line, CF=CF)
                elif len(line) == 0:
                    # Blank line terminates a chain record.
                    chain = None
                elif len(line) == 3:
                    # Alignment data line ("size dt dq").
                    if chain:
                        chain.add(line)
                elif len(line) == 1:
                    # Final "size"-only line: flush and reset.
                    # NOTE(review): the final block is not add()ed before
                    # writing, so it is absent from the BED output —
                    # confirm this omission is intended.
                    if chain:
                        chain.write()
                        chain = None
    finally:
        if f_out is not sys.stdout:
            f_out.close()
def chain2vcf(args):
    """Convert a chain file to a VCF describing its indels.

    Writes a VCF header, then streams each selected chain through
    Chain2Vcf, which prints INS/DEL records (with dummy 'A' bases) as a
    side effect.  Output goes to ``args.out_vcf``, or stdout when unset.

    Fixes: the contig-name mismatch is now detected *before* Chain2Vcf
    is constructed (the old order could emit a bogus record before
    erroring), ``sys.exit(1)`` replaces the interactive-only ``exit()``,
    files are closed reliably, and the unused ``current_id`` is gone.
    """
    # Constants.
    CF = Chain_Fields()
    chain_ids = parse_chain_id(args.chain_ids)
    f_out = open(args.out_vcf, 'w') if args.out_vcf else sys.stdout
    try:
        write_vcf_hdr(f_out, chain_ids,
                      get_contig_name(args.in_chain, chain_ids, CF))
        with open(args.in_chain, 'r') as f:
            chain = None
            for line in f:
                line = line.split()
                if len(line) == 13:
                    # Chain header.
                    chain = None
                    if line[CF.CID] in chain_ids:
                        if line[CF.TNAME] != line[CF.QNAME]:
                            # Translocations between contigs are unsupported.
                            print('[Error] Contig names mismatch',
                                  line[CF.TNAME], line[CF.QNAME], file=sys.stderr)
                            sys.exit(1)
                        chain = Chain2Vcf(out_vcf=f_out, chain_hdr=line, CF=CF)
                elif len(line) == 0:
                    # Blank line terminates a chain record.
                    chain = None
                elif len(line) == 3:
                    # Alignment data line ("size dt dq"); printing happens
                    # inside Chain2Vcf.add().
                    if chain:
                        chain.add(line)
                elif len(line) == 1:
                    # Final "size"-only line: nothing left to emit.
                    chain = None
    finally:
        if f_out is not sys.stdout:
            f_out.close()
if __name__ == '__main__':
parse_args()
| StarcoderdataPython |
4960084 | <filename>steerclear/settings/windows_settings.py
# SQLAlchemy connection strings for Windows hosts (SQLite files on C:).
SQLALCHEMY_DATABASE_URI = 'sqlite:///C://steerclear.db'
# Separate database used when running the test suite.
TEST_SQLALCHEMY_DATABASE_URI = 'sqlite:///C://test.db'
| StarcoderdataPython |
366325 | <filename>setup.py
#!/usr/bin/env python3
from setuptools import setup, find_packages
install_requires = [
'beancount',
'pyyaml',
]
setup(
name="BeanPorter",
version='0.1',
description="Beancount importer",
long_description=
"""
Beancount importer.
""",
license="MIT",
author="WeZZard",
author_email="<EMAIL>",
url="https://github.com/WeZZard/BeanPorter",
download_url="https://github.com/WeZZard/BeanPorter",
packages=find_packages(where='src'),
package_dir={
"BeanPorter": "src/BeanPorter",
"BeanPorter.bpcml": "src/BeanPorter/bpcml",
},
install_requires = install_requires,
package_data={
'BeanPorter': ['bean_porter_config.yaml']
},
entry_points = {'console_scripts': ['bean-porter=BeanPorter:main']},
python_requires='>=3.6',
)
| StarcoderdataPython |
3364452 | <reponame>smartanthill/smartanthill
# Copyright (C) <NAME> <<EMAIL>>
# See LICENSE for details.
from binascii import hexlify
from smartanthill.litemq.exchange import ExchangeFactory
from smartanthill.service import SAMultiService
class LiteMQService(SAMultiService):
    """In-process message-queue service managing named exchanges.

    Exchanges are created by ExchangeFactory and stored by name; messages
    are routed by publishing to an exchange with a routing key, and
    consumers bind queues to exchanges.
    """
    def __init__(self, name, options):
        SAMultiService.__init__(self, name, options)
        # Maps exchange name -> exchange object from ExchangeFactory.
        self._exchanges = {}
    def stopService(self):
        # All exchanges must have been undeclared before shutdown.
        assert self._exchanges == {}
        SAMultiService.stopService(self)
    def declare_exchange(self, name, type_="direct"):
        """Create an exchange; no-op when one with *name* already exists."""
        if name in self._exchanges:
            return
        self._exchanges[name] = ExchangeFactory().newExchange(name, type_)
        self.log.info("Declared new exchange '%s' with type '%s'" % (
            name, type_))
    def undeclare_exchange(self, name):
        """Remove an exchange; no-op when *name* is unknown."""
        if name not in self._exchanges:
            return
        del self._exchanges[name]
        self.log.info("Undeclared exchange '%s'" % name)
    def produce(self, exchange, routing_key, message, properties=None):
        """Publish *message* to *exchange* under *routing_key*.

        Binary payloads (properties['binary'] truthy) are hex-encoded for
        the debug log only; the message itself is forwarded unchanged.
        """
        assert exchange in self._exchanges
        self.log.debug(
            "Produce new message '%s' with routing_key '%s' to exchange '%s'" %
            (hexlify(message) if properties and "binary" in properties and
             properties["binary"] else message, routing_key, exchange))
        return self._exchanges[exchange].publish(routing_key, message,
                                                 properties)
    def consume(self, exchange, queue, routing_key, callback, ack=False):
        """Bind *queue* on *exchange* for *routing_key*, delivering to *callback*."""
        assert exchange in self._exchanges
        self._exchanges[exchange].bind_queue(queue, routing_key, callback, ack)
        self.log.info("Registered consumer with exchange=%s, queue=%s, "
                      "routing_key=%s, ack=%s" % (exchange, queue, routing_key,
                                                  ack))
    def unconsume(self, exchange, queue):
        """Unbind *queue* from *exchange*."""
        assert exchange in self._exchanges
        self._exchanges[exchange].unbind_queue(queue)
        self.log.info("Unregistered consumer with exchange=%s "
                      "and queue=%s" % (exchange, queue))
def makeService(name, options):
    """Factory entry point: create the LiteMQ service instance."""
    return LiteMQService(name, options)
| StarcoderdataPython |
8158611 | <filename>QCA4020_SDK/target/sectools/qdn/sectools/features/isc/parsegen/elf_support/elf_vars.py
# ===============================================================================
#
# Copyright (c) 2013-2017 Qualcomm Technologies, Inc.
# All Rights Reserved.
# Confidential and Proprietary - Qualcomm Technologies, Inc.
#
# ===============================================================================
from sectools.common.utils.c_base import ValPolicy
from sectools.common.parsegen.elf.segment.type import PT_NOTE
from sectools.common.parsegen.elf.segment.type import PT_PHDR
# Constants
ELF_BLOCK_ALIGN = 0x1000  # Block alignment size
MAX_PHDR_COUNT = 100  # Maximum allowable program headers
# Debug files
FILE_PROG_SEG_IN = 'sec_elf_in_prog_segment'
FILE_HASH_SEG_IN = 'sec_elf_in_hash_segment'
FILE_DECRYPTED_IN = 'sec_elf_in_decrypted'
FILE_PROG_HASH_REMOVED_IN = 'sec_elf_in_prog_hash_removed'
# Keys into the elf_vars dict below.
NON_ENCAP_SEGMENTS = "NON_ENCAP_SEGMENTS"
NON_ENCRYPT_SEGMENTS = "NON_ENCRYPT_SEGMENTS"
NON_HASHABLE_SEGMENTS = "NON_HASHABLE_SEGMENTS"
POLICY_NON_LOAD_OUTSIDE_LOAD = "POLICY_NON_LOAD_OUTSIDE_LOAD"
POLICY_OVERLAPPING_SEGMENTS_VIRTUAL = "POLICY_OVERLAPPING_SEGMENTS_VIRTUAL"
POLICY_OVERLAPPING_SEGMENTS_PHYSICAL = "POLICY_OVERLAPPING_SEGMENTS_PHYSICAL"
# Dictionary for use in addr validation.
# 'policy' is a lambda so that overrides made via
# set_overlapping_segments_policies() are picked up at lookup time.
PHY_ADDR_VALIDATION_DICT = {
    'isPhy': True,
    'policy': lambda: elf_vars[POLICY_OVERLAPPING_SEGMENTS_PHYSICAL],
    'tString': 'Phys',
    'eString': 'Following segments physical address overlap: ' + '\n'}
VIR_ADDR_VALIDATION_DICT = {
    'isPhy': False,
    'policy': lambda: elf_vars[POLICY_OVERLAPPING_SEGMENTS_VIRTUAL],
    'tString': 'Vir',
    'eString': 'Following segments virtual address overlap: ' + '\n'}
# Mutable module-level settings; test hooks below swap entries in place.
elf_vars = {
    # Selected policy for overlapping segments
    POLICY_OVERLAPPING_SEGMENTS_PHYSICAL: ValPolicy(ValPolicy.WARN),
    POLICY_OVERLAPPING_SEGMENTS_VIRTUAL: ValPolicy(ValPolicy.WARN),
    POLICY_NON_LOAD_OUTSIDE_LOAD: ValPolicy(ValPolicy.ERROR),
    # Segments allowed to exist outside the LOAD segments
    NON_ENCAP_SEGMENTS: [PT_NOTE],
    # Segments to ignore for hashing
    NON_HASHABLE_SEGMENTS: [PT_PHDR],
    # Segments not encrypted
    NON_ENCRYPT_SEGMENTS: [PT_NOTE],
}
# Test API to modify error policy
def set_overlapping_segments_policies(phyPolicy, virPolicy):
    """Override the physical/virtual overlap policies (test hook)."""
    elf_vars[POLICY_OVERLAPPING_SEGMENTS_PHYSICAL] = phyPolicy
    elf_vars[POLICY_OVERLAPPING_SEGMENTS_VIRTUAL] = virPolicy
# Test API to save error policy
def get_overlapping_segments_policies():
    """Return (physical, virtual) overlap policies, e.g. for later restore."""
    return (elf_vars[POLICY_OVERLAPPING_SEGMENTS_PHYSICAL],
            elf_vars[POLICY_OVERLAPPING_SEGMENTS_VIRTUAL])
def set_non_encap_segments(segments):
    """Replace the list of segment types allowed outside LOAD segments."""
    elf_vars[NON_ENCAP_SEGMENTS] = segments
def set_non_hashable_segments(segments):
    """Replace the list of segment types excluded from hashing."""
    elf_vars[NON_HASHABLE_SEGMENTS] = segments
| StarcoderdataPython |
375779 | <filename>gym_walk/envs/walk_env.py
import sys
import numpy as np
from six import StringIO
from string import ascii_uppercase
from typing import Optional
import gym
from gym import spaces, utils
from gym.envs.toy_text.utils import categorical_sample
WEST, EAST = 0, 1
class WalkEnv(gym.Env):
    """One-dimensional random-walk gym environment.

    States form a line of n_states walk cells plus two terminal cells
    (index 0 and nS-1).  Each action (WEST/EAST) moves forward with
    probability 1 - p_stay - p_backward, stays with p_stay, and moves
    backward with p_backward.  Reaching the rightmost terminal from its
    neighbor yields reward 1.0; everything else yields 0.0.
    """
    metadata = {'render.modes': ['human', 'ansi']}
    def __init__(self, n_states=7, p_stay=0.0, p_backward=0.5):
        # two terminal states added
        self.shape = (1, n_states + 2)
        self.start_state_index = self.shape[1] // 2
        self.nS = nS = np.prod(self.shape)
        self.nA = nA = 2
        # Build the tabular transition model:
        # P[s][a] = list of (probability, next_state, reward, done).
        self.P = {}
        for s in range(nS):
            self.P[s] = {}
            for a in range(nA):
                p_forward = 1.0 - p_stay - p_backward
                # Terminal states (0 and nS-1) are absorbing: s' == s.
                s_forward = np.clip(s - 1 if a == WEST else s + 1, 0, nS - 1) if s != 0 and s != nS - 1 else s
                s_backward = np.clip(s + 1 if a == WEST else s - 1, 0, nS - 1) if s != 0 and s != nS - 1 else s
                # Reward only when stepping from nS-2 into the right terminal.
                r_forward = 1.0 if s == nS - 2 and s_forward == nS - 1 else 0.0
                r_backward = 1.0 if s == nS - 2 and s_backward == nS - 1 else 0.0
                # Episode ends upon entering either terminal state.
                d_forward = s >= nS - 2 and s_forward == nS - 1 or s <= 1 and s_forward == 0
                d_backward = s >= nS - 2 and s_backward == nS - 1 or s <= 1 and s_backward == 0
                self.P[s][a] = [
                    (p_forward, s_forward, r_forward, d_forward),
                    (p_stay, s, 0.0, s == nS - 1 or s == 0),
                    (p_backward, s_backward, r_backward, d_backward)
                ]
        # Initial state distribution: always start in the middle cell.
        self.isd = np.zeros(nS)
        self.isd[self.start_state_index] = 1.0
        self.lastaction = None # for rendering
        self.action_space = spaces.Discrete(self.nA)
        self.observation_space = spaces.Discrete(self.nS)
        self.s = categorical_sample(self.isd, self.np_random)
    def step(self, action):
        """Sample one transition from P[s][action]; return (obs, reward, done, info)."""
        transitions = self.P[self.s][action]
        i = categorical_sample([t[0] for t in transitions], self.np_random)
        p, s, r, d = transitions[i]
        self.s = s
        self.lastaction = action
        return (int(s), r, d, {"prob": p})
    def reset(
        self,
        *,
        seed: Optional[int] = None,
        return_info: bool = False,
        options: Optional[dict] = None,
    ):
        """Re-seed (optionally) and resample the start state.

        NOTE(review): return_info is accepted but ignored — confirm
        whether an (obs, info) tuple should be returned when it is True.
        """
        super().reset(seed=seed)
        self.s = categorical_sample(self.isd, self.np_random)
        self.lastaction = None
        return int(self.s)
    def render(self, mode='human', close=False):
        """Render the walk as a bracketed letter strip, highlighting the current state."""
        outfile = StringIO() if mode == 'ansi' else sys.stdout
        desc = np.asarray(['[' + ascii_uppercase[:self.shape[1] - 2] + ']'], dtype='c').tolist()
        desc = [[c.decode('utf-8') for c in line] for line in desc]
        # Red in the left terminal, green in the right, yellow elsewhere.
        color = 'red' if self.s == 0 else 'green' if self.s == self.nS - 1 else 'yellow'
        desc[0][self.s] = utils.colorize(desc[0][self.s], color, highlight=True)
        outfile.write("\n")
        outfile.write("\n".join(''.join(line) for line in desc)+"\n")
        if mode != 'human':
            return outfile
| StarcoderdataPython |
8061399 | <filename>sensors/veml6075.py
import time
import board
import busio
import logging
import adafruit_veml6075
# Module-level hardware setup: I2C bus on the board's default pins and
# the VEML6075 UV sensor driver on that bus.
i2c = busio.I2C(board.SCL, board.SDA)
# integration_time=100 is passed through to the driver — see the
# adafruit_veml6075 docs for its units/valid values.
veml = adafruit_veml6075.VEML6075(i2c, integration_time=100)
stardustLogger = logging.getLogger("StardustLogger")
def uv():
    """Print the current UVA, UVB and UV-index readings to stdout."""
    print("UV a:", veml.uva)
    print("UV b:", veml.uvb)
    print("UV index:", veml.uv_index)
def logUv():
    """Log the current UV readings (debug trace plus info-level values)."""
    stardustLogger.debug("veml6075.logUv()")
    stardustLogger.info("UV a:" + str(veml.uva))
    stardustLogger.info("UV b:" + str(veml.uvb))
    stardustLogger.info("UV index:" + str(veml.uv_index))
| StarcoderdataPython |
3465009 | <filename>app/tests/test_app.py
import falcon
import pytest
from apispec import APISpec
from apispec.ext.marshmallow import MarshmallowPlugin
from falcon_apispec import FalconPlugin
from app.app import application
from app.db.database import StorageError
@pytest.fixture()
def app():
    """Provide a fresh falcon API instance per test."""
    return falcon.API()
@pytest.fixture()
def spec(app):
    """Provide an OpenAPI spec object wired to the falcon app."""
    # Set up documentation object
    spec = APISpec(
        title="Test",
        version="1.0.0",
        openapi_version="3.0.2",
        plugins=[FalconPlugin(app), MarshmallowPlugin()],
    )
    return spec
class mockDB:
    """Stand-in database object whose setup() is a no-op."""
    def setup(self):
        return None
def test_app(app, spec):
    """Calling the WSGI application with empty args must raise StorageError.

    NOTE(review): presumably the error originates from database setup
    inside `application` — confirm against app.app.
    """
    with pytest.raises(StorageError):
        application({}, {})
| StarcoderdataPython |
6552847 | """
Tests for the bootstrap_calcs.py file.
"""
import unittest
import numpy as np
import numpy.testing as npt
import pandas as pd
from scipy.stats import norm, gumbel_r
import pylogit.bootstrap_calcs as bc
try:
# Python 3.x does not natively support xrange
from past.builtins import xrange
except ImportError:
pass
class ComputationalTests(unittest.TestCase):
    """Numeric tests for pylogit.bootstrap_calcs.

    Expected values for the spatial-data tests come from Efron &
    Tibshirani (1994), "An Introduction to the Bootstrap", Chapter 14.
    """
    def setUp(self):
        """
        Note that the spatial test data used in many of these tests comes from
        Efron, Bradley, and <NAME>. An Introduction to the
        Bootstrap. CRC press, 1994. Chapter 14.
        """
        # Determine the number of parameters and number of bootstrap replicates
        num_replicates = 100
        num_params = 5
        # Create a set of fake bootstrap replicates
        self.bootstrap_replicates =\
            (np.arange(1, 1 + num_replicates)[:, None] *
             np.arange(1, 1 + num_params)[None, :])
        # Create a fake maximum likelihood parameter estimate
        self.mle_params = self.bootstrap_replicates[50, :]
        # Create a set of fake jackknife replicates
        array_container = []
        for est in self.mle_params:
            array_container.append(gumbel_r.rvs(loc=est, size=10))
        self.jackknife_replicates =\
            np.concatenate([x[:, None] for x in array_container], axis=1)
        # Create a fake confidence percentage.
        self.conf_percentage = 94.88
        # Store the spatial test data from Efron and Tibshirani (1994)
        self.test_data =\
            np.array([48, 36, 20, 29, 42, 42, 20, 42, 22, 41, 45, 14, 6,
                      0, 33, 28, 34, 4, 32, 24, 47, 41, 24, 26, 30, 41])
        # Note how many test data observations there are.
        num_test_obs = self.test_data.size
        # Create the function to calculate the jackknife replicates.
        def calc_theta(array):
            # Biased (divide-by-n) sample variance of `array`.
            result = ((array - array.mean())**2).sum() / float(array.size)
            return result
        self.calc_theta = calc_theta
        self.test_theta_hat = np.array([calc_theta(self.test_data)])
        # Create a pandas series of the data. Allows for easy case deletion.
        raw_series = pd.Series(self.test_data)
        # Create the array of jackknife replicates
        jackknife_replicates = np.empty((num_test_obs, 1), dtype=float)
        for obs in xrange(num_test_obs):
            current_data = raw_series[raw_series.index != obs].values
            jackknife_replicates[obs] = calc_theta(current_data)
        self.test_jackknife_replicates = jackknife_replicates
        return None
    def test_calc_percentile_interval(self):
        """The interval endpoints should be the empirical alpha/2 quantile rows."""
        # Get the alpha percentage. Should be 5.12 so alpha / 2 should be 2.56
        alpha = bc.get_alpha_from_conf_percentage(self.conf_percentage)
        # These next 2 statements work because there are exactly 100 replicates
        # We should have the value in BR[lower_row, 0] = 3 so that there are 2
        # elements in bootstrap_replicates (BR) that are less than this. I.e.
        # we want lower_row = 2. Note 2.56 rounded down is 2.
        lower_row = int(np.floor(alpha / 2.0))
        # 100 - 2.56 is 97.44. Rounded up, this is 98.
        # We want the row such that the value in the first column of that row
        # is 98, i.e. we want the row at index 97.
        upper_row = int(np.floor(100 - (alpha / 2.0)))
        # Create the expected results
        expected_results =\
            bc.combine_conf_endpoints(self.bootstrap_replicates[lower_row],
                                      self.bootstrap_replicates[upper_row])
        # Alias the function being tested
        func = bc.calc_percentile_interval
        # Get the function results
        func_results = func(self.bootstrap_replicates, self.conf_percentage)
        # Perform the desired tests
        self.assertIsInstance(func_results, np.ndarray)
        self.assertEqual(func_results.shape, expected_results.shape)
        npt.assert_allclose(func_results, expected_results)
        return None
    def test_calc_bias_correction_bca(self):
        """Bias correction should be 0 at the median MLE and norm.ppf(0.95) at the 95th row."""
        # There are 100 bootstrap replicates, already in ascending order for
        # each column. If we take row 51 to be the mle, then 50% of the
        # replicates are less than the mle, and we should have bias = 0.
        expected_result = np.zeros(self.mle_params.size)
        # Alias the function to be tested.
        func = bc.calc_bias_correction_bca
        # Perform the desired test
        func_result = func(self.bootstrap_replicates, self.mle_params)
        self.assertIsInstance(func_result, np.ndarray)
        self.assertEqual(func_result.shape, expected_result.shape)
        npt.assert_allclose(func_result, expected_result)
        # Create a fake mle that should be higher than 95% of the results
        fake_mle = self.bootstrap_replicates[95]
        expected_result_2 = norm.ppf(0.95) * np.ones(self.mle_params.size)
        func_result_2 = func(self.bootstrap_replicates, fake_mle)
        self.assertIsInstance(func_result_2, np.ndarray)
        self.assertEqual(func_result_2.shape, expected_result_2.shape)
        npt.assert_allclose(func_result_2, expected_result_2)
        return None
    def test_calc_acceleration_bca(self):
        """Acceleration should reproduce the textbook value 0.061 for the spatial data."""
        # Get the expected result. See page 186 of Efron and Tibshirani (1994)
        expected_result = np.array([0.061])
        # Alias the function being tested
        func = bc.calc_acceleration_bca
        # Perform the desired test
        func_result = func(self.test_jackknife_replicates)
        self.assertIsInstance(func_result, np.ndarray)
        self.assertEqual(func_result.shape, expected_result.shape)
        # Note the absolute tolerance of 5e-4 is used because the results
        # should agree when rounded to 3 decimal places. This will be the case
        # if the two sets of results agree to within 5e-4 of each other.
        npt.assert_allclose(func_result, expected_result, atol=5e-4)
        return None
    def test_calc_lower_bca_percentile(self):
        """Lower BCa percentile should reproduce the textbook value 0.110."""
        # Use the parameter values from
        # Efron, Bradley, and <NAME>. An Introduction to the
        # Bootstrap. CRC press, 1994. Pages 185-186
        # Note that my alpha is Efron's alpha / 2, in percents not decimals
        alpha_percent = 10
        bias_correction = np.array([0.146])
        acceleration = np.array([0.061])
        # Note the expected results
        expected_result = np.array([0.110])
        # Alias the function being tested
        func = bc.calc_lower_bca_percentile
        # Perform the desired tests
        # Note we divide the function results by 100 since our results are in
        # terms of percents and Efron's results are in decimals.
        func_result = func(alpha_percent, bias_correction, acceleration) / 100
        self.assertIsInstance(func_result, np.ndarray)
        self.assertEqual(func_result.shape, expected_result.shape)
        # Note the absolute tolerance of 5e-4 is used because the results
        # should agree when rounded to 3 decimal places. This will be the case
        # if the two sets of results agree to within 5e-4 of each other.
        npt.assert_allclose(func_result, expected_result, atol=5e-4)
        return None
    def test_calc_upper_bca_percentile(self):
        """Upper BCa percentile should reproduce the textbook value 0.985."""
        # Use the parameter values from
        # Efron, Bradley, and <NAME>. An Introduction to the
        # Bootstrap. CRC press, 1994. Pages 185-186
        # Note that my alpha is Efron's alpha / 2, in percents not decimals
        alpha_percent = 10
        bias_correction = np.array([0.146])
        acceleration = np.array([0.061])
        # Note the expected results
        expected_result = np.array([0.985])
        # Alias the function being tested
        func = bc.calc_upper_bca_percentile
        # Perform the desired tests
        # Note we divide the function results by 100 since our results are in
        # terms of percents and Efron's results are in decimals.
        func_result = func(alpha_percent, bias_correction, acceleration) / 100
        self.assertIsInstance(func_result, np.ndarray)
        self.assertEqual(func_result.shape, expected_result.shape)
        # Note the absolute tolerance of 1e-3 is used because the results
        # should be within 0.001 of each other.
        npt.assert_allclose(func_result, expected_result, atol=1e-3)
        return None
    def test_calc_bca_interval(self):
        """The full BCa interval should match the textbook [115.8, 259.6] within 1%."""
        # Create the bootstrap replicates for the test data
        num_test_reps = 5000
        num_test_obs = self.test_data.size
        test_indices = np.arange(num_test_obs)
        boot_indx_shape = (num_test_reps, num_test_obs)
        np.random.seed(8292017)
        boot_indices =\
            np.random.choice(test_indices,
                             replace=True,
                             size=num_test_obs*num_test_reps)
        self.test_bootstrap_replicates =\
            np.fromiter((self.calc_theta(self.test_data[x]) for x in
                         boot_indices.reshape(boot_indx_shape)),
                        dtype=float)[:, None]
        # Note the expected result. See page 183 of Efron and Tibshirani (1994)
        expected_result = np.array([[115.8], [259.6]])
        # Bundle the necessary arguments
        args = [self.test_bootstrap_replicates,
                self.test_jackknife_replicates,
                self.test_theta_hat,
                90]
        # Alias the function being tested
        func = bc.calc_bca_interval
        # Get the function results
        func_result = func(*args)
        # Perform the desired tests
        # Note we divide the function results by 100 since our results are in
        # terms of percents and Efron's results are in decimals.
        self.assertIsInstance(func_result, np.ndarray)
        self.assertEqual(func_result.shape, expected_result.shape)
        # Note the relative tolerance of 0.01 is used because the function
        # results should be within 1% of the expected result. Note that some
        # differences are expected due to simulation error on both the part of
        # Efron and Tibshirani (1994) when they reported their results, and on
        # our part when calculating the results.
        npt.assert_allclose(func_result, expected_result, rtol=0.01)
        return None
| StarcoderdataPython |
4877636 | <filename>easy/array/flood_fill/flood_fill.py
class Solution(object):
    """LeetCode 733, "Flood Fill"."""
    def floodFill(self, image, sr, sc, newColor):
        """
        :type image: List[List[int]]
        :type sr: int
        :type sc: int
        :type newColor: int
        :rtype: List[List[int]]

        Recolors the 4-connected region containing image[sr][sc] in
        place and returns the image.  Implemented iteratively with an
        explicit stack so that large regions cannot overflow Python's
        recursion limit (the previous recursive version could).
        """
        old_color = image[sr][sc]
        if old_color == newColor:
            # Nothing to do; also prevents revisiting cells forever.
            return image
        rows, cols = len(image), len(image[0])
        stack = [(sr, sc)]
        while stack:
            r, c = stack.pop()
            if image[r][c] != old_color:
                # Already recolored (or never part of the region).
                continue
            image[r][c] = newColor
            if r > 0:
                stack.append((r - 1, c))
            if r < rows - 1:
                stack.append((r + 1, c))
            if c > 0:
                stack.append((r, c - 1))
            if c < cols - 1:
                stack.append((r, c + 1))
        return image
# Manual smoke checks run on import; print the filled grids.
s = Solution()
print("Solution 1 : ", s.floodFill([[1, 1, 1], [1, 1, 0], [1, 0, 1]], 1, 1, 2))
print("Solution 1 : ", s.floodFill([[0, 0, 0], [0, 1, 1]], 1, 1, 1))
| StarcoderdataPython |
20951 | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Test update of link labels."""
from uuid import uuid4
from aiida.common import timezone
from aiida.storage.psql_dos.migrator import PsqlDostoreMigrator
def test_legacy_jobcalc_attrs(perform_migrations: PsqlDostoreMigrator):
    """Test that migration django_0043 renames the legacy link label
    '_return' to 'result'.

    NOTE(review): the function name mentions jobcalc attrs, but the body
    exercises the link-label migration — confirm the intended name.
    """
    # starting revision
    perform_migrations.migrate_up('django@django_0042')
    # setup the database
    user_model = perform_migrations.get_current_table('db_dbuser')
    node_model = perform_migrations.get_current_table('db_dbnode')
    link_model = perform_migrations.get_current_table('db_dblink')
    with perform_migrations.session() as session:
        user = user_model(
            email='<EMAIL>',
            first_name='John',
            last_name='Doe',
            institution='EPFL',
        )
        session.add(user)
        session.commit()
        # One calculation node and one data node to link together.
        node_process = node_model(
            uuid=str(uuid4()),
            node_type='process.calculation.calcjob.CalcJobNode.',
            label='test',
            description='',
            user_id=user.id,
            ctime=timezone.now(),
            mtime=timezone.now(),
        )
        node_data = node_model(
            uuid=str(uuid4()),
            node_type='data.core.dict.Dict.',
            label='test',
            description='',
            user_id=user.id,
            ctime=timezone.now(),
            mtime=timezone.now(),
        )
        session.add(node_process)
        session.add(node_data)
        session.commit()
        # The legacy label that the migration is expected to rename.
        link = link_model(
            input_id=node_data.id,
            output_id=node_process.id,
            type='input',
            label='_return',
        )
        session.add(link)
        session.commit()
        link_id = link.id
    # final revision
    perform_migrations.migrate_up('django@django_0043')
    # Re-fetch the table: the migration may have altered its definition.
    link_model = perform_migrations.get_current_table('db_dblink')
    with perform_migrations.session() as session:
        link = session.get(link_model, link_id)
        assert link.label == 'result'
| StarcoderdataPython |
3481685 | <gh_stars>1-10
from django.shortcuts import render
def entry_list(request):
    """Render the entry list page."""
    return render(request, 'entry_list.html')
def entry_add(request):
    """Render the add-entry page."""
    return render(request, 'entry_add.html')
def entry(request, post_id):
    """Render a single entry page, passing the post id to the template."""
    return render(request, 'entry.html', {'id': post_id})
| StarcoderdataPython |
8055787 | <filename>dummy.py
#this is a dummy python script to check
#the proper installation of the virtual env
#and other required modules
#this project uses python version 3.7.3
#=========required modules==========
#pip install virtualenv
#pip install --upgrade google-api-python-client
#pip install --upgrade google-auth-oauthlib google-auth-httplib2
#pip install requests
#pip install textblob
#pip install paralleldots
#install the above modules inside the virtualenv
# Smoke-test output confirming the script runs inside the virtualenv.
print("hello im in a virtual env")
import csv
import os
import paralleldots
import google.oauth2.credentials
from textblob import TextBlob
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from google_auth_oauthlib.flow import InstalledAppFlow
#if there is no error after executing this file,
#you are good to go | StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.