id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
# coding: utf-8
"""
SENSE-O Northbound Intent API
StackV SENSE-O Northbound REST API Documentation # noqa: E501
OpenAPI spec version: 2.0.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class SlimProfile(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> declared swagger type (used by to_dict()).
    swagger_types = {
        'uuid': 'str',
        'name': 'str',
        'owner': 'str',
        'description': 'str',
        'created': 'datetime',
        'last_edited': 'datetime',
        'authorized': 'bool',
        'editable': 'bool',
        'licenses': 'list[ProfileLicense]'
    }
    # Python attribute name -> JSON key in the API payload.
    attribute_map = {
        'uuid': 'uuid',
        'name': 'name',
        'owner': 'owner',
        'description': 'description',
        'created': 'created',
        'last_edited': 'lastEdited',
        'authorized': 'authorized',
        'editable': 'editable',
        'licenses': 'licenses'
    }

    def __init__(self, uuid=None, name=None, owner=None, description=None, created=None, last_edited=None, authorized=None, editable=None, licenses=None):  # noqa: E501
        """SlimProfile - a model defined in Swagger"""  # noqa: E501
        # Backing fields for the properties below.
        self._uuid = None
        self._name = None
        self._owner = None
        self._description = None
        self._created = None
        self._last_edited = None
        self._authorized = None
        self._editable = None
        self._licenses = None
        self.discriminator = None
        # uuid and name are required: their setters raise ValueError on None,
        # so they are assigned unconditionally here.
        self.uuid = uuid
        self.name = name
        # All remaining fields are optional and only set when provided.
        if owner is not None:
            self.owner = owner
        if description is not None:
            self.description = description
        if created is not None:
            self.created = created
        if last_edited is not None:
            self.last_edited = last_edited
        if authorized is not None:
            self.authorized = authorized
        if editable is not None:
            self.editable = editable
        if licenses is not None:
            self.licenses = licenses

    @property
    def uuid(self):
        """Gets the uuid of this SlimProfile.  # noqa: E501
        The profile's ID.  # noqa: E501
        :return: The uuid of this SlimProfile.  # noqa: E501
        :rtype: str
        """
        return self._uuid

    @uuid.setter
    def uuid(self, uuid):
        """Sets the uuid of this SlimProfile.
        The profile's ID.  # noqa: E501
        :param uuid: The uuid of this SlimProfile.  # noqa: E501
        :type: str
        """
        # Required field: reject None explicitly.
        if uuid is None:
            raise ValueError("Invalid value for `uuid`, must not be `None`")  # noqa: E501
        self._uuid = uuid

    @property
    def name(self):
        """Gets the name of this SlimProfile.  # noqa: E501
        The profile's user-given name.  # noqa: E501
        :return: The name of this SlimProfile.  # noqa: E501
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this SlimProfile.
        The profile's user-given name.  # noqa: E501
        :param name: The name of this SlimProfile.  # noqa: E501
        :type: str
        """
        # Required field: reject None explicitly.
        if name is None:
            raise ValueError("Invalid value for `name`, must not be `None`")  # noqa: E501
        self._name = name

    @property
    def owner(self):
        """Gets the owner of this SlimProfile.  # noqa: E501
        The username of the profile owner.  # noqa: E501
        :return: The owner of this SlimProfile.  # noqa: E501
        :rtype: str
        """
        return self._owner

    @owner.setter
    def owner(self, owner):
        """Sets the owner of this SlimProfile.
        The username of the profile owner.  # noqa: E501
        :param owner: The owner of this SlimProfile.  # noqa: E501
        :type: str
        """
        self._owner = owner

    @property
    def description(self):
        """Gets the description of this SlimProfile.  # noqa: E501
        The profile's description.  # noqa: E501
        :return: The description of this SlimProfile.  # noqa: E501
        :rtype: str
        """
        return self._description

    @description.setter
    def description(self, description):
        """Sets the description of this SlimProfile.
        The profile's description.  # noqa: E501
        :param description: The description of this SlimProfile.  # noqa: E501
        :type: str
        """
        self._description = description

    @property
    def created(self):
        """Gets the created of this SlimProfile.  # noqa: E501
        The profile's timestamp for creation.  # noqa: E501
        :return: The created of this SlimProfile.  # noqa: E501
        :rtype: datetime
        """
        return self._created

    @created.setter
    def created(self, created):
        """Sets the created of this SlimProfile.
        The profile's timestamp for creation.  # noqa: E501
        :param created: The created of this SlimProfile.  # noqa: E501
        :type: datetime
        """
        self._created = created

    @property
    def last_edited(self):
        """Gets the last_edited of this SlimProfile.  # noqa: E501
        The timestamp for when the profile was last edited.  # noqa: E501
        :return: The last_edited of this SlimProfile.  # noqa: E501
        :rtype: datetime
        """
        return self._last_edited

    @last_edited.setter
    def last_edited(self, last_edited):
        """Sets the last_edited of this SlimProfile.
        The timestamp for when the profile was last edited.  # noqa: E501
        :param last_edited: The last_edited of this SlimProfile.  # noqa: E501
        :type: datetime
        """
        self._last_edited = last_edited

    @property
    def authorized(self):
        """Gets the authorized of this SlimProfile.  # noqa: E501
        Whether the profile carries an admin's authorization with it.  # noqa: E501
        :return: The authorized of this SlimProfile.  # noqa: E501
        :rtype: bool
        """
        return self._authorized

    @authorized.setter
    def authorized(self, authorized):
        """Sets the authorized of this SlimProfile.
        Whether the profile carries an admin's authorization with it.  # noqa: E501
        :param authorized: The authorized of this SlimProfile.  # noqa: E501
        :type: bool
        """
        self._authorized = authorized

    @property
    def editable(self):
        """Gets the editable of this SlimProfile.  # noqa: E501
        Whether the profile can be edited by licensed users.  # noqa: E501
        :return: The editable of this SlimProfile.  # noqa: E501
        :rtype: bool
        """
        return self._editable

    @editable.setter
    def editable(self, editable):
        """Sets the editable of this SlimProfile.
        Whether the profile can be edited by licensed users.  # noqa: E501
        :param editable: The editable of this SlimProfile.  # noqa: E501
        :type: bool
        """
        self._editable = editable

    @property
    def licenses(self):
        """Gets the licenses of this SlimProfile.  # noqa: E501
        The profile's collection of given licenses.  # noqa: E501
        :return: The licenses of this SlimProfile.  # noqa: E501
        :rtype: list[ProfileLicense]
        """
        return self._licenses

    @licenses.setter
    def licenses(self, licenses):
        """Sets the licenses of this SlimProfile.
        The profile's collection of given licenses.  # noqa: E501
        :param licenses: The licenses of this SlimProfile.  # noqa: E501
        :type: list[ProfileLicense]
        """
        self._licenses = licenses

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Walk the declared attributes; recurse into nested models
        # (anything exposing to_dict), lists and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generated escape hatch: only taken if the generator ever makes
        # this model subclass dict (it does not here).
        if issubclass(SlimProfile, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, SlimProfile):
            return False
        # Full attribute-dict comparison (includes discriminator).
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| StarcoderdataPython |
""" Contains a class for logic of the single channel plot dialog.
"""
import logging
from PyQt5 import QtWidgets
from meggie.utilities.dialogs.singleChannelDialogUi import Ui_singleChannelDialog
class SingleChannelDialog(QtWidgets.QDialog):
    """Logic for the single channel plot dialog.

    Lets the user pick a channel, a y-range, a smoothing window and
    per-condition legend labels, then forwards the selection to `handler`.
    """

    def __init__(self, parent, handler, title, ch_names,
                 scalings, units, ylims, default_legend_names):
        QtWidgets.QDialog.__init__(self, parent)
        self.ui = Ui_singleChannelDialog()
        self.ui.setupUi(self)

        self.handler = handler
        self.scalings = scalings
        self.units = units
        self.ylims = ylims
        self.default_legend_names = default_legend_names

        # Channel selector, alphabetical order.
        for name in sorted(ch_names):
            self.ui.comboBoxChannel.addItem(name)

        self.ui.lineEditTitle.setText(title)

        # One label + editable line per legend entry. The line edits are
        # attached to self.ui as lineEditItem_<idx> so accept() can find them.
        for idx, legend_name in enumerate(default_legend_names):
            label = QtWidgets.QLabel(self.ui.groupBoxLegend)
            label.setText(legend_name)
            self.ui.formLayoutLegend.setWidget(
                idx, QtWidgets.QFormLayout.LabelRole, label)

            editor = QtWidgets.QLineEdit(self.ui.groupBoxLegend)
            setattr(self.ui, 'lineEditItem_' + str(idx), editor)
            editor.setText(legend_name)
            self.ui.formLayoutLegend.setWidget(
                idx, QtWidgets.QFormLayout.FieldRole, editor)

    def on_comboBoxChannel_currentTextChanged(self, item):
        # Refresh unit suffixes and default y-limits for the newly
        # selected channel (5% margin around the scaled data range).
        suffix = ' ' + self.units[item]
        scale = self.scalings[item]
        low, high = self.ylims[item]
        self.ui.doubleSpinBoxMin.setSuffix(suffix)
        self.ui.doubleSpinBoxMax.setSuffix(suffix)
        self.ui.doubleSpinBoxMin.setValue(low * scale * 1.05)
        self.ui.doubleSpinBoxMax.setValue(high * scale * 1.05)

    def accept(self):
        # Collect the dialog state and hand it to the caller's handler.
        ylim = (self.ui.doubleSpinBoxMin.value(),
                self.ui.doubleSpinBoxMax.value())
        ch_name = self.ui.comboBoxChannel.currentText()
        window_len = self.ui.spinBoxWindowLength.value()
        window = self.ui.comboBoxWindow.currentText()
        title = self.ui.lineEditTitle.text()

        # Map each default legend name to whatever the user typed.
        legend = {}
        for idx, default_name in enumerate(self.default_legend_names):
            editor = getattr(self.ui, 'lineEditItem_' + str(idx))
            legend[default_name] = editor.text()

        self.handler(ch_name, title, legend, ylim, window, window_len)
        self.close()
| StarcoderdataPython |
# ddqn/ddqn.py
from tensorflow.keras.layers import Dense, Activation
from tensorflow.keras.models import Sequential, load_model, save_model, Model
from tensorflow.keras.optimizers import Adam
import numpy as np
class ReplayBuffer(object):
    """Fixed-capacity circular experience-replay buffer.

    Stores (state, action, reward, next_state, done) transitions in
    pre-allocated numpy arrays; when `discrete` is True, actions are
    stored one-hot encoded.
    """

    def __init__(self, max_size, input_shape, n_actions, discrete=False):
        self.mem_size = max_size
        self.mem_counter = 0          # total transitions ever stored
        self.discrete = discrete
        # Pre-allocated storage; only the first min(mem_counter, mem_size)
        # rows contain valid data.
        self.state_memory = np.empty((self.mem_size, input_shape))
        self.next_state_memory = np.empty((self.mem_size, input_shape))
        self.dtype = np.int8 if self.discrete else np.float32
        self.action_memory = np.empty((self.mem_size, n_actions), dtype=self.dtype)
        self.reward_memory = np.empty(self.mem_size)
        self.terminal_memory = np.empty(self.mem_size, dtype=np.float32)

    def store_transition(self, state, action, reward, state_, done):
        """Write one transition, overwriting the oldest slot when full."""
        slot = self.mem_counter % self.mem_size
        self.state_memory[slot] = state
        self.next_state_memory[slot] = state_
        if self.discrete:
            # One-hot encode the integer action index.
            one_hot = np.zeros(self.action_memory.shape[1])
            one_hot[action] = 1.0
            self.action_memory[slot] = one_hot
        else:
            self.action_memory[slot] = action
        self.reward_memory[slot] = reward
        # Stored as a continuation mask: 1.0 while alive, 0.0 on terminal.
        self.terminal_memory[slot] = 1 - done
        self.mem_counter += 1

    def sample_buffer(self, batch_size):
        """Uniformly sample a batch (with replacement) of stored transitions."""
        valid = min(self.mem_counter, self.mem_size)
        idx = np.random.choice(valid, batch_size)
        return (self.state_memory[idx],
                self.action_memory[idx],
                self.reward_memory[idx],
                self.next_state_memory[idx],
                self.terminal_memory[idx])

    def save_memory(self):
        """Dump the filled portion of every array to CSV under history/."""
        n = self.mem_counter
        parts = (("state", self.state_memory),
                 ("action", self.action_memory),
                 ("reward", self.reward_memory),
                 ("next_state", self.next_state_memory),
                 ("terminal", self.terminal_memory))
        for label, arr in parts:
            np.savetxt("history/{}_history.csv".format(label), arr[:n], delimiter=",")
class Example_Buffer(object):
    """Replay buffer pre-loaded from CSV demonstration data.

    Loads transition histories written by ReplayBuffer.save_memory(),
    splits them into episodes (a terminal mask of 0 ends an episode),
    and keeps only episodes whose total reward falls inside
    ``episode_range = (min_score, max_score, max_episodes)``.

    Bug fix: ``choice_score_range`` previously named its first parameter
    ``range``, shadowing the builtin and crashing on ``range(len(...))``.
    The parameter is now ``score_range`` (callers pass it positionally).
    """

    def __init__(self, location, episode_range, discrete=False):
        self.episode_range = episode_range
        self.discrete = discrete
        self.dtype = np.int8 if self.discrete else np.float32
        # Raw per-step histories as written by ReplayBuffer.save_memory().
        self.state_memory = np.genfromtxt("{}/state_history.csv".format(location), delimiter=",")
        self.action_memory = np.genfromtxt("{}/action_history.csv".format(location), delimiter=",", dtype=self.dtype)
        self.reward_memory = np.genfromtxt("{}/reward_history.csv".format(location), delimiter=",")
        self.next_state_memory = np.genfromtxt("{}/next_state_history.csv".format(location), delimiter=",")
        self.terminal_memory = np.genfromtxt("{}/terminal_history.csv".format(location), delimiter=",", dtype=np.float32)
        # Episode boundaries: terminal mask value 0 marks the last step.
        self.episode_indexes = [0]
        for i in range(len(self.terminal_memory)):
            if self.terminal_memory[i] == 0:
                self.episode_indexes.append(i + 1)
        self.episode_scores = self.example_parser()
        # Keep up to episode_range[2] episodes whose score lies within
        # [episode_range[0], episode_range[1]].
        self.episode_choice = []
        for i in range(len(self.episode_scores)):
            if self.episode_range[0] <= self.episode_scores[i] <= self.episode_range[1]:
                self.episode_choice.append(i)
            if len(self.episode_choice) >= self.episode_range[2]:
                break
        print("Loading {} Episodes...".format(len(self.episode_choice)))
        # Re-assemble the memories from only the selected episodes.
        episode_state_memory = np.split(self.state_memory, self.episode_indexes[1:])
        episode_action_memory = np.split(self.action_memory, self.episode_indexes[1:])
        episode_reward_memory = np.split(self.reward_memory, self.episode_indexes[1:])
        episode_next_state_memory = np.split(self.next_state_memory, self.episode_indexes[1:])
        episode_terminal_memory = np.split(self.terminal_memory, self.episode_indexes[1:])
        self.state_memory = np.concatenate([episode_state_memory[c] for c in self.episode_choice])
        self.action_memory = np.concatenate([episode_action_memory[c] for c in self.episode_choice])
        self.reward_memory = np.concatenate([episode_reward_memory[c] for c in self.episode_choice])
        self.next_state_memory = np.concatenate([episode_next_state_memory[c] for c in self.episode_choice])
        self.terminal_memory = np.concatenate([episode_terminal_memory[c] for c in self.episode_choice])
        self.num_examples = len(self.action_memory)
        self.mem_counter = len(self.action_memory)
        self.episode_counter = 0
        # Recompute boundaries/scores on the filtered data.
        self.episode_indexes = []
        for i in range(len(self.terminal_memory)):
            if self.terminal_memory[i] == 0:
                self.episode_indexes.append(i + 1)
        self.num_episodes = len(self.episode_indexes) + 1
        self.episode_scores = self.example_parser()

    def sample_example(self, batch_size):
        """Uniformly sample a batch (with replacement) of demonstration steps."""
        max_mem = self.mem_counter
        batch = np.random.choice(max_mem, batch_size)
        states = self.state_memory[batch]
        actions = self.action_memory[batch]
        rewards = self.reward_memory[batch]
        states_ = self.next_state_memory[batch]
        terminal = self.terminal_memory[batch]
        return states, actions, rewards, states_, terminal

    def example_reset(self):
        """Return the state at the current replay cursor (gym-style reset)."""
        return self.state_memory[self.mem_counter]

    def example_step(self):
        """Replay one recorded step (gym-style step) and advance the cursor."""
        actions = self.action_memory[self.mem_counter]
        states_ = self.next_state_memory[self.mem_counter]
        rewards = self.reward_memory[self.mem_counter]
        terminal = self.terminal_memory[self.mem_counter]
        self.mem_counter += 1
        if terminal == 0:
            self.episode_counter += 1
        return actions, states_, rewards, terminal, {}

    def example_parser(self):
        """Return the total reward of each complete episode.

        NOTE(review): only spans between consecutive entries of
        ``episode_indexes`` are summed, so a leading/trailing partial
        episode is excluded — confirm this matches the data layout.
        """
        episode_scores = []
        for i in range(1, len(self.episode_indexes)):
            start, stop = self.episode_indexes[i - 1], self.episode_indexes[i]
            episode_scores.append(np.sum(self.reward_memory[start:stop]))
        return episode_scores

    def choice_score_range(self, score_range, episode_scores):
        """Return indexes of episodes whose score lies in [score_range[0], score_range[1]].

        :param score_range: (min_score, max_score) inclusive bounds.
        :param episode_scores: per-episode total rewards.
        """
        # Fixed: the parameter was previously called `range`, which shadowed
        # the builtin and made `range(len(...))` below raise TypeError.
        episode_choice = []
        for i in range(len(episode_scores)):
            if score_range[0] <= episode_scores[i] <= score_range[1]:
                episode_choice.append(i)
        return episode_choice

    def replay_add(self, agent):
        """Copy every demonstration transition into an agent's replay buffer."""
        for i in range(len(self.action_memory)):
            agent.memory.store_transition(self.state_memory[i], self.action_memory[i], self.reward_memory[i], self.next_state_memory[i], self.terminal_memory[i])
        print("Added prime data to experience replay")

    def analyse_state_space(self):
        """Return per-dimension [min, max] observed over all stored states."""
        minMax = []
        for i in self.state_memory:
            if len(minMax) == 0:
                # Seed bounds from the first state vector.
                for y in range(len(i)):
                    minMax.append([i[y], i[y]])
            for x in range(len(i)):
                if i[x] < minMax[x][0]:
                    minMax[x][0] = i[x]
                elif i[x] > minMax[x][1]:
                    minMax[x][1] = i[x]
        return minMax
# Heavily influenced by: https://github.com/philtabor/Youtube-Code-Repository/blob/master/ReinforcementLearning/DeepQLearning/ddqn_keras.py
class DDQNAgent(object):
    """Double-DQN agent: online network (q_eval) selects actions, a
    periodically-synced target network (q_target) evaluates them.
    Optionally pre-trains ("primes") from recorded example episodes."""

    def __init__(self, alpha, gamma, n_actions, epsilon, batch_size,
                 input_dims, primesteps=0, episode_range=[0,0,0], example_location=None, epsilon_dec=0.9999, epsilon_end=0.0001,
                 mem_size=1000000, fname='ddqn_model.h5', replace_target=500, use_examples=False):
        # NOTE(review): episode_range=[0,0,0] is a mutable default argument;
        # it is only read here, but sharing across instances is a latent risk.
        self.action_space = [i for i in range(n_actions)]
        self.n_actions = n_actions
        self.gamma = gamma                      # discount factor
        self.epsilon = epsilon                  # exploration rate (decayed in learn())
        self.epsilon_dec = epsilon_dec
        self.epsilon_min = epsilon_end
        self.batch_size = batch_size
        self.model_file = fname
        self.replace_target = replace_target    # steps between target-net syncs
        self.memory = ReplayBuffer(mem_size, input_dims, n_actions,discrete=True)
        self.use_examples = use_examples
        if self.use_examples:
            self.example_memory = Example_Buffer(location=example_location, discrete=True, episode_range=episode_range)
            self.name = "Example Agent"
            self.primesteps = primesteps
        else:
            self.name = "Normal Agent"
        self.q_eval = self.build_dqn(alpha, n_actions, input_dims, 256, 256)
        self.q_target = self.build_dqn(alpha, n_actions, input_dims, 256, 256)
        self.learning_counter = 0
        self.primestep_counter = 0

    def build_dqn(self, lr, n_actions, input_dims, fc1_dims, fc2_dims):
        """Build a 2-hidden-layer MLP mapping states to per-action Q-values."""
        model = Sequential([
            Dense(fc1_dims, input_shape=(input_dims,)),
            Activation('relu'),
            Dense(fc2_dims),
            Activation('relu'),
            Dense(n_actions)])
        model.compile(optimizer=Adam(learning_rate=lr), loss='mse')
        print(model.summary())
        return model

    def remember(self, state, action, reward, new_state, done):
        """Store one transition in the replay buffer."""
        self.memory.store_transition(state, action, reward, new_state, done)

    def replay_add(self):
        """Copy all loaded example transitions into the agent's replay buffer."""
        for i in range(len(self.example_memory.action_memory)):
            self.remember(self.example_memory.state_memory[i], self.example_memory.action_memory[i], self.example_memory.reward_memory[i], \
                          self.example_memory.next_state_memory[i], self.example_memory.terminal_memory[i])
        print("Added prime data to experience replay")

    def choose_action(self, state):
        """Epsilon-greedy action selection for a single state vector."""
        state = state[np.newaxis, :]
        rand = np.random.random()
        if rand < self.epsilon:
            action = np.random.choice(self.action_space)
        else:
            actions = self.q_eval.predict(state, use_multiprocessing=True)
            action = np.argmax(actions)
        return action

    def learn(self):
        """One double-DQN update from a replay batch; decays epsilon and
        periodically syncs the target network. Returns the Keras History
        from fit() (or None until the buffer holds a full batch)."""
        if self.memory.mem_counter > self.batch_size:
            state, action, reward, new_state, done = \
                self.memory.sample_buffer(self.batch_size)
            # Recover integer action indices from the one-hot encoding.
            action_values = np.array(self.action_space, dtype=np.int8)
            action_indices = np.dot(action, action_values)
            q_next = self.q_target.predict(new_state, use_multiprocessing=True)
            q_eval = self.q_eval.predict(new_state, use_multiprocessing=True)
            q_pred = self.q_eval.predict(state, use_multiprocessing=True)
            # Double-DQN: online net picks the argmax, target net scores it.
            max_actions = np.argmax(q_eval, axis=1)
            # NOTE: q_target aliases q_pred (no copy); only the taken-action
            # entries are overwritten below, so non-taken actions get zero error.
            q_target = q_pred
            batch_index = np.arange(self.batch_size, dtype=np.int32)
            # `done` is a continuation mask (1 alive / 0 terminal), so the
            # bootstrap term vanishes on terminal transitions.
            q_target[batch_index, action_indices] = reward + \
                self.gamma*q_next[batch_index, max_actions.astype(int)]*done
            loss = self.q_eval.fit(state, q_target, verbose=0, use_multiprocessing=True)
            self.epsilon = self.epsilon * self.epsilon_dec if self.epsilon > \
                self.epsilon_min else self.epsilon_min
            if self.memory.mem_counter % self.replace_target == 0:
                self.update_network_parameters()
            return loss

    def prime(self):
        """Same double-DQN update as learn(), but sampling from the example
        buffer and without epsilon decay (used for pre-training)."""
        state, action, reward, new_state, done = self.example_memory.sample_example(self.batch_size)
        action_values = np.array(self.action_space, dtype=np.int8)
        action_indices = np.dot(action, action_values)
        q_next = self.q_target.predict(new_state, use_multiprocessing=True)
        q_eval = self.q_eval.predict(new_state, use_multiprocessing=True)
        q_pred = self.q_eval.predict(state, use_multiprocessing=True)
        max_actions = np.argmax(q_eval, axis=1)
        q_target = q_pred
        batch_index = np.arange(self.batch_size, dtype=np.int32)
        q_target[batch_index, action_indices] = reward + self.gamma*q_next[batch_index, max_actions.astype(int)]*done
        loss = self.q_eval.fit(state, q_target, verbose=0, use_multiprocessing=True)
        if self.primestep_counter % self.replace_target == 0:
            self.update_network_parameters()
        self.primestep_counter += 1
        return loss

    def update_network_parameters(self):
        """Hard-copy online network weights into the target network."""
        for i in range(len(self.q_target.layers)):
            self.q_target.get_layer(index=i).set_weights(self.q_eval.get_layer(index=i).get_weights())

    def save_model(self):
        """Persist the online network to self.model_file."""
        self.q_eval.save(self.model_file)

    def load_model(self):
        """Restore the online network; if the agent is fully greedy
        (epsilon == 0), sync the target network too."""
        self.q_eval = load_model(self.model_file)
        if self.epsilon == 0.0:
            self.update_network_parameters()
from service_capacity_modeling.capacity_planner import planner
from service_capacity_modeling.interface import CapacityDesires
from service_capacity_modeling.interface import DataShape
from service_capacity_modeling.interface import Interval
from service_capacity_modeling.interface import QueryPattern
def test_entity_increasing_qps_simple():
    """Planned entity-cluster CPU capacity must grow monotonically with QPS."""
    qps_values = (100, 1000, 10_000, 100_000)
    total_cpus = []
    for qps in qps_values:
        desires = CapacityDesires(
            service_tier=1,
            query_pattern=QueryPattern(
                estimated_read_per_second=Interval(
                    low=qps // 10, mid=qps, high=qps * 10, confidence=0.98
                ),
                estimated_write_per_second=Interval(
                    low=qps // 10, mid=qps, high=qps * 10, confidence=0.98
                ),
            ),
            data_shape=DataShape(
                estimated_state_size_gib=Interval(
                    low=20, mid=200, high=2000, confidence=0.98
                ),
            ),
        )
        cap_plan = planner.plan(
            model_name="org.netflix.entity",
            region="us-east-1",
            desires=desires,
            simulations=256,
        )
        # Pick the Java (dgwentity) cluster out of the least-regret plan.
        regional = cap_plan.least_regret[0].candidate_clusters.regional
        entity_plan = next(c for c in regional if c.cluster_type == 'dgwentity')
        total_cpus.append(entity_plan.count * entity_plan.instance.cpu)
        # A Java app only needs CPU and RAM...
        assert entity_plan.instance.family in ("m5", "r5")
        # ...and should never pay for ephemeral drives.
        assert entity_plan.instance.drive is None
    # Capacity must increase with load, and do so monotonically.
    assert total_cpus[0] < total_cpus[-1]
    assert sorted(total_cpus) == total_cpus
| StarcoderdataPython |
# pinguin/views.py
from rest_framework import permissions, viewsets, generics, filters
from .serializers import JobsSerializer, HousingSerializer, ApplicantSerializer, HeatmapSerializer
from .models import Jobs, Housing, Applicant, Heatmap
from .data_collection.collect_data import CollectData
from django.shortcuts import render
# Development switch: when True, scrape fresh data into the database at
# module import time. NOTE(review): import-time side effect — confirm this
# is intentional before enabling.
debug = False
if(debug):
    apa = CollectData()
class JobsViewSet(viewsets.ModelViewSet):
    """CRUD API endpoint for job listings, ordered by company.

    Read access is public; writes require authentication.
    Supports filtering by ?city=<name>.
    """
    queryset = Jobs.objects.all().order_by('company')
    serializer_class = JobsSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
    filter_backends = (filters.DjangoFilterBackend,)
    # Fixed: 'city' was listed twice; a single entry is sufficient.
    filter_fields = ('city',)
class HousingViewSet(viewsets.ModelViewSet):
    """CRUD API endpoint for housing listings, ordered by address.

    Read access is public; writes require authentication.
    Supports filtering by ?city=<name>.
    """
    queryset = Housing.objects.all().order_by('address')
    serializer_class = HousingSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
    filter_backends = (filters.DjangoFilterBackend,)
    # Fixed: 'city' was listed twice; a single entry is sufficient.
    filter_fields = ('city',)
class ApplicantViewSet(viewsets.ModelViewSet):
    """CRUD API endpoint for applicants, ordered by name.

    Read access is public; writes require authentication.
    """
    queryset = Applicant.objects.all().order_by('name')
    serializer_class = ApplicantSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
class HeatmapViewSet(viewsets.ModelViewSet):
    """CRUD API endpoint for heatmap data, ordered by occupation.

    Read access is public; writes require authentication.
    Supports filtering by ?occupation=<name>.
    """
    queryset = Heatmap.objects.all().order_by('occupation')
    serializer_class = HeatmapSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
    filter_backends = (filters.DjangoFilterBackend,)
    # Fixed: 'occupation' was listed twice; a single entry is sufficient.
    filter_fields = ('occupation',)
def index(request):
    """Render the application's landing page template."""
    return render(request, 'pinguin/index.html')
from __future__ import annotations
import datetime
import json
import typing
import urllib.parse
import attr
from cattr import Converter
from bloom.ll._compat import Literal
from bloom.ll.models.application import Application
from bloom.ll.models.application_commands import (
ApplicationCommand,
ApplicationCommandOption,
ApplicationCommandPermissions,
CommandTypes,
GuildApplicationCommandPermissions,
InteractionResponse,
)
from bloom.ll.models.audit_log import AuditLog, AuditLogEvents
from bloom.ll.models.base import UNKNOWN, UNKNOWN_TYPE, Snowflake, Unknownish
from bloom.ll.models.channel import (
AllowedMentions,
Attachment,
Channel,
ChannelTypes,
Embed,
FollowedChannel,
MessageFlags,
MessageReference,
Overwrite,
ThreadMember,
VideoQualityModes,
)
from bloom.ll.models.emoji import Emoji
from bloom.ll.models.gateway import DetailedGatewayResponse, GatewayResponse
from bloom.ll.models.guild import (
Ban,
DefaultMessageNotificationLevel,
ExplicitContentFilterLevel,
Guild,
GuildFeatures,
GuildMember,
GuildPreview,
GuildScheduledEventUser,
GuildWidget,
Integration,
ModifyGuildChannelPositionsParameters,
ModifyGuildRolePositionsParameters,
PruneCount,
SystemChannelFlags,
UserConnection,
VerificationLevel,
WelcomeScreen,
WelcomeScreenChannel,
WidgetStyleOptions,
)
from bloom.ll.models.guild_scheduled_events import (
EventStatus,
GuildScheduledEvent,
GuildScheduledEventEntityMetadata,
GuildScheduledEventEntityType,
GuildScheduledEventPrivacyLevel,
)
from bloom.ll.models.guild_template import GuildTemplate
from bloom.ll.models.invite import Invite, InviteMetadata, InviteTargetTypes
from bloom.ll.models.message import Message
from bloom.ll.models.message_components import Component
from bloom.ll.models.oauth2 import AuthorizationInformation
from bloom.ll.models.permissions import BitwisePermissionFlags, Role
from bloom.ll.models.stage_instance import PrivacyLevel, StageInstance
from bloom.ll.models.sticker import NitroStickerPacks, Sticker
from bloom.ll.models.user import User
from bloom.ll.models.voice import VoiceRegion
from bloom.ll.models.webhook import Webhook
from bloom.ll.rest.models import Request
def prepare(rest: RawRest, input_dict: typing.Dict[str, object]) -> typing.Dict[str, object]:
    """Strip UNKNOWN sentinel values, then unstructure via the REST converter."""
    known = {key: val for key, val in input_dict.items() if val is not UNKNOWN}
    out: typing.Dict[str, object] = rest.conv.unstructure(known)
    return out
T = typing.TypeVar('T')


def tuple_(
    it: Unknownish[typing.Optional[typing.Iterable[T]]],
) -> Unknownish[typing.Optional[typing.Tuple[T, ...]]]:
    """Freeze an optional iterable into a tuple, passing UNKNOWN and None through."""
    if isinstance(it, UNKNOWN_TYPE):
        return UNKNOWN
    if it is None:
        return None
    return tuple(it)
@typing.overload
def parse_reason(reason: str) -> str:
    ...


@typing.overload
def parse_reason(reason: Unknownish[str]) -> Unknownish[str]:
    ...


def parse_reason(reason: Unknownish[str]) -> Unknownish[str]:
    """Percent-encode an audit-log reason for header transport; UNKNOWN passes through."""
    if isinstance(reason, UNKNOWN_TYPE):
        return reason
    return urllib.parse.quote(reason, safe=":/?#[]@!$&'()*+,;=")
@attr.define()
class RawRest:
# every single API method.
conv: Converter
    def get_guild_audit_log(
        self,
        guild_id: Snowflake,
        *,
        user_id: Snowflake,
        action_type: AuditLogEvents,
        before: Snowflake,
        limit: int,
    ) -> Request[AuditLog]:
        """Build a GET request for a guild's audit log.

        NOTE(review): the Discord API treats these query filters as
        optional, but this signature requires all of them — confirm
        whether they should default to UNKNOWN (prepare() would then
        strip them from the query string).
        """
        return Request[AuditLog](
            'GET',
            '/guilds/{guild_id}/audit-logs',
            {'guild_id': guild_id},
            params=prepare(
                self,
                {
                    'user_id': user_id,
                    'action_type': action_type,
                    'before': before,
                    'limit': limit,
                },
            ),
        )
def get_channel(self, channel_id: Snowflake) -> Request[Channel]:
return Request[Channel]('GET', '/channels/{channel_id}', {'channel_id': channel_id})
def modify_channel(
self,
channel_id: Snowflake,
*,
# TODO: mypy_extensions.Expand[TypedDict] might help.
name: Unknownish[str] = UNKNOWN,
# base64 encoded icon
icon: Unknownish[str] = UNKNOWN,
type: Unknownish[ChannelTypes] = UNKNOWN,
position: Unknownish[typing.Optional[int]] = UNKNOWN,
topic: Unknownish[typing.Optional[str]] = UNKNOWN,
nsfw: Unknownish[typing.Optional[bool]] = UNKNOWN,
rate_limit_per_user: Unknownish[typing.Optional[int]] = UNKNOWN,
bitrate: Unknownish[typing.Optional[int]] = UNKNOWN,
user_limit: Unknownish[typing.Optional[int]] = UNKNOWN,
permission_overwrites: Unknownish[typing.Optional[typing.Iterable[Overwrite]]] = UNKNOWN,
parent_id: Unknownish[typing.Optional[Snowflake]] = UNKNOWN,
rtc_region: Unknownish[typing.Optional[str]] = UNKNOWN,
video_quality_mode: Unknownish[typing.Optional[VideoQualityModes]] = UNKNOWN,
default_auto_archive_duration: Unknownish[typing.Optional[int]] = UNKNOWN,
# thread options (TODO: an ADT method?)
archived: Unknownish[bool] = UNKNOWN,
auto_archive_duration: Unknownish[int] = UNKNOWN,
locked: Unknownish[bool] = UNKNOWN,
# audit log
reason: Unknownish[str] = UNKNOWN,
) -> Request[Channel]:
return Request[Channel](
'PATCH',
'/channels/{channel_id}',
{'channel_id': channel_id},
json=prepare(
self,
{
'name': name,
'icon': icon,
'type': type,
'position': position,
'topic': topic,
'nsfw': nsfw,
'rate_limit_per_user': rate_limit_per_user,
'bitrate': bitrate,
'user_limit': user_limit,
'permission_overwrites': tuple_(permission_overwrites),
'parent_id': parent_id,
'rtc_region': rtc_region,
'video_quality_mode': video_quality_mode,
'default_auto_archive_duration': default_auto_archive_duration,
'archived': archived,
'auto_archive_duration': auto_archive_duration,
'locked': locked,
'rate_limit_per_user': rate_limit_per_user,
},
),
headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
)
def delete_channel(
self, channel_id: Snowflake, *, reason: Unknownish[str] = UNKNOWN
) -> Request[Channel]:
return Request[Channel](
'DELETE',
'/channels/{channel_id}',
{'channel_id': channel_id},
headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
)
    def get_channel_messages(
        self,
        channel_id: Snowflake,
        *,
        around: Unknownish[Snowflake] = UNKNOWN,
        before: Unknownish[Snowflake] = UNKNOWN,
        after: Unknownish[Snowflake] = UNKNOWN,
        limit: Unknownish[int] = UNKNOWN,
    ) -> Request[typing.Tuple[Message]]:
        """Build a GET request listing a channel's messages.

        ``around``/``before``/``after`` are mutually-exclusive anchors per
        the Discord API; UNKNOWN values are stripped by ``prepare`` so only
        the filters actually supplied end up in the query string.
        """
        return Request[typing.Tuple[Message]](
            'GET',
            '/channels/{channel_id}/messages',
            {'channel_id': channel_id},
            params=prepare(
                self,
                {
                    'around': around,
                    'before': before,
                    'after': after,
                    'limit': limit,
                },
            ),
        )
def get_channel_message(
self, channel_id: Snowflake, message_id: Snowflake
) -> Request[Message]:
return Request[Message](
'GET',
'/channels/{channel.id}/messages/{message.id}',
{'channel_id': channel_id, 'message_id': message_id},
)
    def create_message(
        self,
        channel_id: Snowflake,
        *,
        # one of these is required:
        content: Unknownish[str] = UNKNOWN,
        files: Unknownish[typing.Iterable[object]] = UNKNOWN,  # TODO: better file type?
        embeds: Unknownish[typing.Iterable[Embed]] = UNKNOWN,
        sticker_ids: Unknownish[typing.Iterable[Snowflake]] = UNKNOWN,
        # optional
        tts: Unknownish[bool] = UNKNOWN,
        allowed_mentions: Unknownish[AllowedMentions] = UNKNOWN,
        message_reference: Unknownish[MessageReference] = UNKNOWN,
        components: Unknownish[typing.Iterable[Component]] = UNKNOWN,
        # TODO: partial attachments
        attachments: Unknownish[typing.Iterable[typing.Dict[str, typing.Any]]] = UNKNOWN,
    ) -> Request[Message]:
        """Build a POST request that sends a message to a channel.

        The JSON fields are serialized into the multipart 'payload_json'
        part (only when non-empty), and each file becomes a 'files[i]'
        part, matching Discord's multipart upload convention.
        """
        json_payload = prepare(
            self,
            {
                'content': content,
                'embeds': tuple_(embeds),
                'sticker_ids': tuple_(sticker_ids),
                'tts': tts,
                'allowed_mentions': allowed_mentions,
                'message_reference': message_reference,
                'components': tuple_(components),
                'attachments': tuple_(attachments),
            },
        )
        return Request[Message](
            'POST',
            '/channels/{channel_id}/messages',
            {'channel_id': channel_id},
            data={'payload_json': json.dumps(json_payload)} if json_payload else None,
            files={f'files[{i}]': file for i, file in enumerate(files)}
            if not isinstance(files, UNKNOWN_TYPE)
            else None,
        )
def crosspost_message(self, channel_id: Snowflake, message_id: Snowflake) -> Request[Message]:
return Request[Message](
'POST',
'/channels/{channel_id}/messages/{message_id}/crosspost',
{'channel_id': channel_id, 'message_id': message_id},
)
# TODO: better emoji type?
def create_reaction(
self, channel_id: Snowflake, message_id: Snowflake, *, emoji: str
) -> Request[None]:
return Request[None](
'PUT',
'/channels/{channel_id}/messages/{message_id}/reactions/{emoji}/@me',
{'channel_id': channel_id, 'message_id': message_id, 'emoji': emoji},
)
def delete_own_reaction(
self, channel_id: Snowflake, message_id: Snowflake, *, emoji: str
) -> Request[None]:
return Request[None](
'DELETE',
'/channels/{channel_id}/messages/{message_id}/reactions/{emoji}/@me',
{'channel_id': channel_id, 'message_id': message_id, 'emoji': emoji},
)
def delete_user_reaction(
self, channel_id: Snowflake, message_id: Snowflake, *, emoji: str, user_id: Snowflake
) -> Request[None]:
return Request[None](
'DELETE',
'/channels/{channel_id}/messages/{message_id}/reactions/{emoji}/{user_id}',
{
'channel_id': channel_id,
'message_id': message_id,
'emoji': emoji,
'user_id': user_id,
},
)
def get_reactions(
self,
channel_id: Snowflake,
message_id: Snowflake,
*,
emoji: str,
after: Unknownish[Snowflake] = UNKNOWN,
limit: Unknownish[int] = UNKNOWN,
) -> Request[typing.Tuple[User]]:
return Request[typing.Tuple[User]](
'GET',
'/channels/{channel_id}/messages/{message_id}/reactions/{emoji}',
{'channel_id': channel_id, 'message_id': message_id, 'emoji': emoji},
params=prepare(self, {'after': after, 'limit': limit}),
)
def delete_all_reactions(self, channel_id: Snowflake, message_id: Snowflake) -> Request[None]:
return Request[None](
'DELETE',
'/channels/{channel_id}/messages/{message_id}/reactions',
{'channel_id': channel_id, 'message_id': message_id},
)
def delete_all_reactions_for_emoji(
self, channel_id: Snowflake, message_id: Snowflake, *, emoji: str
) -> Request[None]:
return Request[None](
'DELETE',
'/channels/{channel_id}/messages/{message_id}/reactions/{emoji}',
{'channel_id': channel_id, 'message_id': message_id, 'emoji': emoji},
)
def edit_message(
self,
channel_id: Snowflake,
message_id: Snowflake,
*,
content: Unknownish[typing.Optional[str]] = UNKNOWN,
embeds: Unknownish[typing.Optional[typing.Iterable[Embed]]] = UNKNOWN,
flags: Unknownish[typing.Optional[MessageFlags]] = UNKNOWN,
# TODO: better file type
files: Unknownish[typing.Iterable[object]] = UNKNOWN,
allowed_mentions: Unknownish[typing.Optional[AllowedMentions]] = UNKNOWN,
# TODO: are partial attachments allowed?
attachments: Unknownish[typing.Optional[typing.Iterable[Attachment]]] = UNKNOWN,
components: Unknownish[typing.Optional[typing.Iterable[Component]]] = UNKNOWN,
) -> Request[Message]:
json_payload = prepare(
self,
{
'content': content,
'embeds': tuple_(embeds),
'flags': flags,
'allowed_mentions': allowed_mentions,
'attachments': tuple_(attachments),
'components': tuple_(components),
},
)
return Request[Message](
'POST',
'/channels/{channel_id}/messages',
{'channel_id': channel_id},
data={'payload_json': json.dumps(json_payload)} if json_payload else None,
files={f'files[{i}]': file for i, file in enumerate(files)}
if not isinstance(files, UNKNOWN_TYPE)
else None,
)
def delete_message(self, channel_id: Snowflake, message_id: Snowflake) -> Request[None]:
return Request[None](
'DELETE',
'/channels/{channel_id}/messages/{message_id}',
{'channel_id': channel_id, 'message_id': message_id},
)
def bulk_delete_messages(
self,
channel_id: Snowflake,
*,
messages: typing.Iterable[Snowflake],
reason: Unknownish[str] = UNKNOWN,
) -> Request[None]:
return Request[None](
'POST',
'/channels/{channel_id}/messages/bulk-delete',
{'channel_id': channel_id},
json=prepare(self, {'messages': tuple_(messages)}),
headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
)
def edit_channel_permissions(
self,
channel_id: Snowflake,
overwrite_id: Snowflake,
*,
allow: BitwisePermissionFlags,
deny: BitwisePermissionFlags,
type: Literal[0, 1],
reason: Unknownish[str] = UNKNOWN,
) -> Request[None]:
return Request[None](
'PUT',
'/channels/{channel_id}/permissions/{overwrite_id}',
{'channel_id': channel_id, 'overwrite_id': overwrite_id},
json=prepare(self, {'allow': allow, 'deny': deny, 'type': type}),
headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
)
def get_channel_invites(self, channel_id: Snowflake) -> Request[typing.Tuple[InviteMetadata]]:
return Request[typing.Tuple[InviteMetadata]](
'GET', '/channels/{channel_id}/invites', {'channel_id': channel_id}
)
def create_channel_invite(
self,
channel_id: Snowflake,
*,
max_age: Unknownish[int] = UNKNOWN,
max_uses: Unknownish[int] = UNKNOWN,
temporary: Unknownish[bool] = UNKNOWN,
unique: Unknownish[bool] = UNKNOWN,
target_type: Unknownish[InviteTargetTypes] = UNKNOWN,
target_user_id: Unknownish[Snowflake] = UNKNOWN,
target_application_id: Unknownish[Snowflake] = UNKNOWN,
reason: Unknownish[str] = UNKNOWN,
) -> Request[Invite]:
return Request[Invite](
'POST',
'/channels/{channel_id}/invites',
{'channel_id': channel_id},
json=prepare(
self,
{
'max_age': max_age,
'max_uses': max_uses,
'temporary': temporary,
'unique': unique,
'target_type': target_type,
'target_user_id': target_user_id,
'target_application_id': target_application_id,
},
),
headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
)
def delete_channel_permission(
self, channel_id: Snowflake, overwrite_id: Snowflake, *, reason: Unknownish[str] = UNKNOWN
) -> Request[None]:
return Request[None](
'DELETE',
'/channels/{channel_id}/permissions/{overwrite_id}',
{'channel_id': channel_id, 'overwrite_id': overwrite_id},
headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
)
def follow_news_channel(
self, channel_id: Snowflake, *, webhook_channel_id: Snowflake
) -> Request[FollowedChannel]:
return Request[FollowedChannel](
'POST',
'/channels/{channel_id}/followers',
{'channel_id': channel_id},
json=prepare(self, {'webhook_channel_id': webhook_channel_id}),
)
def trigger_typing_indicator(self, channel_id: Snowflake) -> Request[None]:
return Request[None]('POST', '/channels/{channel_id}/typing', {'channel_id': channel_id})
def get_pinned_messages(self, channel_id: Snowflake) -> Request[typing.Tuple[Message]]:
return Request[typing.Tuple[Message]](
'GET', '/channels/{channel_id}/pins', {'channel_id': channel_id}
)
def pin_message(
self, channel_id: Snowflake, message_id: Snowflake, *, reason: Unknownish[str] = UNKNOWN
) -> Request[None]:
return Request[None](
'PUT',
'/channels/{channel_id}/pins/{message_id}',
{'channel_id': channel_id, 'message_id': message_id},
headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
)
def unpin_message(
self, channel_id: Snowflake, message_id: Snowflake, *, reason: Unknownish[str] = UNKNOWN
) -> Request[None]:
return Request[None](
'DELETE',
'/channels/{channel_id}/pins/{message_id}',
{'channel_id': channel_id, 'message_id': message_id},
headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
)
# TODO: what does this return?
def group_dm_add_recipient(
self,
channel_id: Snowflake,
user_id: Snowflake,
*,
access_token: str,
# ????????? I think this is optional (Unknownish)
# TODO: test.
nick: str,
) -> Request[None]:
return Request[None](
'PUT',
'/channels/{channel_id}/recipients/{user_id}',
{'channel_id': channel_id, 'user_id': user_id},
json=prepare(self, {'access_token': access_token, 'nick': nick}),
)
# TODO: what does this return?
def group_dm_remove_recipient(
self, channel_id: Snowflake, user_id: Snowflake
) -> Request[None]:
return Request[None](
'DELETE',
'/channels/{channel_id}/recipients/{user_id}',
{'channel_id': channel_id, 'user_id': user_id},
)
def start_thread_with_message(
self,
channel_id: Snowflake,
message_id: Snowflake,
*,
name: str,
auto_archive_duration: Unknownish[int] = UNKNOWN,
rate_limit_per_user: Unknownish[typing.Optional[int]] = UNKNOWN,
reason: Unknownish[str] = UNKNOWN,
) -> Request[Channel]:
return Request[Channel](
'POST',
'/channels/{channel_id}/messages/{message_id}/threads',
{'channel_id': channel_id, 'message_id': message_id},
json=prepare(self, {'name': name, 'auto_archive_duration': auto_archive_duration}),
headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
)
def start_thread_without_message(
self,
channel_id: Snowflake,
*,
name: str,
auto_archive_duration: Unknownish[int],
type: ChannelTypes,
invitable: bool,
rate_limit_per_user: Unknownish[typing.Optional[int]] = UNKNOWN,
reason: Unknownish[str] = UNKNOWN,
) -> Request[Channel]:
return Request[Channel](
'POST',
'/channels/{channel_id}/threads',
{'channel_id': channel_id},
json=prepare(
self, {'name': name, 'auto_archive_duration': auto_archive_duration, 'type': type}
),
headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
)
def join_thread(self, channel_id: Snowflake) -> Request[None]:
return Request[None](
'PUT', '/channels/{channel_id}/thread-members/@me', {'channel_id': channel_id}
)
def add_thread_member(self, channel_id: Snowflake, user_id: Snowflake) -> Request[None]:
return Request[None](
'PUT',
'/channels/{channel_id}/thread-members/{user_id}',
{'channel_id': channel_id, 'user_id': user_id},
)
def leave_thread(self, channel_id: Snowflake) -> Request[None]:
return Request[None](
'DELETE', '/channels/{channel_id}/thread-members/@me', {'channel_id': channel_id}
)
def remove_thread_member(self, channel_id: Snowflake, user_id: Snowflake) -> Request[None]:
return Request[None](
'DELETE',
'/channels/{channel_id}/thread-members/{user_id}',
{'channel_id': channel_id, 'user_id': user_id},
)
def get_thread_member(
self, channel_id: Snowflake, user_id: Snowflake
) -> Request[ThreadMember]:
return Request[ThreadMember](
'GET',
'/channels/{channel_id}/thread-members/{user_id}',
{'channel_id': channel_id, 'user_id': user_id},
)
def list_thread_members(self, channel_id: Snowflake) -> Request[typing.Tuple[ThreadMember]]:
return Request[typing.Tuple[ThreadMember]](
'GET', '/channels/{channel_id}/thread-members', {'channel_id': channel_id}
)
# TODO: this doesn't return a channel, this returns a thread...
# ADT?
def list_public_archived_threads(
self,
channel_id: Snowflake,
*,
before: Unknownish[datetime.datetime] = UNKNOWN,
limit: Unknownish[int] = UNKNOWN,
) -> Request[typing.Tuple[Channel]]:
return Request[typing.Tuple[Channel]](
'GET',
'/channels/{channel.id}/threads/archived/public',
{'channel_id': channel_id},
params=prepare(self, {'before': before, 'limit': limit}),
)
# TODO: this doesn't return a channel, this returns a thread...
# ADT?
def list_private_archived_threads(
self,
channel_id: Snowflake,
*,
before: Unknownish[datetime.datetime] = UNKNOWN,
limit: Unknownish[int] = UNKNOWN,
) -> Request[typing.Tuple[Channel]]:
return Request[typing.Tuple[Channel]](
'GET',
'/channels/{channel.id}/threads/archived/private',
{'channel_id': channel_id},
params=prepare(self, {'before': before, 'limit': limit}),
)
# TODO: this doesn't return a channel, this returns a thread...
# ADT?
def list_joined_private_archived_threads(
self,
channel_id: Snowflake,
*,
before: Unknownish[Snowflake] = UNKNOWN,
limit: Unknownish[int] = UNKNOWN,
) -> Request[typing.Tuple[Channel]]:
return Request[typing.Tuple[Channel]](
'GET',
'/channels/{channel_id}/users/@me/threads/archived/private',
{'channel_id': channel_id},
params=prepare(self, {'before': before, 'limit': limit}),
)
def list_guild_emojis(self, guild_id: Snowflake) -> Request[typing.Tuple[Emoji]]:
return Request[typing.Tuple[Emoji]](
'GET', '/guilds/{guild_id}/emojis', {'guild_id': guild_id}
)
def get_guild_emoji(self, guild_id: Snowflake, emoji_id: Snowflake) -> Request[Emoji]:
return Request[Emoji](
'GET',
'/guilds/{guild_id}/emojis/{emoji_id}',
{'guild_id': guild_id, 'emoji_id': emoji_id},
)
def create_guild_emoji(
self,
guild_id: Snowflake,
*,
name: str,
# https://discord.com/developers/docs/reference#image-data
image: str,
roles: Unknownish[typing.Iterable[Snowflake]] = UNKNOWN,
reason: Unknownish[str] = UNKNOWN,
) -> Request[Emoji]:
return Request[Emoji](
'POST',
'/guilds/{guild_id}/emojis',
{'guild_id': guild_id},
json=prepare(self, {'name': name, 'image': image, 'roles': tuple_(roles)}),
headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
)
def modify_guild_emoji(
self,
guild_id: Snowflake,
emoji_id: Snowflake,
*,
name: Unknownish[str] = UNKNOWN,
roles: Unknownish[typing.Optional[typing.Iterable[Snowflake]]] = UNKNOWN,
reason: Unknownish[str] = UNKNOWN,
) -> Request[Emoji]:
return Request[Emoji](
'PATCH',
'/guilds/{guild_id}/emojis/{emoji_id}',
{'guild_id': guild_id, 'emoji_id': emoji_id},
json=prepare(self, {'name': name, 'roles': tuple_(roles)}),
headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
)
def delete_guild_emoji(
self, guild_id: Snowflake, emoji_id: Snowflake, *, reason: Unknownish[str] = UNKNOWN
) -> Request[None]:
return Request[None](
'DELETE',
'/guilds/{guild_id}/emojis/{emoji_id}',
{'guild_id': guild_id, 'emoji_id': emoji_id},
headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
)
def create_guild(
self,
*,
name: str,
# todo: this is deprecated?
region: Unknownish[typing.Optional[str]] = UNKNOWN,
# https://discord.com/developers/docs/reference#image-data
icon: Unknownish[str] = UNKNOWN,
verification_level: Unknownish[VerificationLevel] = UNKNOWN,
default_message_notifications: Unknownish[DefaultMessageNotificationLevel] = UNKNOWN,
explicit_content_filter: Unknownish[ExplicitContentFilterLevel] = UNKNOWN,
roles: Unknownish[typing.Iterable[Role]] = UNKNOWN,
# TODO: partial channel objects
channels: Unknownish[typing.Iterable[object]] = UNKNOWN,
afk_channel_id: Unknownish[Snowflake] = UNKNOWN,
afk_timeout: Unknownish[int] = UNKNOWN,
system_channel_id: Unknownish[Snowflake] = UNKNOWN,
system_channel_flags: Unknownish[int] = UNKNOWN,
) -> Request[Guild]:
return Request[Guild](
'POST',
'/guilds',
{},
json=prepare(
self,
{
'name': name,
'region': region,
'icon': icon,
'verification_level': verification_level,
'default_message_notifications': default_message_notifications,
'explicit_content_filter': explicit_content_filter,
'roles': tuple_(roles),
'channels': tuple_(channels),
'afk_channel_id': afk_channel_id,
'afk_timeout': afk_timeout,
'system_channel_id': system_channel_id,
'system_channel_flags': system_channel_flags,
},
),
)
def get_guild(self, guild_id: Snowflake, *, with_counts: bool) -> Request[Guild]:
return Request[Guild](
'GET',
'/guilds/{guild_id}',
{'guild_id': guild_id},
params={'with_counts': with_counts},
)
def get_guild_preview(self, guild_id: Snowflake) -> Request[GuildPreview]:
return Request[GuildPreview]('GET', '/guilds/{guild_id}/preview', {'guild_id': guild_id})
def modify_guild(
self,
guild_id: Snowflake,
*,
name: Unknownish[str] = UNKNOWN,
region: Unknownish[typing.Optional[str]] = UNKNOWN,
verification_level: Unknownish[typing.Optional[VerificationLevel]] = UNKNOWN,
default_message_notifications: Unknownish[
typing.Optional[DefaultMessageNotificationLevel]
] = UNKNOWN,
explicit_content_filter: Unknownish[typing.Optional[ExplicitContentFilterLevel]] = UNKNOWN,
afk_channel_id: Unknownish[typing.Optional[Snowflake]] = UNKNOWN,
afk_timeout: Unknownish[int] = UNKNOWN,
# https://discord.com/developers/docs/reference#image-data
icon: Unknownish[typing.Optional[str]] = UNKNOWN,
owner_id: Unknownish[Snowflake] = UNKNOWN,
# https://discord.com/developers/docs/reference#image-data
splash: Unknownish[typing.Optional[str]] = UNKNOWN,
discovery_splash: Unknownish[typing.Optional[str]] = UNKNOWN,
banner: Unknownish[typing.Optional[str]] = UNKNOWN,
system_channel_id: Unknownish[typing.Optional[Snowflake]] = UNKNOWN,
system_channel_flags: Unknownish[SystemChannelFlags] = UNKNOWN,
rules_channel_id: Unknownish[typing.Optional[Snowflake]] = UNKNOWN,
public_updates_channel_id: Unknownish[typing.Optional[Snowflake]] = UNKNOWN,
preferred_locale: Unknownish[typing.Optional[str]] = UNKNOWN,
features: Unknownish[typing.Iterable[GuildFeatures]] = UNKNOWN,
description: Unknownish[typing.Optional[str]] = UNKNOWN,
premium_progress_bar_enabled: Unknownish[typing.Optional[bool]] = UNKNOWN,
reason: Unknownish[str] = UNKNOWN,
) -> Request[Guild]:
return Request[Guild](
'PATCH',
'/guilds/{guild_id}',
{'guild_id': guild_id},
json=prepare(
self,
{
'name': name,
'region': region,
'verification_level': verification_level,
'default_message_notifications': default_message_notifications,
'explicit_content_filter': explicit_content_filter,
'afk_channel_id': afk_channel_id,
'afk_timeout': afk_timeout,
'icon': icon,
'owner_id': owner_id,
'splash': splash,
'discovery_splash': discovery_splash,
'banner': banner,
'system_channel_id': system_channel_id,
'system_channel_flags': system_channel_flags,
'rules_channel_id': rules_channel_id,
'public_updates_channel_id': public_updates_channel_id,
'preferred_locale': preferred_locale,
'features': tuple_(features),
'description': description,
'premium_progress_bar_enabled': premium_progress_bar_enabled,
},
),
headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
)
def delete_guild(self, guild_id: Snowflake) -> Request[None]:
return Request[None]('DELETE', '/guilds/{guild_id}', {'guild_id': guild_id})
def get_guild_channels(self, guild_id: Snowflake) -> Request[typing.Tuple[Channel]]:
return Request[typing.Tuple[Channel]](
'GET', '/guilds/{guild_id}/channels', {'guild_id': guild_id}
)
def create_guild_channel(
self,
guild_id: Snowflake,
*,
name: str,
type: Unknownish[ChannelTypes] = UNKNOWN,
topic: Unknownish[str] = UNKNOWN,
rate_limit_per_user: Unknownish[int] = UNKNOWN,
position: Unknownish[int] = UNKNOWN,
permission_overwrites: Unknownish[typing.Iterable[Overwrite]] = UNKNOWN,
parent_id: Unknownish[Snowflake] = UNKNOWN,
nsfw: Unknownish[bool] = UNKNOWN,
# voice only (TODO: typing override)
bitrate: Unknownish[int] = UNKNOWN,
user_limit: Unknownish[int] = UNKNOWN,
reason: Unknownish[str] = UNKNOWN,
) -> Request[Channel]:
return Request[Channel](
'POST',
'/guilds/{guild_id}/channels',
{'guild_id': guild_id},
json=prepare(
self,
{
'name': name,
'type': type,
'topic': topic,
'bitrate': bitrate,
'user_limit': user_limit,
'rate_limit_per_user': rate_limit_per_user,
'position': position,
'permission_overwrites': tuple_(permission_overwrites),
'parent_id': parent_id,
},
),
headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
)
def modify_guild_channel_permissions(
self,
guild_id: Snowflake,
*,
params: typing.Iterable[ModifyGuildChannelPositionsParameters],
reason: Unknownish[str] = UNKNOWN,
) -> Request[None]:
return Request[None](
'PATCH',
'/guilds/{guild_id}/channels',
{'guild_id': guild_id},
json=self.conv.unstructure(tuple_(params)),
headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
)
# TODO: this returns threads not channels... ADT?
def list_active_threads(self, guild_id: Snowflake) -> Request[typing.Tuple[Channel]]:
return Request[typing.Tuple[Channel]](
'GET', '/guilds/{guild_id}/threads/active', {'guild_id': guild_id}
)
def get_guild_member(self, guild_id: Snowflake, user_id: Snowflake) -> Request[GuildMember]:
return Request[GuildMember](
'GET',
'/guilds/{guild_id}/members/{user_id}',
{'guild_id': guild_id, 'user_id': user_id},
)
def list_guild_members(self, guild_id: Snowflake) -> Request[typing.Tuple[GuildMember]]:
return Request[typing.Tuple[GuildMember]](
'GET', '/guilds/{guild_id}/members', {'guild_id': guild_id}
)
def search_guild_members(
self, guild_id: Snowflake, *, query: str, limit: Unknownish[int] = UNKNOWN
) -> Request[typing.Tuple[GuildMember]]:
return Request[typing.Tuple[GuildMember]](
'GET',
'/guilds/{guild_id}/members/search',
{'guild_id': guild_id},
params=prepare(self, {'query': query, 'limit': limit}),
)
# TODO: make sure to support unions/optionals!
def add_guild_member(
self,
guild_id: Snowflake,
user_id: Snowflake,
*,
access_token: str,
# requires `MANAGE_NICKNAMES`
nick: Unknownish[str] = UNKNOWN,
# requires `MANAGE_ROLES`
roles: Unknownish[typing.Tuple[Snowflake]] = UNKNOWN,
# requires `MUTE_MEMBERS`
mute: Unknownish[bool] = UNKNOWN,
# requires `DEAFEN_MEMBERS`
deaf: Unknownish[bool] = UNKNOWN,
) -> Request[typing.Optional[GuildMember]]:
return Request[typing.Optional[GuildMember]](
'PUT',
'/guilds/{guild_id}/members/{user_id}',
{'guild_id': guild_id, 'user_id': user_id},
json=prepare(
self,
{
'access_token': access_token,
'nick': nick,
'roles': roles,
'mute': mute,
'deaf': deaf,
},
),
)
def modify_guild_member(
self,
guild_id: Snowflake,
user_id: Snowflake,
*,
nick: Unknownish[typing.Optional[str]] = UNKNOWN,
roles: Unknownish[typing.Optional[typing.Tuple[Snowflake]]] = UNKNOWN,
mute: Unknownish[typing.Optional[bool]] = UNKNOWN,
deaf: Unknownish[typing.Optional[bool]] = UNKNOWN,
channel_id: Unknownish[typing.Optional[Snowflake]] = UNKNOWN,
reason: Unknownish[str] = UNKNOWN,
) -> Request[GuildMember]:
return Request[GuildMember](
'PATCH',
'/guilds/{guild_id}/members/{user_id}',
{'guild_id': guild_id, 'user_id': user_id},
json=prepare(
self,
{
'nick': nick,
'roles': roles,
'mute': mute,
'deaf': deaf,
'channel_id': channel_id,
},
),
headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
)
def modify_current_member(
self,
guild_id: Snowflake,
*,
nick: Unknownish[typing.Optional[str]] = UNKNOWN,
reason: Unknownish[str] = UNKNOWN,
) -> Request[GuildMember]:
return Request[GuildMember](
'PATCH',
'/guilds/{guild_id}/members/@me',
{'guild_id': guild_id},
json=prepare(self, {'nick': nick}),
headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
)
def modify_current_user_nick(
self,
guild_id: Snowflake,
*,
# TODO: why ON EARTH is this not required in the api???
nick: Unknownish[typing.Optional[str]] = UNKNOWN,
reason: Unknownish[str] = UNKNOWN,
) -> Request[GuildMember]:
# deprecated for modify current member
return Request[GuildMember](
'PATCH',
'/guilds/{guild_id}/members/@me/nick',
{'guild_id': guild_id},
json=prepare(self, {'nick': nick}),
headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
)
def add_guild_member_role(
self,
guild_id: Snowflake,
user_id: Snowflake,
role_id: Snowflake,
*,
reason: Unknownish[str] = UNKNOWN,
) -> Request[None]:
return Request[None](
'PUT',
'/guilds/{guild_id}/members/{user_id}/roles/{role_id}',
{'guild_id': guild_id, 'user_id': user_id, 'role_id': role_id},
headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
)
def remove_guild_member_role(
self,
guild_id: Snowflake,
user_id: Snowflake,
role_id: Snowflake,
*,
reason: Unknownish[str] = UNKNOWN,
) -> Request[None]:
return Request[None](
'DELETE',
'/guilds/{guild_id}/members/{user_id}/roles/{role_id}',
{'guild_id': guild_id, 'user_id': user_id, 'role_id': role_id},
headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
)
def remove_guild_member(
self, guild_id: Snowflake, user_id: Snowflake, *, reason: Unknownish[str] = UNKNOWN
) -> Request[None]:
return Request[None](
'DELETE',
'/guilds/{guild_id}/members/{user_id}',
{'guild_id': guild_id, 'user_id': user_id},
headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
)
def get_guild_bans(self, guild_id: Snowflake) -> Request[typing.Tuple[Ban]]:
return Request[typing.Tuple[Ban]]('GET', '/guilds/{guild_id}/bans', {'guild_id': guild_id})
def get_guild_ban(self, guild_id: Snowflake, user_id: Snowflake) -> Request[Ban]:
return Request[Ban](
'GET', '/guilds/{guild_id}/bans/{user_id}', {'guild_id': guild_id, 'user_id': user_id}
)
def create_guild_ban(
self,
guild_id: Snowflake,
user_id: Snowflake,
*,
delete_message_days: Unknownish[int] = UNKNOWN,
reason: Unknownish[str] = UNKNOWN,
) -> Request[None]:
return Request[None](
'PUT',
'/guilds/{guild_id}/bans/{user_id}',
{'guild_id': guild_id, 'user_id': user_id},
json=prepare(self, {'delete_message_days': delete_message_days}),
headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
)
def remove_guild_ban(
self, guild_id: Snowflake, user_id: Snowflake, *, reason: Unknownish[str] = UNKNOWN
) -> Request[None]:
return Request[None](
'DELETE',
'/guilds/{guild_id}/bans/{user_id}',
{'guild_id': guild_id, 'user_id': user_id},
headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
)
def get_guild_roles(self, guild_id: Snowflake) -> Request[typing.Tuple[Role]]:
return Request[typing.Tuple[Role]](
'GET', '/guilds/{guild_id}/roles', {'guild_id': guild_id}
)
def create_guild_role(
self,
guild_id: Snowflake,
*,
name: Unknownish[str] = UNKNOWN,
permissions: Unknownish[BitwisePermissionFlags] = UNKNOWN,
color: Unknownish[int] = UNKNOWN,
hoist: Unknownish[bool] = UNKNOWN,
# https://discord.com/developers/docs/reference#image-data
# TODO: are these nullable
icon: Unknownish[str] = UNKNOWN,
unicode_emoji: Unknownish[str] = UNKNOWN,
mentionable: Unknownish[bool] = UNKNOWN,
reason: Unknownish[str] = UNKNOWN,
) -> Request[Role]:
return Request[Role](
'POST',
'/guilds/{guild_id}/roles',
{'guild_id': guild_id},
json=prepare(
self,
{
'name': name,
'permissions': permissions,
'color': color,
'hoist': hoist,
'mentionable': mentionable,
},
),
headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
)
def modify_guild_role_positions(
self,
guild_id: Snowflake,
*,
parameters: typing.Tuple[ModifyGuildRolePositionsParameters],
reason: Unknownish[str] = UNKNOWN,
) -> Request[typing.Tuple[Role]]:
return Request[typing.Tuple[Role]](
'PATCH',
'/guilds/{guild_id}/roles',
{'guild_id': guild_id},
json=self.conv.unstructure(parameters),
headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
)
def modify_guild_role(
self,
guild_id: Snowflake,
role_id: Snowflake,
*,
name: Unknownish[typing.Optional[str]] = UNKNOWN,
permissions: Unknownish[typing.Optional[BitwisePermissionFlags]] = UNKNOWN,
color: Unknownish[typing.Optional[int]] = UNKNOWN,
hoist: Unknownish[typing.Optional[bool]] = UNKNOWN,
# https://discord.com/developers/docs/reference#image-data
icon: Unknownish[typing.Optional[str]] = UNKNOWN,
unicode_emoji: Unknownish[typing.Optional[str]] = UNKNOWN,
mentionable: Unknownish[typing.Optional[bool]] = UNKNOWN,
reason: Unknownish[str] = UNKNOWN,
) -> Request[Role]:
return Request[Role](
'PATCH',
'/guilds/{guild_role}/roles/{role_id}',
{'guild_id': guild_id, 'role_id': role_id},
json=prepare(
self,
{
'name': name,
'permissions': permissions,
'color': color,
'hoist': hoist,
'mentionable': mentionable,
},
),
headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
)
def delete_guild_role(
self, guild_id: Snowflake, role_id: Snowflake, *, reason: Unknownish[str] = UNKNOWN
) -> Request[None]:
return Request[None](
'DELETE',
'/guilds/{guild_id}/roles/{role_id}',
{'guild_id': guild_id, 'role_id': role_id},
headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
)
# TODO: should this get a seperate object since thing inside cannot be None?
def get_guild_prune_count(
self,
guild_id: Snowflake,
*,
days: Unknownish[int] = UNKNOWN,
include_roles: Unknownish[typing.Iterable[Snowflake]] = UNKNOWN,
) -> Request[PruneCount]:
return Request[PruneCount](
'GET',
'/guilds/{guild_id}/prune',
{'guild_id': guild_id},
json=prepare(
self,
{
'days': days,
'include_roles': (
','.join(map(str, include_roles))
if not isinstance(include_roles, UNKNOWN_TYPE)
else include_roles
),
},
),
)
def begin_guild_prune(
self,
guild_id: Snowflake,
*,
days: Unknownish[int] = UNKNOWN,
compute_prune_count: Unknownish[bool] = UNKNOWN,
include_roles: Unknownish[typing.Iterable[Snowflake]] = UNKNOWN,
reason: Unknownish[str] = UNKNOWN,
) -> Request[PruneCount]:
return Request[PruneCount](
'POST',
'/guilds/{guild_id}/prune',
{'guild_id': guild_id},
json=prepare(
self,
{
'days': days,
'compute_prune_count': compute_prune_count,
'include_roles': (
','.join(map(str, include_roles))
if not isinstance(include_roles, UNKNOWN_TYPE)
else include_roles
),
},
),
headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
)
def get_guild_voice_regions(self, guild_id: Snowflake) -> Request[typing.Tuple[VoiceRegion]]:
return Request[typing.Tuple[VoiceRegion]](
'GET', '/guilds/{guild_id}/regions', {'guild_id': guild_id}
)
def get_guild_invites(self, guild_id: Snowflake) -> Request[typing.Tuple[InviteMetadata]]:
return Request[typing.Tuple[InviteMetadata]](
'GET', '/guilds/{guild_id}/invites', {'guild_id': guild_id}
)
def get_guild_integrations(self, guild_id: Snowflake) -> Request[typing.Tuple[Integration]]:
return Request[typing.Tuple[Integration]](
'GET', '/guilds/{guild_id}/integrations', {'guild_id': guild_id}
)
def delete_guild_integration(
self, guild_id: Snowflake, integration_id: Snowflake, *, reason: Unknownish[str] = UNKNOWN
) -> Request[None]:
return Request[None](
'DELETE',
'/guilds/{guild_id}/integrations/{integration_id}',
{'guild_id': guild_id, 'integration_id': integration_id},
headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
)
def get_guild_widget_settings(self, guild_id: Snowflake) -> Request[GuildWidget]:
return Request[GuildWidget]('GET', '/guilds/{guild_id}/widget', {'guild_id': guild_id})
def modify_guild_widget(
self,
guild_id: Snowflake,
*,
# TODO: this is not DRY...
enabled: Unknownish[bool] = UNKNOWN,
channel_id: Unknownish[typing.Optional[Snowflake]] = UNKNOWN,
reason: Unknownish[str] = UNKNOWN,
) -> Request[GuildWidget]:
return Request[GuildWidget](
'PATCH',
'/guilds/{guild_id}/widget',
{'guild_id': guild_id},
headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
)
# TODO: is it even worth making a model for this one route?
def get_guild_widget(self, guild_id: Snowflake) -> Request[typing.Dict[str, typing.Any]]:
return Request[typing.Dict[str, typing.Any]](
'GET', '/guilds/{guild_id}/widget.json', {'guild_id': guild_id}
)
# TODO: check this partial out.
def get_guild_vanity_url(self, guild_id: Snowflake) -> Request[typing.Dict[str, typing.Any]]:
return Request[typing.Dict[str, typing.Any]](
'GET', '/guilds/{guild_id}/vanity-url', {'guild_id': guild_id}
)
# ... I guess it's a string? it's an image though :thinking:
# TODO: is there a better way to type this?
def get_guild_widget_image(
self, guild_id: Snowflake, *, style: Unknownish[WidgetStyleOptions] = UNKNOWN
) -> Request[str]:
return Request[str](
'GET',
'/guilds/{guild_id}/widget.png',
{'guild_id': guild_id},
params=prepare(self, {'style': style}),
)
def get_guild_welcome_screen(self, guild_id: Snowflake) -> Request[WelcomeScreen]:
return Request[WelcomeScreen](
'GET', '/guilds/{guild_id}/welcome-screen', {'guild_id': guild_id}
)
def modify_guild_welcome_screen(
self,
guild_id: Snowflake,
*,
enabled: Unknownish[typing.Optional[bool]] = UNKNOWN,
welcome_channels: Unknownish[
typing.Optional[typing.Iterable[WelcomeScreenChannel]]
] = UNKNOWN,
description: Unknownish[typing.Optional[str]] = UNKNOWN,
reason: Unknownish[str] = UNKNOWN,
) -> Request[WelcomeScreen]:
return Request[WelcomeScreen](
'PATCH',
'/guilds/{guild_id}/welcome-screen',
{'guild_id': guild_id},
json=prepare(
self,
{
'enabled': enabled,
'welcome_channels': tuple_(welcome_channels),
'description': description,
},
),
headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
)
# TODO: what does this return?
def modify_current_user_voice_state(
self,
guild_id: Snowflake,
*,
channel_id: Snowflake,
suppress: Unknownish[bool] = UNKNOWN,
request_to_speak_timestamp: Unknownish[typing.Optional[datetime.datetime]] = UNKNOWN,
) -> Request[None]:
return Request[None](
'PATCH',
'/guilds/{guild_id}/voice-states/@me',
{'guild_id': guild_id},
json=prepare(
self,
{
'channel_id': channel_id,
'suppress': suppress,
'request_to_speak_timestamp': request_to_speak_timestamp,
},
),
)
# TODO: what does this return?
def modify_user_voice_state(
self,
guild_id: Snowflake,
user_id: Snowflake,
*,
channel_id: Snowflake,
suppress: Unknownish[bool] = UNKNOWN,
) -> Request[None]:
return Request[None](
'PATCH',
'/guilds/{guild_id}/voice-states/{user_id}',
{'guild_id': guild_id, 'user_id': user_id},
json=prepare(self, {'channel_id': channel_id, 'suppress': suppress}),
)
def list_scheduled_events_for_guild(
self, guild_id: Snowflake, *, with_user_count: Unknownish[bool] = UNKNOWN
) -> Request[typing.List[GuildScheduledEvent]]:
return Request[typing.List[GuildScheduledEvent]](
'GET',
'/guilds/{guild_id}/scheduled-events',
{'guild_id': guild_id},
json=prepare(self, {'with_user_count': with_user_count}),
)
def create_guild_scheduled_event(
self,
guild_id: Snowflake,
*,
channel_id: Unknownish[Snowflake] = UNKNOWN,
entity_metadata: Unknownish[GuildScheduledEventEntityMetadata] = UNKNOWN,
name: str,
privacy_level: GuildScheduledEventPrivacyLevel,
scheduled_start_time: datetime.datetime,
scheduled_end_time: Unknownish[datetime.datetime] = UNKNOWN,
description: Unknownish[str] = UNKNOWN,
entity_type: GuildScheduledEventEntityType,
reason: Unknownish[str] = UNKNOWN,
) -> Request[GuildScheduledEvent]:
return Request[GuildScheduledEvent](
'POST',
'/guilds/{guild_id}/scheduled-events',
{'guild_id': guild_id},
json=prepare(
self,
{
'channel_id': channel_id,
'entity_metadata': entity_metadata,
'name': name,
'privacy_level': privacy_level,
'scheduled_start_time': scheduled_start_time,
'scheduled_end_time': scheduled_end_time,
'description': description,
'entity_type': entity_type,
},
),
headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
)
def get_guild_scheduled_event(
self,
guild_id: Snowflake,
event_id: Snowflake,
*,
with_user_count: Unknownish[bool] = UNKNOWN,
) -> Request[GuildScheduledEvent]:
return Request[GuildScheduledEvent](
'GET',
'/guilds/{guild_id}/scheduled-events/{event_id}',
{'guild_id': guild_id, 'event_id': event_id},
json=prepare(self, {'with_user_count': with_user_count}),
)
    # TODO: are the arguments to this... nullable too?
    def modify_guild_scheduled_event(
        self,
        guild_id: Snowflake,
        event_id: Snowflake,
        *,
        channel_id: Unknownish[Snowflake] = UNKNOWN,
        entity_metadata: Unknownish[GuildScheduledEventEntityMetadata] = UNKNOWN,
        name: Unknownish[str] = UNKNOWN,
        privacy_level: Unknownish[GuildScheduledEventPrivacyLevel] = UNKNOWN,
        scheduled_start_time: Unknownish[datetime.datetime] = UNKNOWN,
        scheduled_end_time: Unknownish[datetime.datetime] = UNKNOWN,
        description: Unknownish[str] = UNKNOWN,
        entity_type: Unknownish[GuildScheduledEventEntityType] = UNKNOWN,
        status: Unknownish[EventStatus] = UNKNOWN,
        reason: Unknownish[str] = UNKNOWN,
    ) -> Request[GuildScheduledEvent]:
        """Patch a guild scheduled event; only the supplied fields change."""
        return Request[GuildScheduledEvent](
            'PATCH',
            '/guilds/{guild_id}/scheduled-events/{event_id}',
            {'guild_id': guild_id, 'event_id': event_id},
            json=prepare(
                self,
                {
                    'channel_id': channel_id,
                    'entity_metadata': entity_metadata,
                    'name': name,
                    'privacy_level': privacy_level,
                    'scheduled_start_time': scheduled_start_time,
                    'scheduled_end_time': scheduled_end_time,
                    'description': description,
                    'entity_type': entity_type,
                    'status': status,
                },
            ),
            headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
        )
    def delete_guild_scheduled_event(
        self, guild_id: Snowflake, event_id: Snowflake
    ) -> Request[None]:
        """Delete a guild scheduled event."""
        return Request[None](
            'DELETE',
            '/guilds/{guild_id}/scheduled-events/{event_id}',
            {'guild_id': guild_id, 'event_id': event_id},
        )
    def get_guild_scheduled_event_users(
        self,
        guild_id: Snowflake,
        event_id: Snowflake,
        *,
        limit: Unknownish[int] = UNKNOWN,
        with_member: Unknownish[bool] = UNKNOWN,
        before: Unknownish[Snowflake] = UNKNOWN,
        after: Unknownish[Snowflake] = UNKNOWN,
    ) -> Request[typing.List[GuildScheduledEventUser]]:
        """Paginate the users subscribed to a scheduled event."""
        return Request[typing.List[GuildScheduledEventUser]](
            'GET',
            '/guilds/{guild_id}/scheduled-events/{event_id}/users',
            {'guild_id': guild_id, 'event_id': event_id},
            params=prepare(
                self,
                {'limit': limit, 'with_member': with_member, 'before': before, 'after': after},
            ),
        )
def get_guild_template(self, template_code: str) -> Request[GuildTemplate]:
return Request[GuildTemplate](
'GET', '/guilds/template/{template_code}', {'template_code': template_code}
)
    def create_guild_from_guild_template(
        self,
        template_code: str,
        *,
        name: str,
        # https://discord.com/developers/docs/reference#image-data
        icon: Unknownish[str] = UNKNOWN,
    ) -> Request[Guild]:
        """Create a new guild based on a template."""
        return Request[Guild](
            'POST',
            '/guilds/templates/{template_code}',
            {'template_code': template_code},
            json=prepare(self, {'name': name, 'icon': icon}),
        )
    def get_guild_templates(self, guild_id: Snowflake) -> Request[typing.Tuple[GuildTemplate]]:
        """List the templates that belong to a guild."""
        return Request[typing.Tuple[GuildTemplate]](
            'GET', '/guilds/{guild_id}/templates', {'guild_id': guild_id}
        )
    def create_guild_template(
        self,
        guild_id: Snowflake,
        *,
        name: str,
        description: Unknownish[typing.Optional[str]] = UNKNOWN,
    ) -> Request[GuildTemplate]:
        """Create a template from a guild's current state."""
        return Request[GuildTemplate](
            'POST',
            '/guilds/{guild_id}/templates',
            {'guild_id': guild_id},
            json=prepare(self, {'name': name, 'description': description}),
        )
    def sync_guild_template(
        self, guild_id: Snowflake, template_code: str
    ) -> Request[GuildTemplate]:
        """Sync a template to the guild's current state (PUT)."""
        return Request[GuildTemplate](
            'PUT',
            '/guilds/{guild_id}/templates/{template_code}',
            {'guild_id': guild_id, 'template_code': template_code},
        )
def modify_guild_template(
self,
guild_id: Snowflake,
template_code: str,
*,
name: Unknownish[str] = UNKNOWN,
description: Unknownish[typing.Optional[str]] = UNKNOWN,
) -> Request[GuildTemplate]:
return Request[GuildTemplate](
'PUT',
'/guilds/{guild_id}/templates/{template_code}',
{'guild_id': guild_id, 'template_code': template_code},
json=prepare(self, {'name': name, 'description': description}),
)
    def delete_guild_template(
        self, guild_id: Snowflake, template_code: str
    ) -> Request[GuildTemplate]:
        """Delete a guild template; returns the deleted template."""
        return Request[GuildTemplate](
            'DELETE',
            '/guilds/{guild_id}/templates/{template_code}',
            {'guild_id': guild_id, 'template_code': template_code},
        )
def get_invite(
self,
invite_code: str,
*,
with_counts: Unknownish[bool] = UNKNOWN,
with_expiration: Unknownish[bool] = UNKNOWN,
guild_scheduled_event_id: Unknownish[Snowflake] = UNKNOWN,
) -> Request[Invite]:
return Request[Invite](
'GET',
'/invites/{invite_code}',
{'invite_code': invite_code},
params={
'with_counts': with_counts,
'with_expiration': with_expiration,
'guild_scheduled_event_id': guild_scheduled_event_id,
},
)
    def delete_invite(
        self, invite_code: str, *, reason: Unknownish[str] = UNKNOWN
    ) -> Request[Invite]:
        """Delete an invite; `reason` is recorded in the audit log."""
        return Request[Invite](
            'DELETE',
            '/invites/{invite_code}',
            {'invite_code': invite_code},
            headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
        )
    # TODO: what does this return?
    def create_stage_instance(
        self,
        *,
        channel_id: Snowflake,
        topic: str,
        privacy_level: Unknownish[PrivacyLevel] = UNKNOWN,
        reason: Unknownish[str] = UNKNOWN,
    ) -> Request[typing.Any]:
        """Create a stage instance on a stage channel."""
        return Request[typing.Any](
            'POST',
            '/stage-instances',
            {},
            json=prepare(
                self, {'channel_id': channel_id, 'topic': topic, 'privacy_level': privacy_level}
            ),
            headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
        )
    def get_stage_instance(self, channel_id: Snowflake) -> Request[StageInstance]:
        """Get the stage instance associated with a stage channel."""
        return Request[StageInstance](
            'GET', '/stage-instances/{channel_id}', {'channel_id': channel_id}
        )
    # TODO: what does this return?
    def modify_stage_instance(
        self,
        channel_id: Snowflake,
        *,
        topic: Unknownish[str] = UNKNOWN,
        privacy_level: Unknownish[PrivacyLevel] = UNKNOWN,
        reason: Unknownish[str] = UNKNOWN,
    ) -> Request[None]:
        """Update a stage instance's topic and/or privacy level."""
        return Request[None](
            'PATCH',
            '/stage-instances/{channel_id}',
            {'channel_id': channel_id},
            json=prepare(self, {'topic': topic, 'privacy_level': privacy_level}),
            headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
        )
    # TODO: what does this return?
    def delete_stage_instance(
        self, channel_id: Snowflake, *, reason: Unknownish[str] = UNKNOWN
    ) -> Request[None]:
        """Delete the stage instance of a stage channel."""
        return Request[None](
            'DELETE',
            '/stage-instances/{channel_id}',
            {'channel_id': channel_id},
            headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
        )
    def get_sticker(self, sticker_id: Snowflake) -> Request[Sticker]:
        """Get a sticker by id."""
        return Request[Sticker]('GET', '/stickers/{sticker_id}', {'sticker_id': sticker_id})
    def list_nitro_sticker_packs(self) -> Request[NitroStickerPacks]:
        """List the sticker packs available to Nitro subscribers."""
        return Request[NitroStickerPacks]('GET', '/sticker-packs', {})
    def list_guild_sickers(self, guild_id: Snowflake) -> Request[typing.Tuple[Sticker]]:
        """List a guild's custom stickers.

        NOTE(review): the method name has a typo ("sickers"), but renaming it
        would break existing callers, so it is left as-is.
        """
        return Request[typing.Tuple[Sticker]](
            'GET', '/guilds/{guild_id}/stickers', {'guild_id': guild_id}
        )
    def get_guild_sticker(self, guild_id: Snowflake, sticker_id: Snowflake) -> Request[Sticker]:
        """Get one of a guild's custom stickers."""
        return Request[Sticker](
            'GET',
            '/guilds/{guild_id}/stickers/{sticker_id}',
            {'guild_id': guild_id, 'sticker_id': sticker_id},
        )
    def create_guild_sticker(
        self,
        guild_id: Snowflake,
        *,
        name: str,
        # TODO: this is probably `typing.Optional`?
        description: str,
        tags: str,
        # TODO: better file type
        file: object,
        reason: Unknownish[str] = UNKNOWN,
    ) -> Request[Sticker]:
        """Upload a new custom sticker to a guild (multipart form upload)."""
        return Request[Sticker](
            'POST',
            '/guilds/{guild_id}/stickers',
            {'guild_id': guild_id},
            # sticker creation is multipart/form-data, hence `data=` + `files=`
            # instead of a JSON body
            data=prepare(self, {'name': name, 'description': description, 'tags': tags}),
            files={'file': file},
            headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
        )
    def modify_guild_sticker(
        self,
        guild_id: Snowflake,
        sticker_id: Snowflake,
        *,
        name: Unknownish[str] = UNKNOWN,
        description: Unknownish[typing.Optional[str]] = UNKNOWN,
        tags: Unknownish[str] = UNKNOWN,
        reason: Unknownish[str] = UNKNOWN,
    ) -> Request[Sticker]:
        """Patch a guild sticker's name, description and/or tags."""
        return Request[Sticker](
            'PATCH',
            '/guilds/{guild_id}/stickers/{sticker_id}',
            {'guild_id': guild_id, 'sticker_id': sticker_id},
            json=prepare(self, {'name': name, 'description': description, 'tags': tags}),
            headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
        )
    def delete_guild_sticker(
        self, guild_id: Snowflake, sticker_id: Snowflake, *, reason: Unknownish[str] = UNKNOWN
    ) -> Request[None]:
        """Delete a guild sticker; `reason` is recorded in the audit log."""
        return Request[None](
            'DELETE',
            '/guilds/{guild_id}/stickers/{sticker_id}',
            {'guild_id': guild_id, 'sticker_id': sticker_id},
            headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
        )
    def get_current_user(self) -> Request[User]:
        """Get the user belonging to the current token."""
        return Request[User]('GET', '/users/@me', {})
    def get_user(self, user_id: Snowflake) -> Request[User]:
        """Get a user by id."""
        return Request[User]('GET', '/users/{user_id}', {'user_id': user_id})
    def modify_current_user(
        self,
        *,
        username: Unknownish[str] = UNKNOWN,
        # https://discord.com/developers/docs/reference#image-data
        avatar: Unknownish[typing.Optional[str]] = UNKNOWN,
    ) -> Request[User]:
        """Modify the current user's username and/or avatar."""
        return Request[User](
            'PATCH', '/users/@me', {}, json=prepare(self, {'username': username, 'avatar': avatar})
        )
    # TODO: partial guild object?
    def get_current_user_guilds(self) -> Request[typing.Tuple[typing.Dict[str, typing.Any]]]:
        """List the guilds the current user is a member of."""
        return Request[typing.Tuple[typing.Dict[str, typing.Any]]]('GET', '/users/@me/guilds', {})
    def get_current_user_guild_member(self, guild_id: Snowflake) -> Request[GuildMember]:
        """Get the current user's member object for a guild."""
        return Request[GuildMember](
            'GET', '/users/@me/guilds/{guild_id}/member', {'guild_id': guild_id}
        )
    def leave_guild(self, guild_id: Snowflake) -> Request[None]:
        """Leave a guild as the current user."""
        return Request[None]('DELETE', '/users/@me/guilds/{guild_id}', {'guild_id': guild_id})
    def create_dm(self, *, recipient_id: Snowflake) -> Request[Channel]:
        """Open (or reuse) a DM channel with a user."""
        return Request[Channel](
            'POST', '/users/@me/channels', {}, json=prepare(self, {'recipient_id': recipient_id})
        )
    # TODO: do DMs created with this really not show up in client?
    def create_group_dm(
        self, *, access_tokens: typing.Iterable[str], nicks: typing.Dict[Snowflake, str]
    ) -> Request[Channel]:
        """Open a group DM using users' OAuth2 access tokens."""
        return Request[Channel](
            'POST',
            '/users/@me/channels',
            {},
            json=prepare(self, {'access_tokens': tuple_(access_tokens), 'nicks': nicks}),
        )
    def get_user_connections(self) -> Request[typing.Tuple[UserConnection]]:
        """List the current user's account connections."""
        return Request[typing.Tuple[UserConnection]]('GET', '/users/@me/connections', {})
    def list_voice_regions(self) -> Request[typing.Tuple[VoiceRegion]]:
        """List the available voice regions."""
        return Request[typing.Tuple[VoiceRegion]]('GET', '/voice/regions', {})
    # TODO: does this really not support audit log reason?
    def create_webhook(
        self,
        channel_id: Snowflake,
        *,
        name: str,
        # https://discord.com/developers/docs/reference#image-data
        avatar: Unknownish[typing.Optional[str]] = UNKNOWN,
    ) -> Request[Webhook]:
        """Create a webhook in a channel."""
        return Request[Webhook](
            'POST',
            '/channels/{channel_id}/webhooks',
            {'channel_id': channel_id},
            json=prepare(self, {'name': name, 'avatar': avatar}),
        )
    def get_channel_webhooks(self, channel_id: Snowflake) -> Request[typing.Tuple[Webhook]]:
        """List a channel's webhooks."""
        return Request[typing.Tuple[Webhook]](
            'GET', '/channels/{channel_id}/webhooks', {'channel_id': channel_id}
        )
    def get_guild_webhooks(self, guild_id: Snowflake) -> Request[typing.Tuple[Webhook]]:
        """List all webhooks in a guild."""
        return Request[typing.Tuple[Webhook]](
            'GET', '/guilds/{guild_id}/webhooks', {'guild_id': guild_id}
        )
    def get_webhook(self, webhook_id: Snowflake) -> Request[Webhook]:
        """Get a webhook by id (requires authentication)."""
        return Request[Webhook]('GET', '/webhooks/{webhook_id}', {'webhook_id': webhook_id})
    def get_webhook_with_token(
        self, webhook_id: Snowflake, webhook_token: str
    ) -> Request[Webhook]:
        """Get a webhook using its token (no bot authentication required)."""
        return Request[Webhook](
            'GET',
            '/webhooks/{webhook_id}/{webhook_token}',
            {'webhook_id': webhook_id, 'webhook_token': webhook_token},
        )
    def modify_webhook(
        self,
        webhook_id: Snowflake,
        *,
        name: Unknownish[str] = UNKNOWN,
        avatar: Unknownish[typing.Optional[str]] = UNKNOWN,
        # https://discord.com/developers/docs/reference#image-data
        channel_id: Unknownish[Snowflake] = UNKNOWN,
    ) -> Request[Webhook]:
        """Patch a webhook's name, avatar and/or target channel."""
        return Request[Webhook](
            'PATCH',
            '/webhooks/{webhook_id}',
            {'webhook_id': webhook_id},
            json=prepare(self, {'name': name, 'avatar': avatar, 'channel_id': channel_id}),
        )
    def modify_webhook_with_token(
        self,
        webhook_id: Snowflake,
        webhook_token: str,
        *,
        name: Unknownish[str] = UNKNOWN,
        avatar: Unknownish[typing.Optional[str]] = UNKNOWN,
        # https://discord.com/developers/docs/reference#image-data
        channel_id: Unknownish[Snowflake] = UNKNOWN,
    ) -> Request[Webhook]:
        """Patch a webhook using its token instead of bot authentication."""
        return Request[Webhook](
            'PATCH',
            '/webhooks/{webhook_id}/{webhook_token}',
            {'webhook_id': webhook_id, 'webhook_token': webhook_token},
            json=prepare(self, {'name': name, 'avatar': avatar, 'channel_id': channel_id}),
        )
    def delete_webhook(self, webhook_id: Snowflake) -> Request[None]:
        """Delete a webhook by id."""
        return Request[None]('DELETE', '/webhooks/{webhook_id}', {'webhook_id': webhook_id})
    def delete_webhook_with_token(
        self, webhook_id: Snowflake, webhook_token: str
    ) -> Request[None]:
        """Delete a webhook using its token."""
        return Request[None](
            'DELETE',
            '/webhooks/{webhook_id}/{webhook_token}',
            {'webhook_id': webhook_id, 'webhook_token': webhook_token},
        )
    # TODO: webhooks can't send stickers?
    # TODO: what does this return?
    def execute_webhook(
        self,
        webhook_id: Snowflake,
        webhook_token: str,
        *,
        # one of these is required:
        content: Unknownish[str] = UNKNOWN,
        files: Unknownish[typing.Iterable[object]] = UNKNOWN,  # TODO: better file type?
        embeds: Unknownish[typing.Iterable[Embed]] = UNKNOWN,
        # optional
        wait: Unknownish[bool] = UNKNOWN,
        thread_id: Unknownish[Snowflake] = UNKNOWN,
        username: Unknownish[str] = UNKNOWN,
        avatar_url: Unknownish[str] = UNKNOWN,
        tts: Unknownish[bool] = UNKNOWN,
        allowed_mentions: Unknownish[AllowedMentions] = UNKNOWN,
        message_reference: Unknownish[MessageReference] = UNKNOWN,
        components: Unknownish[typing.Iterable[Component]] = UNKNOWN,
        # TODO: partial attachments
        attachments: Unknownish[typing.Iterable[typing.Dict[str, typing.Any]]] = UNKNOWN,
    ) -> Request[None]:
        """Send a message through a webhook (multipart upload when files given)."""
        # the JSON payload rides inside the multipart body as `payload_json`
        # so that it can be combined with file uploads
        json_payload = prepare(
            self,
            {
                'content': content,
                'embeds': tuple_(embeds),
                'username': username,
                'avatar_url': avatar_url,
                'tts': tts,
                'allowed_mentions': allowed_mentions,
                'message_reference': message_reference,
                'components': tuple_(components),
                'attachments': tuple_(attachments),
            },
        )
        return Request[None](
            'POST',
            '/webhooks/{webhook_id}/{webhook_token}',
            {'webhook_id': webhook_id, 'webhook_token': webhook_token},
            params=prepare(self, {'wait': wait, 'thread_id': thread_id}),
            data={'payload_json': json.dumps(json_payload)} if json_payload else None,
            files={f'files[{i}]': file for i, file in enumerate(files)}
            if not isinstance(files, UNKNOWN_TYPE)
            else None,
        )
    def get_webhook_message(
        self,
        webhook_id: Snowflake,
        webhook_token: str,
        message_id: Snowflake,
        *,
        thread_id: Unknownish[Snowflake] = UNKNOWN,
    ) -> Request[Message]:
        """Get a message that was sent by this webhook."""
        return Request[Message](
            'GET',
            '/webhooks/{webhook_id}/{webhook_token}/messages/{message_id}',
            {'webhook_id': webhook_id, 'webhook_token': webhook_token, 'message_id': message_id},
            params=prepare(self, {'thread_id': thread_id}),
        )
    def edit_webhook_message(
        self,
        webhook_id: Snowflake,
        webhook_token: str,
        message_id: Snowflake,
        *,
        thread_id: Unknownish[Snowflake] = UNKNOWN,
        content: Unknownish[typing.Optional[str]] = UNKNOWN,
        embeds: Unknownish[typing.Optional[typing.Iterable[Embed]]] = UNKNOWN,
        # TODO: better file type
        files: Unknownish[typing.Iterable[object]] = UNKNOWN,
        allowed_mentions: Unknownish[typing.Optional[AllowedMentions]] = UNKNOWN,
        # TODO: partial attachments type
        attachments: Unknownish[
            typing.Optional[typing.Iterable[typing.Dict[str, typing.Any]]]
        ] = UNKNOWN,
        components: Unknownish[typing.Optional[typing.Iterable[Component]]] = UNKNOWN,
    ) -> Request[Message]:
        """Edit a message that was sent by this webhook."""
        # JSON payload is embedded in the multipart body to allow file edits
        json_payload = prepare(
            self,
            {
                'content': content,
                'embeds': tuple_(embeds),
                'allowed_mentions': allowed_mentions,
                'components': tuple_(components),
                'attachments': tuple_(attachments),
            },
        )
        return Request[Message](
            'PATCH',
            '/webhooks/{webhook_id}/{webhook_token}/messages/{message_id}',
            {'webhook_id': webhook_id, 'webhook_token': webhook_token, 'message_id': message_id},
            params=prepare(self, {'thread_id': thread_id}),
            data={'payload_json': json.dumps(json_payload)} if json_payload else None,
            files={f'files[{i}]': file for i, file in enumerate(files)}
            if not isinstance(files, UNKNOWN_TYPE)
            else None,
        )
def delete_webhook_message(
self,
webhook_id: Snowflake,
webhook_token: str,
message_id: Snowflake,
*,
thread_id: Unknownish[Snowflake],
) -> Request[None]:
return Request[None](
'DELETE',
'/webhooks/{webhook_id}/{webhook_token}/messages/{message_id}',
{'webhook_id': webhook_id, 'webhook_token': webhook_token, 'message_id': message_id},
params=prepare(self, {'thread_id': thread_id}),
)
    def get_gateway(self) -> Request[GatewayResponse]:
        """Get the gateway WebSocket URL."""
        return Request[GatewayResponse]('GET', '/gateway', {})
    def get_gateway_bot(self) -> Request[DetailedGatewayResponse]:
        """Get the gateway URL plus shard/session-limit info for this bot."""
        return Request[DetailedGatewayResponse]('GET', '/gateway/bot', {})
    def get_current_bot_application_information(self) -> Request[Application]:
        """Get the application object for the current bot."""
        return Request[Application]('GET', '/oauth2/applications/@me', {})
    def get_current_authorization_information(self) -> Request[AuthorizationInformation]:
        """Get info about the current OAuth2 authorization."""
        return Request[AuthorizationInformation]('GET', '/oauth2/@me', {})
def get_global_application_commands(
self, application_id: Snowflake
) -> Request[typing.Tuple[ApplicationCommand]]:
return Request[typing.Tuple[ApplicationCommand]](
'GET', '/applications/{application_id}', {'application_id': application_id}
)
    def create_global_application_command(
        self,
        application_id: Snowflake,
        *,
        name: str,
        description: str,
        options: Unknownish[typing.Iterable[ApplicationCommandOption]] = UNKNOWN,
        default: Unknownish[bool] = UNKNOWN,
        type: Unknownish[CommandTypes] = UNKNOWN,
    ) -> Request[ApplicationCommand]:
        """Create a global application command."""
        return Request[ApplicationCommand](
            'POST',
            '/applications/{application_id}/commands',
            {'application_id': application_id},
            json=prepare(
                self,
                {
                    'name': name,
                    'description': description,
                    'options': tuple_(options),
                    'default': default,
                    'type': type,
                },
            ),
        )
def get_global_application_command(
self, application_id: Snowflake, command_id: Snowflake
) -> Request[ApplicationCommand]:
return Request[ApplicationCommand](
'GET',
'/applications/{application_id}/{command_id}',
{'application_id': application_id, 'command_id': command_id},
)
    def edit_global_application_command(
        self,
        application_id: Snowflake,
        command_id: Snowflake,
        *,
        name: Unknownish[str] = UNKNOWN,
        description: Unknownish[str] = UNKNOWN,
        options: Unknownish[typing.Iterable[ApplicationCommandOption]] = UNKNOWN,
        default_permission: Unknownish[bool] = UNKNOWN,
    ) -> Request[ApplicationCommand]:
        """Patch a global application command."""
        return Request[ApplicationCommand](
            'PATCH',
            '/applications/{application_id}/commands/{command_id}',
            {'application_id': application_id, 'command_id': command_id},
            json=prepare(
                self,
                {
                    'name': name,
                    'description': description,
                    'options': tuple_(options),
                    'default_permission': default_permission,
                },
            ),
        )
    def delete_global_application_command(
        self, application_id: Snowflake, command_id: Snowflake
    ) -> Request[None]:
        """Delete a global application command."""
        return Request[None](
            'DELETE',
            '/applications/{application_id}/commands/{command_id}',
            {'application_id': application_id, 'command_id': command_id},
        )
    def bulk_overwrite_global_application_commands(
        self, application_id: Snowflake, *, commands: typing.Iterable[ApplicationCommand]
    ) -> Request[typing.Tuple[ApplicationCommand]]:
        """Replace ALL global commands with the supplied list."""
        return Request[typing.Tuple[ApplicationCommand]](
            'PUT',
            '/applications/{application_id}/commands',
            {'application_id': application_id},
            json=self.conv.unstructure(tuple_(commands)),
        )
    def get_guild_application_commands(
        self, application_id: Snowflake, guild_id: Snowflake
    ) -> Request[typing.Tuple[ApplicationCommand]]:
        """Fetch all of the application's commands for a specific guild."""
        return Request[typing.Tuple[ApplicationCommand]](
            'GET',
            '/applications/{application_id}/guilds/{guild_id}/commands',
            {'application_id': application_id, 'guild_id': guild_id},
        )
def create_guild_application_command(
self,
application_id: Snowflake,
guild_id: Snowflake,
*,
name: str,
description: str,
options: Unknownish[typing.Iterable[ApplicationCommandOption]] = UNKNOWN,
default: Unknownish[bool] = UNKNOWN,
type: Unknownish[CommandTypes] = UNKNOWN,
) -> Request[ApplicationCommand]:
return Request[ApplicationCommand](
'POST',
'/applications/{application_id}/commands',
{'application_id': application_id},
json=prepare(
self,
{
'name': name,
'description': description,
'options': tuple_(options),
'default': default,
'type': type,
},
),
)
    def get_guild_application_command(
        self, application_id: Snowflake, guild_id: Snowflake, command_id: Snowflake
    ) -> Request[ApplicationCommand]:
        """Fetch a single guild application command."""
        return Request[ApplicationCommand](
            'GET',
            '/applications/{application_id}/guilds/{guild_id}/commands/{command_id}',
            {'application_id': application_id, 'guild_id': guild_id, 'command_id': command_id},
        )
    def edit_guild_application_command(
        self,
        application_id: Snowflake,
        guild_id: Snowflake,
        command_id: Snowflake,
        *,
        name: Unknownish[str] = UNKNOWN,
        description: Unknownish[str] = UNKNOWN,
        options: Unknownish[typing.Iterable[ApplicationCommandOption]] = UNKNOWN,
        default_permission: Unknownish[bool] = UNKNOWN,
    ) -> Request[ApplicationCommand]:
        """Patch a guild application command."""
        return Request[ApplicationCommand](
            'PATCH',
            '/applications/{application_id}/guilds/{guild_id}/commands/{command_id}',
            {'application_id': application_id, 'guild_id': guild_id, 'command_id': command_id},
            json=prepare(
                self,
                {
                    'name': name,
                    'description': description,
                    'options': tuple_(options),
                    'default_permission': default_permission,
                },
            ),
        )
    def delete_guild_application_command(
        self, application_id: Snowflake, guild_id: Snowflake, command_id: Snowflake
    ) -> Request[None]:
        """Delete a guild application command."""
        return Request[None](
            'DELETE',
            '/applications/{application_id}/guilds/{guild_id}/commands/{command_id}',
            {'application_id': application_id, 'guild_id': guild_id, 'command_id': command_id},
        )
    def bulk_overwrite_guild_application_commands(
        self,
        application_id: Snowflake,
        guild_id: Snowflake,
        *,
        commands: typing.Iterable[ApplicationCommand],
    ) -> Request[typing.Tuple[ApplicationCommand]]:
        """Replace ALL of the application's commands in a guild."""
        return Request[typing.Tuple[ApplicationCommand]](
            'PUT',
            '/applications/{application_id}/guilds/{guild_id}/commands',
            {'application_id': application_id, 'guild_id': guild_id},
            json=self.conv.unstructure(tuple_(commands)),
        )
    def get_guild_application_command_permissions(
        self,
        application_id: Snowflake,
        guild_id: Snowflake,
    ) -> Request[typing.Tuple[GuildApplicationCommandPermissions]]:
        """Fetch command permissions for all commands in a guild."""
        return Request[typing.Tuple[GuildApplicationCommandPermissions]](
            'GET',
            '/applications/{application_id}/guilds/{guild_id}/commands/permissions',
            {'application_id': application_id, 'guild_id': guild_id},
        )
    def get_application_command_permissions(
        self, application_id: Snowflake, guild_id: Snowflake, command_id: Snowflake
    ) -> Request[typing.Tuple[ApplicationCommandPermissions]]:
        """Fetch the permission overwrites of a single command in a guild."""
        return Request[typing.Tuple[ApplicationCommandPermissions]](
            'GET',
            '/applications/{application_id}/guilds/{guild_id}/commands/{command_id}/permissions',
            {'application_id': application_id, 'guild_id': guild_id, 'command_id': command_id},
        )
    def edit_application_command_permissions(
        self,
        application_id: Snowflake,
        guild_id: Snowflake,
        command_id: Snowflake,
        *,
        permissions: typing.Iterable[ApplicationCommandPermissions],
    ) -> Request[GuildApplicationCommandPermissions]:
        """Overwrite the permissions of a single command in a guild."""
        return Request[GuildApplicationCommandPermissions](
            'PUT',
            '/applications/{application_id}/guilds/{guild_id}/commands/{command_id}/permissions',
            {'application_id': application_id, 'guild_id': guild_id, 'command_id': command_id},
            json=prepare(self, {'permissions': tuple_(permissions)}),
        )
    def batch_edit_application_command_permissions(
        self,
        application_id: Snowflake,
        guild_id: Snowflake,
        *,
        # TODO: partial GuildApplicationCommandPermissions
        new_permissions: typing.Dict[typing.Any, typing.Any],
    ) -> Request[typing.Tuple[GuildApplicationCommandPermissions]]:
        """Batch-overwrite command permissions for a guild."""
        return Request[typing.Tuple[GuildApplicationCommandPermissions]](
            'PUT',
            '/applications/{application_id}/guilds/{guild_id}/commands/permissions',
            {'application_id': application_id, 'guild_id': guild_id},
            json=self.conv.unstructure(new_permissions),
        )
    # TODO: what does this return?
    def create_interaction_response(
        self,
        application_id: Snowflake,
        interaction_token: str,
        *,
        response: InteractionResponse,
        # TODO: narrow file type
        files: Unknownish[typing.Iterable[object]] = UNKNOWN,
    ) -> Request[None]:
        """Respond to an interaction (within its 3-second window)."""
        return Request[None](
            'POST',
            # this ratelimits on webhook
            '/interactions/{webhook_id}/{webhook_token}/callback',
            {'webhook_id': application_id, 'webhook_token': interaction_token},
            data={'payload_json': json.dumps(self.conv.unstructure(response))},
            files={f'files[{i}]': file for i, file in enumerate(files)}
            if not isinstance(files, UNKNOWN_TYPE)
            else None,
        )
    def get_original_interaction_response(
        self,
        application_id: Snowflake,
        interaction_token: str,
    ) -> Request[Message]:
        """Get the initial response message to an interaction."""
        return Request[Message](
            'GET',
            # this ratelimits on webhook
            '/webhooks/{webhook_id}/{webhook_token}/messages/@original',
            {'webhook_id': application_id, 'webhook_token': interaction_token},
        )
    def edit_original_interaction_response(
        self,
        application_id: Snowflake,
        interaction_token: str,
        *,
        content: Unknownish[typing.Optional[str]] = UNKNOWN,
        embeds: Unknownish[typing.Optional[typing.Iterable[Embed]]] = UNKNOWN,
        # TODO: narrow file type
        files: Unknownish[typing.Iterable[object]] = UNKNOWN,
        allowed_mentions: Unknownish[typing.Optional[AllowedMentions]] = UNKNOWN,
        attachments: Unknownish[typing.Optional[typing.Iterable[Attachment]]] = UNKNOWN,
        components: Unknownish[typing.Optional[typing.Iterable[Component]]] = UNKNOWN,
    ) -> Request[Message]:
        """Edit the initial response message to an interaction."""
        # JSON payload rides in the multipart body so it can be combined
        # with file uploads
        json_payload = prepare(
            self,
            {
                'content': content,
                'embeds': tuple_(embeds),
                'allowed_mentions': allowed_mentions,
                'components': tuple_(components),
                'attachments': tuple_(attachments),
            },
        )
        return Request[Message](
            'PATCH',
            # this ratelimits on webhook
            '/webhooks/{webhook_id}/{webhook_token}/messages/@original',
            {'webhook_id': application_id, 'webhook_token': interaction_token},
            data={'payload_json': json.dumps(json_payload)} if json_payload else None,
            files={f'files[{i}]': file for i, file in enumerate(files)}
            if not isinstance(files, UNKNOWN_TYPE)
            else None,
        )
    def delete_original_interaction_response(
        self, application_id: Snowflake, interaction_token: str
    ) -> Request[None]:
        """Delete the initial response message to an interaction."""
        return Request[None](
            'DELETE',
            # this ratelimits on webhook
            '/webhooks/{webhook_id}/{webhook_token}/messages/@original',
            {'webhook_id': application_id, 'webhook_token': interaction_token},
        )
    # TODO: interaction followups can't send stickers?
    # TODO: what does this return?
    def create_followup_message(
        self,
        application_id: Snowflake,
        interaction_token: str,
        *,
        # one of these is required:
        content: Unknownish[str] = UNKNOWN,
        # TODO: narrow file type
        files: Unknownish[typing.Iterable[object]] = UNKNOWN,
        embeds: Unknownish[typing.Iterable[Embed]] = UNKNOWN,
        # optional
        flags: Unknownish[MessageFlags] = UNKNOWN,
        # TODO: interaction followups probably can't do these?
        username: Unknownish[str] = UNKNOWN,
        avatar_url: Unknownish[str] = UNKNOWN,
        tts: Unknownish[bool] = UNKNOWN,
        allowed_mentions: Unknownish[AllowedMentions] = UNKNOWN,
        message_reference: Unknownish[MessageReference] = UNKNOWN,
        components: Unknownish[typing.Iterable[Component]] = UNKNOWN,
    ) -> Request[None]:
        """Send a followup message for an interaction (webhook-style)."""
        # NOTE(review): `flags` is accepted but not forwarded in the payload
        # below — confirm whether it should be included.
        json_payload = prepare(
            self,
            {
                'content': content,
                'embeds': tuple_(embeds),
                'username': username,
                'avatar_url': avatar_url,
                'tts': tts,
                'allowed_mentions': allowed_mentions,
                'message_reference': message_reference,
                'components': tuple_(components),
            },
        )
        return Request[None](
            'POST',
            # this ratelimits on webhook
            '/webhooks/{webhook_id}/{webhook_token}',
            {'webhook_id': application_id, 'webhook_token': interaction_token},
            data={'payload_json': json.dumps(json_payload)} if json_payload else None,
            files={f'files[{i}]': file for i, file in enumerate(files)}
            if not isinstance(files, UNKNOWN_TYPE)
            else None,
        )
    def get_followup_message(
        self, application_id: Snowflake, interaction_token: str, message_id: Snowflake
    ) -> Request[Message]:
        """Get a followup message for an interaction."""
        return Request[Message](
            'GET',
            # this ratelimits on webhook
            '/webhooks/{webhook_id}/{webhook_token}/messages/{message_id}',
            {
                'webhook_id': application_id,
                'webhook_token': interaction_token,
                'message_id': message_id,
            },
        )
    def edit_followup_message(
        self,
        application_id: Snowflake,
        interaction_token: str,
        message_id: Snowflake,
        *,
        content: Unknownish[typing.Optional[str]] = UNKNOWN,
        embeds: Unknownish[typing.Optional[typing.Iterable[Embed]]] = UNKNOWN,
        # TODO: better file type
        files: Unknownish[typing.Iterable[object]] = UNKNOWN,
        allowed_mentions: Unknownish[typing.Optional[AllowedMentions]] = UNKNOWN,
        attachments: Unknownish[typing.Optional[typing.Iterable[Attachment]]] = UNKNOWN,
        components: Unknownish[typing.Optional[typing.Iterable[Component]]] = UNKNOWN,
    ) -> Request[Message]:
        """Edit a followup message for an interaction."""
        json_payload = prepare(
            self,
            {
                'content': content,
                'embeds': tuple_(embeds),
                'allowed_mentions': allowed_mentions,
                'components': tuple_(components),
                'attachments': tuple_(attachments),
            },
        )
        return Request[Message](
            'PATCH',
            '/webhooks/{webhook_id}/{webhook_token}/messages/{message_id}',
            {
                'webhook_id': application_id,
                'webhook_token': interaction_token,
                'message_id': message_id,
            },
            data={'payload_json': json.dumps(json_payload)} if json_payload else None,
            files={f'files[{i}]': file for i, file in enumerate(files)}
            if not isinstance(files, UNKNOWN_TYPE)
            else None,
        )
    def delete_followup_message(
        self, application_id: Snowflake, interaction_token: str, message_id: Snowflake
    ) -> Request[None]:
        """Delete a followup message for an interaction."""
        return Request[None](
            'DELETE',
            '/webhooks/{webhook_id}/{webhook_token}/messages/{message_id}',
            {
                'webhook_id': application_id,
                'webhook_token': interaction_token,
                'message_id': message_id,
            },
        )
import re
import requests
from bs4 import BeautifulSoup
def get_page_content(url, timeout=5):
    """Fetch *url* and return its parsed HTML.

    Args:
        url: Address of the page to download.
        timeout: Seconds to wait for the server before giving up.

    Returns:
        BeautifulSoup: Parsed document tree.

    Raises:
        Exception: 'Timeout' when the request times out, the numeric status
            code when the server answers with a non-200 status, or 'Error'
            for any other request failure.
    """
    try:
        # Make the request
        response = requests.get(url, stream=True, timeout=timeout)
    # If the request timed out, surface that specifically
    except requests.Timeout:
        raise Exception('Timeout')
    # Any other transport-level failure (DNS error, refused connection, ...)
    except requests.RequestException:
        raise Exception('Error')
    # Check the status code OUTSIDE the try block. Previously the
    # Exception(status_code) raised inside `try` was swallowed by the bare
    # `except:` and re-raised as a generic 'Error', losing the status code.
    if response.status_code != 200:
        raise Exception(response.status_code)
    return BeautifulSoup(response.content, "html.parser")
def extract_features(page_content, features):
    """Extract text features from a parsed page.

    Args:
        page_content: BeautifulSoup-like object exposing ``select``.
        features: Iterable of feature specs with ``selector`` (CSS selector),
            ``pattern`` (regex applied to the first matched tag's text) and
            ``multiple_values`` (join all matches vs. take the first).

    Returns:
        list[str]: One extracted string per feature ('' when nothing matched).
    """
    extracted_features = []
    for feature in features:
        tag_content = page_content.select(feature.selector)
        text = tag_content[0].text if tag_content else ''
        values = re.findall(feature.pattern, text)
        if feature.multiple_values:
            extracted_features.append(', '.join(values))
        else:
            # Bug fix: `values[0]` raised IndexError when the pattern did
            # not match; fall back to an empty string instead.
            extracted_features.append(values[0] if values else '')
    return extracted_features
'''Main module.'''
import numpy as np
import matplotlib.pyplot as plt
from genetic_algorithm.core.utils import dim_number, chromosome_length
from genetic_algorithm.core.tools import create_population, elitism, selection, crossover, mutation
from genetic_algorithm.core.benchmark import get_scores, get_value, solved
def algorithm(params: dict):
    '''
    Runs genetic algorithm.

    Args
        params (dict): Algorithm parameters.
    '''
    # number of generations
    iter_num = params['algorithm']['numberOfIterations']
    # best solution ever found
    best_ind = {'fitness': 0, 'solution': None, 'value': None, 'solved': False}
    solution_found = False
    # stats to display
    max_fit, min_fit, ave_fit = [], [], []
    # create population
    # (dim_number and chromosome_length are helper callables injected from
    # genetic_algorithm.core.utils)
    chromosomes, dim_num, chrom_length = create_population(params, dim_number, chromosome_length)
    # get fitness values and decoded real numbers
    fit_values, real_nums = get_scores(chromosomes, dim_num, chrom_length, params)
    # update algorithm statistics
    update_stats(fit_values, real_nums, best_ind, dim_num, max_fit, min_fit, ave_fit, params)
    for i in range(iter_num):
        print(f'Generation #{i+1}...')
        # next generation
        chromosomes = generation(chromosomes, fit_values, params)
        # get new fitness values and decoded real numbers
        fit_values, real_nums = get_scores(chromosomes, dim_num, chrom_length, params)
        # update algorithm statistics
        update_stats(fit_values, real_nums, best_ind, dim_num, max_fit, min_fit, ave_fit, params)
        # check if soultion is found
        if not solution_found:
            solution_found = solved(best_ind, dim_num, params)
            if solution_found:
                best_ind['solved'] = True
                best_ind['generation'] = i+1
    print_stats(best_ind)
    display_plot(max_fit, min_fit, ave_fit, iter_num)
def generation(chromosomes: np.ndarray, fit_values: np.ndarray, params: dict) -> np.ndarray:
    '''
    Generates a new population via selection, crossover and mutation,
    optionally carrying over the best individuals unchanged (elitism).
    Args:
        chromosomes (np.ndarray): Current population.
        fit_values (np.ndarray): Fitness values.
        params (dict): Algorithm parameters.
    Returns:
        np.ndarray: New generation.
    '''
    pop_size = params['algorithm']['populationSize']
    # Default to no reserved elite slots so the function also works when
    # elitism is disabled (previously `elit_size` was undefined in that
    # case, raising NameError at the while-loop condition below).
    elit_size = 0
    best_inds = None
    # perform elitism strategy
    if params['algorithm']['elitism']['strategy'] == 'enabled':
        elit_size = params['algorithm']['elitism']['size']
        best_inds = elitism(chromosomes, fit_values, elit_size)
    children = []
    while len(children) < (pop_size - elit_size):
        # perform selection
        parent1, parent2 = selection(fit_values, params)
        # perform crossover
        child = crossover(chromosomes, parent1, parent2, params)
        children.append(child)
    # convert to np.ndarray
    children = np.vstack(children)
    # mutate the new population
    children = mutation(children, params)
    # concatenate with the elite individuals carried over unchanged
    if best_inds is not None:
        children = np.vstack((children, best_inds))
    return children
def update_stats(fit_values: np.ndarray, real_nums: np.ndarray, best_ind: dict,
                 dim_number: int, max_fit: list, min_fit: list, ave_fit: list,
                 params: dict):
    '''
    Updates the best individual found so far and records fitness statistics.
    Args:
        fit_values (np.ndarray): Fitness values of the current population.
        real_nums (np.ndarray): Decoded real-valued solutions.
        best_ind (dict): Best individual found so far (mutated in place).
        dim_number (int): Number of dimensions.
        max_fit (list): Running history of fitness max values.
        min_fit (list): Running history of fitness min values.
        ave_fit (list): Running history of fitness mean values.
        params (dict): Algorithm parameters (used to evaluate the objective).
    '''
    generation_best = fit_values.max()
    # Only replace the incumbent when this generation strictly improves on it.
    if generation_best > best_ind['fitness']:
        winner = np.argmax(fit_values)
        best_ind['fitness'] = generation_best
        best_ind['solution'] = real_nums[winner].reshape(-1, dim_number)
        best_ind['value'] = get_value(best_ind['solution'], params)
    max_fit.append(generation_best)
    min_fit.append(fit_values.min())
    ave_fit.append(fit_values.mean())
def print_stats(best_ind: dict):
    '''
    Prints a summary of the run: whether a solution was found, the best
    solution vector and its objective value.
    Args:
        best_ind (dict): Best individual.
    '''
    divider = '-' * 50
    print(divider)
    if best_ind['solved']:
        print('Solution found.')
        print('Generation:', best_ind['generation'])
    else:
        print('Solution not found.')
    # Print each coordinate of the best solution on its own line.
    for i, sln in enumerate(best_ind['solution'].flatten()):
        print(f'x_{i}: {sln}')
    print('f(x):', best_ind['value'])
    print(divider)
def display_plot(max_fit: list, min_fit: list, ave_fit: list, iter_num: int):
    '''
    Plots the maximum, minimum and average fitness per generation.
    Args:
        max_fit (list): Maximum fitness values (one per generation + initial).
        min_fit (list): Minimum fitness values.
        ave_fit (list): Average fitness values.
        iter_num (int): Number of generations.
    '''
    # iter_num + 1 points: the initial population plus one per generation.
    generations = range(iter_num + 1)
    plt.title("Fitness vs. Number of Generations")
    plt.xlabel("Generations")
    plt.ylabel("Fitness")
    for series, label in ((max_fit, "Maximum"), (min_fit, "Minimum"), (ave_fit, "Average")):
        plt.plot(generations, series, label=label)
    # Fitness is assumed normalized to [0, 1]; leave a little headroom.
    plt.ylim((0, 1.05))
    plt.xticks(np.arange(0, iter_num + 1, 2.0))
    plt.legend()
    plt.show()
| StarcoderdataPython |
120745 | #!/usr/bin/env python
"""Populate GitHub labels for issue tracker."""
from collections import namedtuple
import codecs
import yaml
import json
import sys
import os
import re
import requests
import urllib.parse
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
__version__ = "1.0.0"
RE_VALID_COLOR = re.compile('#[a-fA-F0-9]{6}')
class Api:
    """Minimal GitHub REST API client used to manage issue labels.

    Wraps the handful of endpoints needed by the label sync tool (get file
    contents, list/create/edit/delete labels). All failures surface as
    ``RuntimeError`` with the original exception chained as the cause.
    """

    def __init__(self, token, user, repo):
        """Initialize with an access token and the target ``user``/``repo``."""
        self.url = 'https://api.github.com'
        self.token = token
        self.user = user
        self.repo = repo

    def _request_kwargs(self, timeout, headers):
        """Normalize a request's timeout/headers and inject the auth token.

        A timeout of 0 means "no timeout" (``None`` for requests). The
        caller's headers dict is reused (and mutated) when provided,
        matching the previous behavior.
        """
        if timeout == 0:
            timeout = None
        if headers is None:
            headers = {}
        headers['Authorization'] = 'token {}'.format(self.token)
        return timeout, headers

    def _delete(self, command, timeout=60, expected=200, headers=None):
        """Send a DELETE REST command.

        Raises:
            RuntimeError: if the request errors out or the response status
                differs from ``expected``.
        """
        timeout, headers = self._request_kwargs(timeout, headers)
        try:
            resp = requests.delete(
                command,
                headers=headers,
                timeout=timeout
            )
            # Explicit check instead of `assert`: assertions are stripped
            # under `python -O`, which would silently disable error handling.
            if resp.status_code != expected:
                raise RuntimeError('unexpected status {}'.format(resp.status_code))
        except Exception as e:
            # Chain the cause so debugging information is not lost.
            raise RuntimeError('DELETE command failed: {}'.format(command)) from e

    def _patch(self, command, payload, timeout=60, expected=200, headers=None):
        """Send a PATCH REST command with an optional JSON payload."""
        timeout, headers = self._request_kwargs(timeout, headers)
        if payload is not None:
            payload = json.dumps(payload)
            headers['content-type'] = 'application/json'
        try:
            resp = requests.patch(
                command,
                data=payload,
                headers=headers,
                timeout=timeout
            )
            if resp.status_code != expected:
                raise RuntimeError('unexpected status {}'.format(resp.status_code))
        except Exception as e:
            raise RuntimeError('PATCH command failed: {}'.format(command)) from e

    def _post(self, command, payload, timeout=60, expected=200, headers=None):
        """Send a POST REST command with an optional JSON payload."""
        timeout, headers = self._request_kwargs(timeout, headers)
        if payload is not None:
            payload = json.dumps(payload)
            headers['content-type'] = 'application/json'
        try:
            resp = requests.post(
                command,
                data=payload,
                headers=headers,
                timeout=timeout
            )
            if resp.status_code != expected:
                raise RuntimeError('unexpected status {}'.format(resp.status_code))
        except Exception as e:
            raise RuntimeError('POST command failed: {}'.format(command)) from e

    def _get(self, command, payload=None, timeout=60, pages=False, expected=200, headers=None, text=False):
        """Send a GET REST request, yielding one decoded result at a time.

        With ``pages=True`` (and JSON output) individual list entries are
        yielded and the ``Link: rel="next"`` header is followed until
        exhausted; otherwise a single response body is yielded.
        """
        timeout, headers = self._request_kwargs(timeout, headers)
        while command:
            try:
                resp = requests.get(
                    command,
                    params=payload,
                    headers=headers,
                    timeout=timeout
                )
                if resp.status_code != expected:
                    raise RuntimeError('unexpected status {}'.format(resp.status_code))
                data = resp.text if text else json.loads(resp.text)
            except Exception as e:
                raise RuntimeError('GET command failed: {}'.format(command)) from e
            # Follow pagination only when requested.
            command = resp.links.get('next', {}).get('url', '') if pages else ''
            # Yield outside the `try` so exceptions thrown into the generator
            # by the consumer are not swallowed and re-labelled as failures.
            if pages and not text:
                for entry in data:
                    yield entry
            else:
                yield data

    def get_contents(self, file, ref="master"):
        """Return the raw contents of ``file`` at the given ref."""
        return list(
            self._get(
                '/'.join([self.url, 'repos', self.user, self.repo, 'contents', urllib.parse.quote(file)]),
                headers={'Accept': 'application/vnd.github.v3.raw'},
                payload={'ref': ref},
                text=True
            )
        )[0]

    def get_labels(self):
        """Return every label in the repository (all pages)."""
        return list(
            self._get(
                '/'.join([self.url, 'repos', self.user, self.repo, 'labels']),
                pages=True,
                headers={'Accept': 'application/vnd.github.symmetra-preview+json'}
            )
        )

    def create_label(self, name, color, description):
        """Create a label (expects HTTP 201 Created)."""
        self._post(
            '/'.join([self.url, 'repos', self.user, self.repo, 'labels']),
            {'name': name, 'color': color, 'description': description},
            headers={'Accept': 'application/vnd.github.symmetra-preview+json'},
            expected=201
        )

    def edit_label(self, old_name, new_name, color, description):
        """Edit (and possibly rename) an existing label."""
        self._patch(
            '/'.join([self.url, 'repos', self.user, self.repo, 'labels', urllib.parse.quote(old_name)]),
            {'new_name': new_name, 'color': color, 'description': description},
            headers={'Accept': 'application/vnd.github.symmetra-preview+json'}
        )

    def delete_label(self, name):
        """Delete a label (expects HTTP 204 No Content)."""
        self._delete(
            '/'.join([self.url, 'repos', self.user, self.repo, 'labels', urllib.parse.quote(name)]),
            headers={'Accept': 'application/vnd.github.symmetra-preview+json'},
            expected=204
        )
# Label handling
class LabelEdit(namedtuple('LabelEdit', ['old', 'new', 'color', 'description', 'modified'])):
    """Record of a pending label change.

    Fields:
        old: current label name on GitHub.
        new: desired label name (may equal ``old``).
        color: desired 6-digit hex color (no leading '#').
        description: desired label description.
        modified: True when name/color/description differ and an edit is needed.
    """

    # Keep instances as lightweight as the underlying tuple: without this a
    # namedtuple subclass grows a per-instance __dict__.
    __slots__ = ()
class GhLabelSync:
    """GitHub label sync class.

    Reads the desired label set from a YAML config file stored in the
    repository and reconciles the repository's live labels against it via
    the ``Api`` client: existing labels are edited/renamed, missing labels
    are created and, in delete mode, unknown labels are removed (unless
    listed under ``ignores``).
    """
    def __init__(self, config, git, delete=False, debug=False):
        """Initialize.

        Args:
            config: path of the YAML label file inside the repository.
            git: an ``Api`` client instance.
            delete: when True, labels not in the config are deleted.
            debug: when True, report actions without calling the API.
        """
        self.git = git
        self.debug = debug
        self.delete = delete
        config = self._get_config(config)
        # Colors must be parsed first: label entries may reference them by name.
        self._parse_colors(config)
        self._parse_labels(config)
    def _get_config(self, config):
        """Fetch and parse the YAML config from the repository at GITHUB_SHA."""
        print('Reading labels from {}'.format(config))
        # NOTE(review): this uses the full (C)Loader, which can construct
        # arbitrary Python objects from YAML tags; the file comes from the
        # repo being synced, so confirm that is an acceptable trust boundary
        # (yaml.safe_load would be the conservative choice).
        return yaml.load(self.git.get_contents(config, ref=os.getenv("GITHUB_SHA")), Loader=Loader)
    def _validate_str(self, name):
        """Raise TypeError unless ``name`` is a str."""
        if not isinstance(name, str):
            raise TypeError("Key value is not of type 'str', type '{}' received instead".format(type(name)))
    def _validate_color(self, color):
        """Raise unless ``color`` is a '#rrggbb' hex string."""
        self._validate_str(color)
        if RE_VALID_COLOR.match(color) is None:
            raise ValueError('{} is not a valid color'.format(color))
    def _parse_labels(self, config):
        """Validate and store label entries and the ignore list from the config."""
        self.labels = []
        # Track lowercased names to reject case-insensitive duplicates.
        seen = set()
        for value in config.get('labels', {}):
            name = value['name']
            self._validate_str(name)
            value['color'] = self._resolve_color(value['color'])
            if 'renamed' in value:
                self._validate_str(value['renamed'])
            if 'description' in value and not isinstance(value['description'], str):
                raise ValueError("Description for '{}' should be of type str".format(name))
            if name.lower() in seen:
                raise ValueError("The name '{}' is already present in the label list".format(name))
            seen.add(name.lower())
            self.labels.append(value)
        self.ignores = set()
        for name in config.get('ignores', []):
            self._validate_str(name)
            self.ignores.add(name.lower())
    def _resolve_color(self, color):
        """Resolve a named color via the color table; pass hex strings through.

        NOTE(review): literal '#rrggbb' values keep their leading '#', while
        named colors resolve to the '#'-stripped values stored by
        ``_parse_colors``. The GitHub API returns colors without '#', so
        literal colors may always compare as modified in ``_find_label`` —
        confirm against the config conventions.
        """
        if RE_VALID_COLOR.match(color) is None:
            color = self.colors[color]
        return color
    def _parse_colors(self, config):
        """Build the name -> hex (without '#') color table from the config."""
        self.colors = {}
        for name, color in config.get('colors', {}).items():
            self._validate_color(color)
            self._validate_str(name)
            if name in self.colors:
                raise ValueError("The name '{}' is already present in the color list".format(name))
            self.colors[name] = color[1:]
    def _find_label(self, label, label_color, label_description):
        """Match a live label against the config; return a LabelEdit or None.

        Matching is case-insensitive on the label's *current* name (which is
        the 'renamed' key when a rename is configured).
        """
        edit = None
        for value in self.labels:
            name = value['name']
            old_name = value.get('renamed', name)
            if label.lower() != old_name.lower():
                continue
            new_name = name
            color = value['color']
            description = value.get('description', '')
            modified = False
            # Editing an existing label: flag when color or description
            # (or effectively the name, via renamed) differ.
            if (
                label.lower() == old_name.lower() and
                (label_color.lower() != color.lower() or label_description != description)
            ):
                modified = True
            edit = LabelEdit(old_name, new_name, color, description, modified=modified)
            break
        return edit
    def sync(self):
        """Reconcile the repository's labels with the configured label set."""
        # Names handled during the first pass; anything configured but not in
        # this set gets created afterwards.
        updated = set()
        for label in self.git.get_labels():
            edit = self._find_label(label['name'], label['color'], label['description'])
            if edit is not None and edit.modified:
                print('    Updating {}: #{} "{}"'.format(edit.new, edit.color, edit.description))
                if not self.debug:
                    self.git.edit_label(edit.old, edit.new, edit.color, edit.description)
                updated.add(edit.old)
                updated.add(edit.new)
            else:
                if edit is None and self.delete and label['name'].lower() not in self.ignores:
                    print('    Deleting {}: #{} "{}"'.format(label['name'], label['color'], label['description']))
                    if not self.debug:
                        self.git.delete_label(label['name'])
                else:
                    # Label already matches the config (or is kept): no API call.
                    print('    Skipping {}: #{} "{}"'.format(label['name'], label['color'], label['description']))
                    updated.add(label['name'])
        for value in self.labels:
            name = value['name']
            color = value['color']
            description = value.get('description', '')
            if name not in updated:
                print('    Creating {}: #{} "{}"'.format(name, color, description))
                if not self.debug:
                    self.git.create_label(name, color, description)
def main():
    """Entry point for the GitHub Action: read inputs from environment
    variables, then synchronize the repository's labels."""
    # Parse the debug flag via a lookup table of the accepted values.
    debug_options = {'enable': True, 'disable': False}
    dbg = os.getenv("INPUT_DEBUG", 'disable')
    if dbg not in debug_options:
        raise ValueError('Unknown value for debug: {}'.format(dbg))
    debug = debug_options[dbg]
    # Parse mode
    mode_options = {'delete': True, 'normal': False}
    mode = os.getenv("INPUT_MODE", 'normal')
    if mode not in mode_options:
        raise ValueError('Unknown mode: {}'.format(mode))
    delete = mode_options[mode]
    # Get the user's name and repository so we can access the labels for the repository
    repository = os.getenv("GITHUB_REPOSITORY")
    if not repository or '/' not in repository:
        raise ValueError('Could not acquire user name and repository name')
    user, repo = repository.split('/')
    # Acquire access token
    token = os.getenv("INPUT_TOKEN", '')
    if not token:
        raise ValueError('No token provided')
    # Get label file
    config = os.getenv("INPUT_FILE", '.github/labels.yml')
    # Sync the labels
    GhLabelSync(config, Api(token, user, repo), delete, debug).sync()
    return 0
if __name__ == "__main__":
    sys.exit(main())
| StarcoderdataPython |
def solution(A):
    """Determine if a triangular triplet exists within a list of integers.
    A triangular triplet consists of three points P, Q, and R, where:
        (1) P + Q > R
        (2) Q + R > P
        (3) R + P > Q
    Args:
        A (list): A list of N integers.
    Returns:
        int: 1 if a triangular triplet exists, 0 if not.
    Complexity:
        Time: O(N*log(N))
        Space: O(N)
    """
    if len(A) < 3:
        return 0
    A = sorted(A)
    # After sorting, A[i] >= A[i-1] >= A[i-2], so conditions (2) and (3)
    # hold automatically; only (1) needs checking for adjacent triples.
    # NOTE: `xrange` was Python 2 only and raised NameError on Python 3;
    # `range` is the correct replacement.
    for i in range(2, len(A)):
        if A[i] < A[i - 1] + A[i - 2]:
            return 1
    return 0
| StarcoderdataPython |
3285066 | <reponame>CODARcode/cheetah<filename>examples/02-coupling/mean_calculator.py<gh_stars>1-10
#!/usr/bin/env python3
#
# Distributed under the OSI-approved Apache License, Version 2.0. See
# accompanying file Copyright.txt for details.
#
# helloBPReaderHeatMap2D.py
#
#
# Created on: Dec 5th, 2017
# Author: <NAME> <EMAIL>
#
# Reads the "psi" variable from the diagnostics.bp stream step by step and
# prints the per-rank mean of each step's slice.
from mpi4py import MPI
import numpy
import adios2
import time
import sys
# MPI: each rank reads an equal contiguous slice of psi.
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
# ADIOS portion
adios = adios2.ADIOS("adios2.xml", comm, adios2.DebugON)
ioRead = adios.DeclareIO("producer")
ibpStream = ioRead.Open('diagnostics.bp', adios2.Mode.Read, MPI.COMM_WORLD)
# Buffer is allocated lazily once the selection size is known.
psi = None
while True:
    time.sleep(2)
    stepstat = ibpStream.BeginStep()
    if stepstat == adios2.StepStatus.EndOfStream:
        if rank == 0:
            print("{} encountered end of stream. Exiting ..".format(sys.argv[0]))
        break
    curStep = ibpStream.CurrentStep()
    if rank == 0:
        print("Parsing step {}".format(curStep))
    var_psi = ioRead.InquireVariable("psi")
    assert var_psi is not None, "could not find psi"
    psi_size = var_psi.Shape()
    if rank == 0:
        print("Size of psi: {}".format(psi_size))
    # NOTE(review): integer division drops the remainder, so trailing
    # elements go unread when psi_size[0] is not divisible by `size`.
    var_psi.SetSelection([[psi_size[0] // size * rank], [psi_size[0] // size]])
    inSize = var_psi.SelectionSize()
    # Allocate once, on the first step actually seen (the old guard
    # `curStep == 0` broke if the stream did not start at step 0).
    if psi is None:
        # `numpy.int` was removed in NumPy 1.24; the builtin `int` is the
        # equivalent dtype specification.
        psi = numpy.zeros(inSize, dtype=int)
    ibpStream.Get(var_psi, psi)
    ibpStream.EndStep()
    print("Rank {}: Mean of values of psi in step {}: {}".format(rank, curStep, numpy.mean(psi)))
ibpStream.Close()
| StarcoderdataPython |
324726 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import time
from typing import List
import numpy as np
import paddle
from paddle import nn
from paddle.nn import functional as F
from paddlespeech.t2s.audio.codec import decode_mu_law
from paddlespeech.t2s.modules.losses import sample_from_discretized_mix_logistic
from paddlespeech.t2s.modules.nets_utils import initialize
from paddlespeech.t2s.modules.upsample import Stretch2D
class ResBlock(nn.Layer):
    """Residual unit used by MelResNet: two (1x1 Conv1D -> BatchNorm) stages
    with a ReLU in between, added back onto the input (identity shortcut)."""
    def __init__(self, dims):
        super().__init__()
        # 1x1 convolutions keep the channel count, so the identity shortcut
        # needs no projection.
        self.conv1 = nn.Conv1D(dims, dims, kernel_size=1, bias_attr=False)
        self.conv2 = nn.Conv1D(dims, dims, kernel_size=1, bias_attr=False)
        self.batch_norm1 = nn.BatchNorm1D(dims)
        self.batch_norm2 = nn.BatchNorm1D(dims)
    def forward(self, x):
        '''Apply conv -> bn -> relu -> conv -> bn and add the residual input.'''
        shortcut = x
        h = F.relu(self.batch_norm1(self.conv1(x)))
        h = self.batch_norm2(self.conv2(h))
        return h + shortcut
class MelResNet(nn.Layer):
    """Residual 1-D CNN over auxiliary (mel) features: a context-window
    convolution, `res_blocks` ResBlocks, then a 1x1 projection to
    `res_out_dims` channels."""
    def __init__(self,
                 res_blocks: int=10,
                 compute_dims: int=128,
                 res_out_dims: int=128,
                 aux_channels: int=80,
                 aux_context_window: int=0):
        super().__init__()
        # The kernel spans the context window on both sides, so the output is
        # shorter than the input by 2 * aux_context_window frames.
        k_size = aux_context_window * 2 + 1
        self.conv_in = nn.Conv1D(
            aux_channels, compute_dims, kernel_size=k_size, bias_attr=False)
        self.batch_norm = nn.BatchNorm1D(compute_dims)
        self.layers = nn.LayerList(
            [ResBlock(compute_dims) for _ in range(res_blocks)])
        self.conv_out = nn.Conv1D(compute_dims, res_out_dims, kernel_size=1)
    def forward(self, x):
        '''
        Args:
            x (Tensor): Input tensor (B, in_dims, T).
        Returns:
            Tensor: Output tensor (B, res_out_dims, T - 2 * aux_context_window).
        '''
        h = F.relu(self.batch_norm(self.conv_in(x)))
        for block in self.layers:
            h = block(h)
        return self.conv_out(h)
class UpsampleNetwork(nn.Layer):
    """Upsamples conditioning mel frames towards sample rate.
    A MelResNet branch produces auxiliary features while a stack of
    stretch + smoothing-conv layers upsamples the mels; both branches are
    stretched in time by prod(upsample_scales)."""
    def __init__(self,
                 aux_channels: int=80,
                 upsample_scales: List[int]=[4, 5, 3, 5],
                 compute_dims: int=128,
                 res_blocks: int=10,
                 res_out_dims: int=128,
                 aux_context_window: int=2):
        super().__init__()
        # Total upsampling factor across all stages.
        total_scale = np.prod(upsample_scales)
        # Context-window frames expressed in samples; int() because np.prod
        # returns numpy.int64.
        self.indent = int(aux_context_window * total_scale)
        self.resnet = MelResNet(
            res_blocks=res_blocks,
            aux_channels=aux_channels,
            compute_dims=compute_dims,
            res_out_dims=res_out_dims,
            aux_context_window=aux_context_window)
        self.resnet_stretch = Stretch2D(total_scale, 1)
        self.up_layers = nn.LayerList()
        for scale in upsample_scales:
            k_size = (1, scale * 2 + 1)
            padding = (0, scale)
            stretch = Stretch2D(scale, 1)
            conv = nn.Conv2D(
                1, 1, kernel_size=k_size, padding=padding, bias_attr=False)
            # Initialize each conv as a moving-average (box) filter so it
            # smooths the nearest-neighbour stretch.
            weight_ = paddle.full_like(conv.weight, 1. / k_size[1])
            conv.weight.set_value(weight_)
            self.up_layers.append(stretch)
            self.up_layers.append(conv)
    def forward(self, m):
        '''
        Args:
            m (Tensor): Input mel tensor (B, C_aux, T).
        Returns:
            Tensor: Upsampled mels (B, (T - 2 * pad) * prod(upsample_scales), C_aux).
            Tensor: Upsampled aux features (B, (T - 2 * pad) * prod(upsample_scales), res_out_dims).
        '''
        # Aux branch: the resnet shortens T by the context window, then the
        # result is stretched in time; the dummy channel axis is added/removed
        # around the 2-D stretch.
        aux = self.resnet_stretch(self.resnet(m).unsqueeze(1)).squeeze(1)
        # Mel branch: alternate nearest-neighbour stretch and smoothing conv.
        feats = m.unsqueeze(1)
        for layer in self.up_layers:
            feats = layer(feats)
        # Trim the context-window padding and drop the dummy channel axis.
        feats = feats.squeeze(1)[:, :, self.indent:-self.indent]
        # Return both branches time-major: (B, T_up, channels).
        return feats.transpose([0, 2, 1]), aux.transpose([0, 2, 1])
class WaveRNN(nn.Layer):
    """WaveRNN vocoder: an autoregressive, sample-level RNN conditioned on
    mel spectrograms upsampled to sample rate by ``UpsampleNetwork``.
    Supports two output modes: 'RAW' (categorical distribution over
    2**bits quantized amplitude classes) and 'MOL' (mixture of 10
    discretized logistics, 3 parameters each).
    """
    def __init__(
            self,
            rnn_dims: int=512,
            fc_dims: int=512,
            bits: int=9,
            aux_context_window: int=2,
            upsample_scales: List[int]=[4, 5, 3, 5],
            aux_channels: int=80,
            compute_dims: int=128,
            res_out_dims: int=128,
            res_blocks: int=10,
            hop_length: int=300,
            sample_rate: int=24000,
            mode='RAW',
            init_type: str="xavier_uniform", ):
        '''
        Args:
            rnn_dims (int, optional): Hidden dims of RNN Layers.
            fc_dims (int, optional): Dims of FC Layers.
            bits (int, optional): bit depth of signal.
            aux_context_window (int, optional): The context window size of the first convolution applied to the
                auxiliary input, by default 2
            upsample_scales (List[int], optional): Upsample scales of the upsample network.
                (NOTE: mutable default list — safe only because it is never mutated.)
            aux_channels (int, optional): Auxiliary channel of the residual blocks.
            compute_dims (int, optional): Dims of Conv1D in MelResNet.
            res_out_dims (int, optional): Dims of output in MelResNet.
            res_blocks (int, optional): Number of residual blocks.
            hop_length (int, optional): Number of audio samples per mel frame.
            sample_rate (int, optional): Audio sample rate in Hz.
            mode (str, optional): Output mode of the WaveRNN vocoder.
                `MOL` for Mixture of Logistic Distribution, and `RAW` for quantized bits as the model's output.
            init_type (str): How to initialize parameters.
        Raises:
            RuntimeError: if `mode` is neither 'RAW' nor 'MOL'.
        '''
        super().__init__()
        self.mode = mode
        self.aux_context_window = aux_context_window
        if self.mode == 'RAW':
            self.n_classes = 2**bits
        elif self.mode == 'MOL':
            # 10 logistic mixture components x (logit, mean, log-scale)
            self.n_classes = 10 * 3
        else:
            # Fix: the exception was previously constructed but never raised,
            # so an invalid mode silently fell through and failed later with
            # a confusing AttributeError on `n_classes`.
            raise RuntimeError('Unknown model mode value - ', self.mode)
        # List of rnns to call 'flatten_parameters()' on
        self._to_flatten = []
        self.rnn_dims = rnn_dims
        self.aux_dims = res_out_dims // 4
        self.hop_length = hop_length
        self.sample_rate = sample_rate
        # initialize parameters
        initialize(self, init_type)
        self.upsample = UpsampleNetwork(
            aux_channels=aux_channels,
            upsample_scales=upsample_scales,
            compute_dims=compute_dims,
            res_blocks=res_blocks,
            res_out_dims=res_out_dims,
            aux_context_window=aux_context_window)
        self.I = nn.Linear(aux_channels + self.aux_dims + 1, rnn_dims)
        self.rnn1 = nn.GRU(rnn_dims, rnn_dims)
        self.rnn2 = nn.GRU(rnn_dims + self.aux_dims, rnn_dims)
        self._to_flatten += [self.rnn1, self.rnn2]
        self.fc1 = nn.Linear(rnn_dims + self.aux_dims, fc_dims)
        self.fc2 = nn.Linear(fc_dims + self.aux_dims, fc_dims)
        self.fc3 = nn.Linear(fc_dims, self.n_classes)
        # Avoid fragmentation of RNN parameters and associated warning
        self._flatten_parameters()
        nn.initializer.set_global_initializer(None)
    def forward(self, x, c):
        '''
        Args:
            x (Tensor): wav sequence, [B, T]
            c (Tensor): mel spectrogram [B, C_aux, T']
                T = (T' - 2 * aux_context_window ) * hop_length
        Returns:
            Tensor: [B, T, n_classes]
        '''
        # Although we `_flatten_parameters()` on init, when using DataParallel
        # the model gets replicated, making it no longer guaranteed that the
        # weights are contiguous in GPU memory. Hence, we must call it again
        self._flatten_parameters()
        bsize = paddle.shape(x)[0]
        h1 = paddle.zeros([1, bsize, self.rnn_dims])
        h2 = paddle.zeros([1, bsize, self.rnn_dims])
        # c: [B, T, C_aux]
        # aux: [B, T, res_out_dims]
        c, aux = self.upsample(c)
        # Split the aux features into four equal slices, one per network stage.
        aux_idx = [self.aux_dims * i for i in range(5)]
        a1 = aux[:, :, aux_idx[0]:aux_idx[1]]
        a2 = aux[:, :, aux_idx[1]:aux_idx[2]]
        a3 = aux[:, :, aux_idx[2]:aux_idx[3]]
        a4 = aux[:, :, aux_idx[3]:aux_idx[4]]
        x = paddle.concat([x.unsqueeze(-1), c, a1], axis=2)
        x = self.I(x)
        res = x
        x, _ = self.rnn1(x, h1)
        x = x + res
        res = x
        x = paddle.concat([x, a2], axis=2)
        x, _ = self.rnn2(x, h2)
        x = x + res
        x = paddle.concat([x, a3], axis=2)
        x = F.relu(self.fc1(x))
        x = paddle.concat([x, a4], axis=2)
        x = F.relu(self.fc2(x))
        return self.fc3(x)
    @paddle.no_grad()
    def generate(self,
                 c,
                 batched: bool=True,
                 target: int=12000,
                 overlap: int=600,
                 mu_law: bool=True,
                 gen_display: bool=False):
        """Autoregressively synthesize a waveform from a mel spectrogram.
        Args:
            c(Tensor): input mels, (T', C_aux)
            batched(bool): generate in batch or not
            target(int): target number of samples to be generated in each batch entry
            overlap(int): number of samples for crossfading between batches
            mu_law(bool): whether to decode the output with inverse mu-law
                (only meaningful in 'RAW' mode)
            gen_display(bool): print generation progress every 1000 samples
        Returns:
            wav sequence: Output (T' * prod(upsample_scales), out_channels, C_out).
        """
        self.eval()
        mu_law = mu_law if self.mode == 'RAW' else False
        output = []
        start = time.time()
        # pseudo batch
        # (T, C_aux) -> (1, C_aux, T)
        c = paddle.transpose(c, [1, 0]).unsqueeze(0)
        T = paddle.shape(c)[-1]
        wave_len = T * self.hop_length
        # TODO remove two transpose op by modifying function pad_tensor
        c = self.pad_tensor(
            c.transpose([0, 2, 1]), pad=self.aux_context_window,
            side='both').transpose([0, 2, 1])
        c, aux = self.upsample(c)
        if batched:
            # (num_folds, target + 2 * overlap, features)
            c = self.fold_with_overlap(c, target, overlap)
            aux = self.fold_with_overlap(aux, target, overlap)
        # for dygraph to static graph, if use seq_len of `b_size, seq_len, _ = paddle.shape(c)` in for
        # will not get TensorArray
        # see https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/04_dygraph_to_static/case_analysis_cn.html#list-lodtensorarray
        # b_size, seq_len, _ = paddle.shape(c)
        b_size = paddle.shape(c)[0]
        seq_len = paddle.shape(c)[1]
        h1 = paddle.zeros([b_size, self.rnn_dims])
        h2 = paddle.zeros([b_size, self.rnn_dims])
        x = paddle.zeros([b_size, 1])
        d = self.aux_dims
        aux_split = [aux[:, :, d * i:d * (i + 1)] for i in range(4)]
        for i in range(seq_len):
            m_t = c[:, i, :]
            # for dygraph to static graph
            # a1_t, a2_t, a3_t, a4_t = (a[:, i, :] for a in aux_split)
            a1_t = aux_split[0][:, i, :]
            a2_t = aux_split[1][:, i, :]
            a3_t = aux_split[2][:, i, :]
            a4_t = aux_split[3][:, i, :]
            x = paddle.concat([x, m_t, a1_t], axis=1)
            x = self.I(x)
            # use GRUCell here
            h1, _ = self.rnn1[0].cell(x, h1)
            x = x + h1
            inp = paddle.concat([x, a2_t], axis=1)
            # use GRUCell here
            h2, _ = self.rnn2[0].cell(inp, h2)
            x = x + h2
            x = paddle.concat([x, a3_t], axis=1)
            x = F.relu(self.fc1(x))
            x = paddle.concat([x, a4_t], axis=1)
            x = F.relu(self.fc2(x))
            logits = self.fc3(x)
            if self.mode == 'MOL':
                sample = sample_from_discretized_mix_logistic(
                    logits.unsqueeze(0).transpose([0, 2, 1]))
                output.append(sample.reshape([-1]))
                x = sample.transpose([1, 0, 2])
            elif self.mode == 'RAW':
                posterior = F.softmax(logits, axis=1)
                distrib = paddle.distribution.Categorical(posterior)
                # corresponding operate [np.floor((fx + 1) / 2 * mu + 0.5)] in enocde_mu_law
                # distrib.sample([1])[0].cast('float32'): [0, 2**bits-1]
                # sample: [-1, 1]
                sample = 2 * distrib.sample([1])[0].cast('float32') / (
                    self.n_classes - 1.) - 1.
                output.append(sample)
                x = sample.unsqueeze(-1)
            else:
                raise RuntimeError('Unknown model mode value - ', self.mode)
            if gen_display:
                if i % 1000 == 0:
                    self.gen_display(i, int(seq_len), int(b_size), start)
        output = paddle.stack(output).transpose([1, 0])
        if mu_law:
            output = decode_mu_law(output, self.n_classes, False)
        if batched:
            output = self.xfade_and_unfold(output, target, overlap)
        else:
            output = output[0]
        # Fade-out at the end to avoid signal cutting out suddenly
        fade_out = paddle.linspace(1, 0, 10 * self.hop_length)
        output = output[:wave_len]
        output[-10 * self.hop_length:] *= fade_out
        self.train()
        # add a trailing C_out axis
        return output.unsqueeze(-1)
    def _flatten_parameters(self):
        # Keep GRU weights contiguous in memory (avoids a paddle warning).
        [m.flatten_parameters() for m in self._to_flatten]
    def pad_tensor(self, x, pad, side='both'):
        '''Zero-pad a (B, T, C) tensor along the time axis.
        Args:
            x(Tensor): mel, [1, n_frames, 80]
            pad(int): number of frames to pad.
            side(str, optional): 'before', 'after' or 'both' (default 'both').
        Returns:
            Tensor
        '''
        b, t, _ = paddle.shape(x)
        # for dygraph to static graph
        c = x.shape[-1]
        total = t + 2 * pad if side == 'both' else t + pad
        padded = paddle.zeros([b, total, c])
        if side == 'before' or side == 'both':
            padded[:, pad:pad + t, :] = x
        elif side == 'after':
            padded[:, :t, :] = x
        return padded
    def fold_with_overlap(self, x, target, overlap):
        '''
        Fold the tensor with overlap for quick batched inference.
        Overlap will be used for crossfading in xfade_and_unfold()
        Args:
            x(Tensor): Upsampled conditioning features. mels or aux
                shape=(1, T, features)
                mels: [1, T, 80]
                aux: [1, T, 128]
            target(int): Target timesteps for each index of batch
            overlap(int): Timesteps for both xfade and rnn warmup
        Returns:
            Tensor:
                shape=(num_folds, target + 2 * overlap, features)
                num_flods = (time_seq - overlap) // (target + overlap)
                mel: [num_folds, target + 2 * overlap, 80]
                aux: [num_folds, target + 2 * overlap, 128]
        Details:
            x = [[h1, h2, ... hn]]
            Where each h is a vector of conditioning features
            Eg: target=2, overlap=1 with x.size(1)=10
            folded = [[h1, h2, h3, h4],
                      [h4, h5, h6, h7],
                      [h7, h8, h9, h10]]
        '''
        _, total_len, features = paddle.shape(x)
        # Calculate variables needed
        num_folds = (total_len - overlap) // (target + overlap)
        extended_len = num_folds * (overlap + target) + overlap
        remaining = total_len - extended_len
        # Pad if some time steps poking out
        if remaining != 0:
            num_folds += 1
            padding = target + 2 * overlap - remaining
            x = self.pad_tensor(x, padding, side='after')
        folded = paddle.zeros([num_folds, target + 2 * overlap, features])
        # Get the values for the folded tensor
        for i in range(num_folds):
            start = i * (target + overlap)
            end = start + target + 2 * overlap
            folded[i] = x[0][start:end, :]
        return folded
    def xfade_and_unfold(self, y, target: int=12000, overlap: int=600):
        ''' Applies a crossfade and unfolds into a 1d array.
        Args:
            y (Tensor):
                Batched sequences of audio samples
                shape=(num_folds, target + 2 * overlap)
                dtype=paddle.float32
            overlap (int): Timesteps for both xfade and rnn warmup
        Returns:
            Tensor
                audio samples in a 1d array
                shape=(total_len)
                dtype=paddle.float32
        Details:
            y = [[seq1],
                 [seq2],
                 [seq3]]
            Apply a gain envelope at both ends of the sequences
            y = [[seq1_in, seq1_target, seq1_out],
                 [seq2_in, seq2_target, seq2_out],
                 [seq3_in, seq3_target, seq3_out]]
            Stagger and add up the groups of samples:
            [seq1_in, seq1_target, (seq1_out + seq2_in), seq2_target, ...]
        '''
        # num_folds = (total_len - overlap) // (target + overlap)
        num_folds, length = paddle.shape(y)
        target = length - 2 * overlap
        total_len = num_folds * (target + overlap) + overlap
        # Need some silence for the run warmup
        slience_len = overlap // 2
        fade_len = overlap - slience_len
        slience = paddle.zeros([slience_len], dtype=paddle.float32)
        linear = paddle.ones([fade_len], dtype=paddle.float32)
        # Equal power crossfade
        # fade_in increase from 0 to 1, fade_out reduces from 1 to 0
        t = paddle.linspace(-1, 1, fade_len, dtype=paddle.float32)
        fade_in = paddle.sqrt(0.5 * (1 + t))
        fade_out = paddle.sqrt(0.5 * (1 - t))
        # Concat the silence to the fades
        fade_out = paddle.concat([linear, fade_out])
        fade_in = paddle.concat([slience, fade_in])
        # Apply the gain to the overlap samples
        y[:, :overlap] *= fade_in
        y[:, -overlap:] *= fade_out
        unfolded = paddle.zeros([total_len], dtype=paddle.float32)
        # Loop to add up all the samples
        for i in range(num_folds):
            start = i * (target + overlap)
            end = start + target + 2 * overlap
            unfolded[start:end] += y[i]
        return unfolded
    def gen_display(self, i, seq_len, b_size, start):
        # Progress display: generation rate measured in kHz of audio per second.
        gen_rate = (i + 1) / (time.time() - start) * b_size / 1000
        pbar = self.progbar(i, seq_len)
        msg = f'| {pbar} {i*b_size}/{seq_len*b_size} | Batch Size: {b_size} | Gen Rate: {gen_rate:.1f}kHz | '
        sys.stdout.write(f"\r{msg}")
    def progbar(self, i, n, size=16):
        # Render an i/n progress bar of `size` characters.
        done = int(i * size) // n
        bar = ''
        for i in range(size):
            bar += '█' if i <= done else '░'
        return bar
class WaveRNNInference(nn.Layer):
    """Inference wrapper: normalizes a log-mel spectrogram, then runs
    WaveRNN synthesis on it."""
    def __init__(self, normalizer, wavernn):
        super().__init__()
        self.normalizer = normalizer
        self.wavernn = wavernn
    def forward(self,
                logmel,
                batched: bool=True,
                target: int=12000,
                overlap: int=600,
                mu_law: bool=True,
                gen_display: bool=False):
        # NOTE: only the normalized mel is forwarded to generate(); the
        # remaining keyword arguments are accepted but currently unused
        # (the pass-through is commented out upstream).
        mel = self.normalizer(logmel)
        wav = self.wavernn.generate(
            mel, )
        # batched=batched,
        # target=target,
        # overlap=overlap,
        # mu_law=mu_law,
        # gen_display=gen_display)
        return wav
| StarcoderdataPython |
9797126 | # Copyright 2018 <NAME> <<EMAIL>>
# 2018 <NAME> <<EMAIL>>
#
# This file is part of PyDDM, and is available under the MIT license.
# Please see LICENSE.txt in the root directory for more information.
__all__ = ["Bound", "BoundConstant", "BoundCollapsingLinear", "BoundCollapsingExponential"]
import numpy as np
from .base import Dependence
from paranoid import *
class Bound(Dependence):
    """Subclass this to specify how bounds vary with time.
    This abstract class provides the methods which define a dependence
    of the bounds on t. To subclass it, implement get_bound. All
    subclasses must include a parameter `B` in required_parameters,
    which is the upper bound at the start of the simulation. (The
    lower bound is symmetrically -B.)
    Also, since it inherits from Dependence, subclasses must also
    assign a `name` and `required_parameters` (see documentation for
    Dependence.)
    """
    # Identifies this Dependence category within the framework.
    depname = "Bound"
    def get_bound(self, t, conditions, **kwargs):
        """Calculate the bounds which particles cross to determine response time.
        This function must be redefined in subclasses.
        It may take up to two arguments:
        - `t` - The time at which bound should be calculated
        - `conditions` - A dictionary describing the task conditions
        It should return a non-negative number indicating the upper
        bound at that particular time, and task conditions.  The lower
        bound is taken to be the negative of the upper bound.
        Definitions of this method in subclasses should only have
        arguments for needed variables and should always be followed
        by "**kwargs".  For example, if the function does not depend
        on task conditions but does depend on time, this should be:
          | def get_bound(self, t, **kwargs):
        Of course, the function would still work properly if
        `conditions` were included as an argument, but this convention
        allows PyDDM to automatically select the best simulation
        methods for the model.
        """
        # Abstract: subclasses must override; calling the base version is
        # always a programming error.
        raise NotImplementedError("Bound model %s invalid: must define the get_bound function" % self.__class__.__name__)
@paranoidclass
class BoundConstant(Bound):
    """Bound dependence: bound is constant throuhgout the simulation.
    Takes only one parameter: `B`, the constant bound.
    Example usage:
      | bound = BoundConstant(B=1.5) # Bound at 1.5 and -1.5
    """
    name = "constant"
    required_parameters = ["B"]
    # Paranoid Scientist contract: B must be strictly positive.
    @staticmethod
    def _test(v):
        assert v.B in Positive()
    @staticmethod
    def _generate():
        yield BoundConstant(B=1)
        yield BoundConstant(B=100)
    @accepts(Self)
    @returns(Positive)
    def get_bound(self, *args, **kwargs):
        # Constant bound: ignores time and task conditions entirely.
        return self.B
@paranoidclass
class BoundCollapsingLinear(Bound):
    """A bound that shrinks linearly over time, floored at zero.

    Takes two parameters:

    - `B` - the bound at time t = 0.
    - `t` - the slope, i.e. the coefficient of time; should be greater than zero.

    Example usage:

      | bound = BoundCollapsingLinear(B=1, t=.5) # Collapsing at .5 units per second
    """
    name = "collapsing_linear"
    required_parameters = ["B", "t"]
    @staticmethod
    def _test(v):
        assert v.B in Positive()
        assert v.t in Positive0()
    @staticmethod
    def _generate():
        for start, slope in ((1, 1), (100, 50.1)):
            yield BoundCollapsingLinear(B=start, t=slope)
    @accepts(Self, Positive0)
    @returns(Positive0)
    @ensures("self == self` and t < t` --> return >= return`") # Monotonic decreasing
    def get_bound(self, t, *args, **kwargs):
        remaining = self.B - self.t * t
        # Never let the bound go negative once it has fully collapsed.
        return remaining if remaining > 0 else 0.
@paranoidclass
class BoundCollapsingExponential(Bound):
    """Bound dependence: bound collapses exponentially over time.

    Takes two parameters:

    - `B` - the bound at time t = 0.
    - `tau` - the time constant for the collapse, should be greater than zero.

    Example usage:

      | bound = BoundCollapsingExponential(B=1, tau=2.1) # Collapsing with time constant 2.1
    """
    name = "collapsing_exponential"
    required_parameters = ["B", "tau"]
    @staticmethod
    def _test(v):
        assert v.B in Positive()
        assert v.tau in Positive()
    @staticmethod
    def _generate():
        yield BoundCollapsingExponential(B=1, tau=1)
        yield BoundCollapsingExponential(B=.1, tau=.001)
        yield BoundCollapsingExponential(B=100, tau=100)
    @accepts(Self, Positive0)
    @returns(Positive0)
    @ensures("self == self` and t < t` --> return >= return`") # Monotonic decreasing
    def get_bound(self, t, *args, **kwargs):
        # BUG FIX: `tau` is documented as a *time constant*, so the decay must
        # be exp(-t/tau).  The previous exp(-self.tau*t) treated tau as a
        # rate, contradicting both the docstring and the reference PyDDM
        # implementation of this class.
        return self.B * np.exp(-t/self.tau)
| StarcoderdataPython |
1757747 | from pra_request_tracker.settings import * # noqa: F403
# Store uploaded media under the project's apps directory.  APPS_DIR comes
# from the star-imported base settings module.
MEDIA_ROOT = APPS_DIR / "media"  # noqa: F405
# Drop DEFAULT_FILE_STORAGE if the base settings defined it — presumably to
# fall back to Django's default storage backend; confirm against the base
# settings module.
if "DEFAULT_FILE_STORAGE" in globals():
    del DEFAULT_FILE_STORAGE  # noqa: F821
| StarcoderdataPython |
6677564 | <filename>onlinevars/apps.py
from django.apps import AppConfig
class OnlinevarsConfig(AppConfig):
    """Django application configuration for the ``onlinevars`` app."""
    name = 'onlinevars'
| StarcoderdataPython |
3569237 | <filename>my_code.py
# Collaborators (including web sites where you got help: none
#
import random
def news_story():
    """Print a randomly chosen weekly headline and return its market effect.

    The returned value is a float multiplier (drawn uniformly from a range
    specific to the chosen headline) that the caller applies to every stock
    price at the end of a round.
    """
    # One entry per headline: (text, low, high, divisor).  The multiplier is
    # randint(low, high) / divisor, which mirrors the original per-branch
    # integer draws (divisor 1000 entries give finer-grained multipliers).
    stories = [
        ("There is a hurricane coming! Get ready to stock up and stay at home!", 95, 105, 100),
        ("Detroit to host Olympics! Expected Boom in economy!", 1010, 1015, 1000),
        ("Social justice issues cause uprisings in urban cities!", 90, 95, 100),
        ("Presidental election causes uncertainty in the market", 85, 95, 100),
        ("Water Bottle Companies Facing scrutinee for poisonous plastic", 60, 80, 100),
        ("Oil found beneath california! Oils prices drop enourmously!", 80, 110, 100),
        ("Strict restrictions placed on gas cars all around the US", 80, 120, 100),
        ("Competing between social networks leads to a boom in social media use!", 120, 140, 100),
        ("Farmers all around the United States going bankrupt!", 60, 90, 100),
        ("Schools have been shutdown due to a strike sparked by teachers!", 900, 1001, 1000),
    ]
    print("This weeks headlines: ")
    print(".")
    pick = random.randint(1, 10)
    headline, low, high, divisor = stories[pick - 1]
    print(headline)
    return random.randint(low, high) / divisor
print("Welcome to a stock market simulator!")
print("You will be given an arbitrary news story. Every investment you make the news story changes. Only the final story matters for the market. Invest accordingly. This simulation is meant to provide practice for real investing.")
print("You have $10,000 to spend")
#The starting price for stocks
# Each share price is drawn from a stock-specific range so every game starts
# with slightly different prices.
tesla=random.randint(100,115)
print(f"1. a share of tesla cost {tesla}")
papermate=random.randint(65, 80)
print(f"2. a share of papermate cost {papermate}")
safeway=random.randint(80,115)
print(f"3. a share of safeway cost {safeway}")
honda=random.randint(110, 130)
print(f"4. a share of honda cost {honda}")
chevron=random.randint(110,125)
print(f"5. a share of chevron cost {chevron}")
arrowhead=random.randint(80,90)
print(f"6. a share of arrowhead cost {arrowhead}")
lewies_burgers=random.randint(20,23)
print(f"7. a share of Lewies Burgers cost {lewies_burgers}")
total=10000 #used to find how much money you have left
y=0#used for while loop (counts completed rounds; the game runs 3 rounds)
#setting shares bought until line 81:
tesla_shares=0
papermate_shares=0
safeway_shares=0
honda_shares=0
chevron_shares=0
arrowhead_shares=0
lewies_burgers_shares=0
spent=[]#numbers put on and taken off for every investment
spent2=[]#used for total in market calculation (values dont clear)
final_profit=0#used to find the total profit if all rounds
while y<3: #runs 3 rounds (a round ends each time the player types 'done')
    # A fresh headline is shown before every prompt; only the last one drawn
    # in a round is applied to the market (see the intro text).
    news=news_story()
    investment=(input("Type the number of the stock to invest in it or type 'done' if you are finished investing: "))
    try:
        investment=int(investment)
        # After a successful int() this comparison is always true; it is kept
        # for clarity of intent (numeric input == a purchase).
        if investment!="done" and investment!="Done":
            if investment==1: #buying shares until line 133
                how_much=int(input("How many shares of tesla would you like to buy: " ))
                tesla_shares=tesla_shares+how_much
                tesla_spent=tesla*how_much
                spent.append(tesla_spent)
                spent2.append(tesla_spent)
            elif investment==2:
                how_much=int(input("How many shares of papermate would you like to buy: " ))
                papermate_shares=papermate_shares+how_much
                papermate_spent=papermate*how_much
                spent.append(papermate_spent)
                spent2.append(papermate_spent)
            elif investment==3:
                how_much=int(input("How many shares of safeway would you like to buy: " ))
                safeway_shares=safeway_shares+how_much
                safeway_spent=safeway*how_much
                spent.append(safeway_spent)
                spent2.append(safeway_spent)
            elif investment==4:
                how_much=int(input("How many shares of honda would you like to buy: " ))
                honda_shares=honda_shares+how_much
                honda_spent=honda*how_much
                spent.append(honda_spent)
                spent2.append(honda_spent)
            elif investment==5:
                how_much=int(input("How many shares of chevron would you like to buy: " ))
                chevron_shares=chevron_shares+how_much
                chevron_spent=chevron*how_much
                spent.append(chevron_spent)
                spent2.append(chevron_spent)
            elif investment==6:
                how_much=int(input("How many shares of arrowhead would you like to buy: " ))
                arrowhead_shares=arrowhead_shares+how_much
                arrowhead_spent=arrowhead*how_much
                spent.append(arrowhead_spent)
                spent2.append(arrowhead_spent)
            elif investment==7:
                how_much=int(input("How many shares of lewies_burgers would you like to buy: " ))
                lewies_burgers_shares=lewies_burgers_shares+how_much
                lewies_burgers_spent=lewies_burgers*how_much
                spent.append(lewies_burgers_spent)
                # BUG FIX: this purchase was never added to spent2, so
                # lewies_burgers money was excluded from totalInMarket.
                spent2.append(lewies_burgers_spent)
    # int() raised ValueError: either the player typed 'done' or gave
    # invalid input.  (Narrowed from a bare `except:` so real errors such as
    # KeyboardInterrupt are no longer swallowed.)
    except ValueError:
        if investment=="done" or investment=="Done":#finished round protocal
            total=total-sum(spent)
            spent=[]
            print(f"You have {total} dollars left in the bank")
            totalInMarket=sum(spent2)
            stocksafterchange=[]
            stocks_changing=[tesla, papermate, safeway, honda, chevron, arrowhead, lewies_burgers]
            for x in stocks_changing:
                x=x*news #multiplys all stocks buy result of the news story function
                stocksafterchange.append(x)
            # NOTE(review): profit compares summed *prices*, not price times
            # shares owned -- presumably intentional for this exercise;
            # confirm before relying on the figure.
            profit=sum(stocksafterchange)-totalInMarket
            profit=round(profit, 2)
            if profit>0:
                print(f"You made {profit} dollars in profit this round")
            elif profit==0:
                print("You broke even this round")
            elif profit<0:
                # BUG FIX: report the loss as a positive amount instead of
                # printing a negative number after the word "lost".
                print(f"You lost {-profit} dollars this round")
            print("ROUND COMPLETED")
            final_profit=final_profit+profit
            tesla=tesla*news#setting the new stock values to print
            tesla=round(tesla,2)
            papermate=papermate*news
            papermate=round(papermate, 2)
            safeway=safeway*news
            safeway=round(safeway, 2)
            honda=honda*news
            honda=round(honda,2)
            chevron=chevron*news
            chevron=round(chevron, 2)
            arrowhead=arrowhead*news
            arrowhead=round(arrowhead,2)
            lewies_burgers=lewies_burgers*news
            lewies_burgers=round(lewies_burgers, 2)
            print(f"1. a share of tesla cost {tesla}")
            print(f"2. a share of papermate cost {papermate}")
            print(f"3. a share of safeway cost {safeway}")
            print(f"4. a share of honda cost {honda}")
            print(f"5. a share of chevron cost {chevron}")
            print(f"6. a share of arrowhead cost {arrowhead}")
            print(f"7. a share of Lewies Burgers cost {lewies_burgers}")
            # NOTE(review): this draw is discarded -- the loop top draws a
            # new story immediately.  Kept to preserve the printed output.
            news=news_story()
            y=y+1
        else:
            print("input is invalid")
print(" ")
print(" ")
print(f"The total result of you money in the market was {final_profit} dollars!")
#def investment():
# round_num=0
# while round_num==0:
# starting_stocks()
# news_story()
# round_num=+1
#investment()
#def control_center():
#print("Welcome to the market! You have 10,000 dollars to spend. Save it, spend it, risk it!")
#nav=input("Type '1' to see weekly news storys, type '2' to see availible stocks and prices, and type 3 to insider trade! ")
#if nav==1:
#news_story()
#if nav==2:
#
#control_center()
| StarcoderdataPython |
259072 | import unittest
from queue import Queue
class QueueTests(unittest.TestCase):
    """Unit tests for the Queue class; currently only builds the fixture."""
    # create an instance of the Queue Class
    def setUp(self):
        # NOTE(review): the attribute name `dll` suggests this Queue is backed
        # by a doubly linked list -- confirm against the imported `queue`
        # module, which may be a local module shadowing the stdlib one.
        self.dll = Queue()
if __name__ == '__main__':
    # Discover and run the tests in this module when executed directly.
    unittest.main()
| StarcoderdataPython |
9742883 | # encoding: utf-8
from apiutil.support import APISupportLayer
from ._exceptions import TemplateAPIError, TemplateAPIMissing, TemplateAPIUnresponsive
from .apis import template
class EmptyTemplateAPILayer(APISupportLayer):
    """An API support layer that registers no APIs — a minimal stub/example."""
class TemplateAPILayer(APISupportLayer):
    """An API support layer exposing the single template API."""
    # APIs made available through this support layer.
    APIS = [
        template
    ]
    # Custom Exceptions
    # Exception classes the base layer raises for this API's failure modes.
    API_ERROR = TemplateAPIError
    API_MISSING = TemplateAPIMissing
    API_UNRESPONSIVE = TemplateAPIUnresponsive
| StarcoderdataPython |
3403049 | """
Create Rendering Provider Specialty and Taxonomy SAS Formats
Purpose:
- Retrieve the latest CMS crosswalk for rendering provider specialty
and taxonomy codes and descriptions.
- Clean and export this data to SAS format tables on the SAS server.
- Create a CSV copy of the crosswalk to accompany these datasets.
Prerequisites:
- In addition to installing Python/Anaconda on your computer,
you will also need to install the requests and saspy modules using the
'conda install requests' and 'conda install saspy' commands in Anaconda
Prompt.
- You will also need to configure saspy.
"""
import requests
import csv
import os.path
import re
import saspy
import pandas as pd
import sys
from copy import deepcopy
from operator import itemgetter
# INPUT DATA PARAMETERS
landing = ('https://data.cms.gov/Medicare-Enrollment/'
    'CROSSWALK-MEDICARE-PROVIDER-SUPPLIER-to-HEALTHCARE/j75i-rw8y')
# Page with a download link to the needed CSV data
# Everything after the final '/' of the landing URL is the dataset code.
site_code = landing[len(landing) - landing[::-1].find('/'):]
# The download site for DATA.CMS.GOV uses the landing site's code
site = ('https://data.cms.gov/api/views/'
    + site_code
    + '/rows.csv?accessType=DOWNLOAD')
# This template can likely be used for any DATA.CMS.GOV file
ecode = 'utf-8-sig'
# utf-8 would work here, but utf-8-sig can also handle the byte order mark
# (BOM) characters commonly used in Excel UTF-8
# OUTPUT DATA PARAMETERS
sas = saspy.SASsession(cfgname='pdw_config')
# Point the `fmt` libref at the staging area where formats are written.
sas_code = sas.submit("""
LIBNAME fmt "/sasprod/dw/formats/source/staging";
""")
grid = ("//grid/sasprod/dw/formats/source")
out_file = ("//grid/sasprod/dw/formats/source/references/"
    "cms_rendspec_taxrend_taxtype.csv")
# Accumulators for the three SAS format tables built while parsing the CSV.
out_rendspec = list()
out_taxrend = list()
out_taxtype = list()
# PROCESS WEB DATA IN MEMORY
with requests.Session() as my_session:
    raw_source = my_session.get(site)
    # Pull in the website's CSV data as a Requests object
    source = raw_source.content.decode(ecode)
    # Convert the website's CSV data into a decoded text string
    src_reader = csv.reader(source.splitlines(), delimiter=',')
    # Read in the newly-created string line by line
with open(out_file, 'w', newline = '') as foo:
    out_write = csv.writer(foo
        , delimiter=','
        , quotechar='\''
        , quoting = csv.QUOTE_ALL)
    # Create a writer object for printing to a new output CSV file
    row_counter = 0
    header = str()
    prior_filled_row = list()
    for row in src_reader:
        # Process each row in the string created from the website data
        row_counter += 1
        if (row_counter == 1 and row != ['MEDICARE SPECIALTY CODE'
                , 'MEDICARE PROVIDER/SUPPLIER TYPE DESCRIPTION'
                , 'PROVIDER TAXONOMY CODE'
                , 'PROVIDER TAXONOMY DESCRIPTION']):
            sys.exit('Warning: the web data structure has changed.')
        elif row_counter == 1:
            header = row
            out_write.writerow(header)
            continue
        # Don't add rows that are intended as comments in the CSV
        if row[0] and (row[0].strip()[0] == '['
                or len(row) != 4):
            continue
        # Remove extra quotes and replace commas with a placeholder '.'
        # to be removed later on.  (FIX: the original had two identical
        # branches here, one for i == 1 and one for every other column;
        # they are collapsed into a single test.)
        for i in range(len(row)):
            if ',' in row[i]:
                row[i] = row[i].strip('\'').replace(',', '.')
        # Resolves issue where a single row contains >1 taxonomy code
        # and >1 taxonomy description.
        if (len(row[2]) > 10 and len(row[2]) % 10 == 0):
            tax_desc_init= row[3]
            if re.search('[a-z][A-Z]', tax_desc_init):
                tax_desc_list = list()
                # Split concatenated descriptions at each lowercase->uppercase
                # boundary (descriptions were run together without separators).
                while re.search('[a-z][A-Z]', tax_desc_init):
                    s = re.search('[a-z][A-Z]', tax_desc_init).start()
                    e = re.search('[a-z][A-Z]', tax_desc_init).end()
                    tax_desc_list.append(tax_desc_init[:s + 1])
                    tax_desc_init = tax_desc_init[e-1:]
                tax_desc_list.append(tax_desc_init)
                tax_desc_len = len(tax_desc_list)
            else:
                tax_desc_list = []
                tax_desc_len = 0
            tax_desc = tax_desc_init
            # Emit one output row per 10-character taxonomy code chunk.
            for i in range(1, int(len(row[2])/10) + 1):
                code = row[2][(10 * (i-1)):(10 * i)]
                if (tax_desc_len >= i
                        and tax_desc_list[i-1]):
                    tax_desc = tax_desc_list[i-1]
                new_row = [row[0]
                    , row[1]
                    , code
                    , tax_desc]
                # Remove references (e.g., '[1]' and '[12]')
                # Populate missing entries for taxrend
                filled_row = deepcopy(new_row)
                for index, entry in enumerate(new_row):
                    if not new_row[index]:
                        new_row[index] = 'N/A'
                        filled_row[index] = prior_filled_row[index]
                    else:
                        # FIX: raw string avoids the invalid '\[' escape
                        # (DeprecationWarning on modern Python).
                        new_row[index] = re.sub(r'\[[0-9]+\]', '', entry)
                        filled_row[index] = new_row[index]
                out_write.writerow(new_row)
                out_rendspec.append(['rendspec'
                    , new_row[0].replace('.', ',').strip()
                    , new_row[1].replace('.', ',').strip()
                    , 'C'])
                out_taxrend.append(['taxrend'
                    , filled_row[2].replace('.', ',').strip()
                    , filled_row[0].replace('.', ',').strip()
                    , 'C'])
                out_taxtype.append(['taxtype'
                    , new_row[2].replace('.', ',').strip()
                    , new_row[3].replace('.', ',').strip()
                    , 'C'])
                prior_filled_row = filled_row
        # For all other records, simply print to the CSV.
        else:
            # Remove references (e.g., '[1]' and '[12]')
            # Populate missing entries for taxrend
            filled_row = deepcopy(row)
            for index, entry in enumerate(row):
                if not row[index]:
                    row[index] = 'N/A'
                    filled_row[index] = prior_filled_row[index]
                else:
                    row[index] = re.sub(r'\[[0-9]+\]', '', entry)
                    filled_row[index] = row[index]
            out_write.writerow(row)
            out_rendspec.append(['rendspec'
                , row[0].replace('.', ',').strip()
                , row[1].replace('.', ',').strip()
                , 'C'])
            out_taxrend.append(['taxrend'
                , filled_row[2].replace('.', ',').strip()
                , filled_row[0].replace('.', ',').strip()
                , 'C'])
            if row[2] != 'N/A':
                out_taxtype.append(['taxtype'
                    , row[2].replace('.', ',').strip()
                    , row[3].replace('.', ',').strip()
                    , 'C'])
            prior_filled_row = filled_row
# PREPARE THE FINAL PANDAS TABLE FOR OUTPUTING AS A SAS FORMAT TABLE
# Map each format name to [raw rows, deduplicated rows].
initial = {'rendspec': [out_rendspec, []]
    , 'taxrend': [out_taxrend, []]
    , 'taxtype': [out_taxtype, []]
    }
for name, ds_list in initial.items():
    ds = ds_list[0]
    ds_final = ds_list[1]
    ds.sort(key=itemgetter(1))
    # Remove duplicate records
    # Keep only the first row for each `start` value, skipping N/A entries.
    for index, row in enumerate(ds):
        start, label = row[1], row[2]
        if index > 0:
            prior_start = ds[index - 1][1]
        else:
            prior_start = []
        if (start != 'N/A' and label != 'N/A' and
                start != prior_start):
            ds_final.append(row)
        else:
            continue
    ds_final.sort(key=itemgetter(1,2))
    # Upload the cleaned rows to the SAS server as a format source table.
    df = pd.DataFrame(data=ds_final
        , columns=['fmtname', 'start', 'label', 'type'])
    sas_out = sas.df2sd(df, table=name, libref='fmt')
sas.disconnect()
| StarcoderdataPython |
161439 | <reponame>betatim/lhcb-voting
import sys
import csv
import operator
from collections import Counter
from itertools import chain
def load_votes(fname="election.csv"):
    """Load ballots from a CSV file.

    The first column of every row (e.g. a timestamp) is dropped, as is the
    header row.  Each remaining row is a list of strings whose first entry
    is the voter's token.
    """
    with open(fname) as csvfile:
        rows = [line[1:] for line in csv.reader(csvfile)]
    # slice off the header
    return rows[1:]
def valid_token(valid_tokens, ballot):
    """Return True when the ballot's leading token is an authorised one."""
    token = ballot[0]
    return token in valid_tokens
def unspoilt_ballot(ballot):
    """A ballot is unspoilt when no candidate appears on it more than once."""
    return len(set(ballot)) == len(ballot)
def remove_multi_voters(ballots):
    """Drop every ballot cast with a token that was used more than once.

    BUG FIX: the original implementation popped a token's ballot on its
    second appearance and re-inserted it on the third, so a token used an
    odd number of times (>1) incorrectly survived.  Counting token usage
    first removes *all* ballots belonging to multi-voters, as the original
    comment intended (and makes use of the previously unused count).
    """
    vote_counts = Counter(ballot[0] for ballot in ballots)
    # Keep only ballots whose token appears exactly once, preserving order.
    return [ballot for ballot in ballots if vote_counts[ballot[0]] == 1]
def count_votes(rankings, eliminated=set()):
    """Tally one elimination round and return the set of names to eliminate.

    Each ballot votes for its last-listed candidate that has not yet been
    eliminated; every candidate tied for the most such votes is eliminated.
    (Python 2 module: note the print statements and dict.iteritems below.)
    """
    # NOTE(review): a mutable default argument is risky in general; it is
    # harmless here only because `eliminated` is never mutated in this body.
    votes = []
    for ranking in rankings:
        # vote for lowest ranked person,
        # not yet eliminated
        for person in reversed(ranking):
            if person not in eliminated:
                print 'voting for', person
                votes.append(person)
                break
    counts = Counter(votes)
    # Sort candidates by vote count, most votes first.
    ranking = sorted(counts.iteritems(),
                     key=operator.itemgetter(1),
                     reverse=True)
    # Everyone who received the same number
    # of votes as the least popular person
    # is eliminated
    eliminees = [ranking[0][0]]
    threshold_vote = ranking[0][1]
    for name,votes_ in ranking[1:]:
        if votes_ == threshold_vote:
            eliminees.append(name)
    return set(eliminees)
class NoCandidatesLeftError(Exception):
    """Raised when an elimination round removes every remaining candidate."""
    pass
class TooManyRoundsError(Exception):
    """Raised when voting exceeds the round limit without finding a winner.

    The candidates still in contention are available on ``remaining``.
    """
    def __init__(self, remaining):
        # FIX: forward a message to Exception so str(exc) is informative;
        # the original never initialised the base class, leaving it empty.
        super(TooManyRoundsError, self).__init__(
            "no winner after maximum rounds; remaining: %s" % (remaining,))
        self.remaining = remaining
def determine_winner(ballots):
    """Run elimination rounds over the ballots until one name remains.

    Returns the winner's name, or raises NoCandidatesLeftError /
    TooManyRoundsError when the process fails.  (Python 2 code.)
    """
    ballots = list(ballots)
    # Every name that appears on at least one ballot is a candidate.
    names = set(chain(*ballots))
    print "These names received at least one vote:"
    print "  " + ", ".join(names)
    eliminated = set()
    rounds = 1
    while True:
        print
        left_over = [name for name in names if name not in eliminated]
        if len(left_over) == 1:
            print "We have a winner:", left_over[0]
            return left_over[0]
        print "Round %i"%rounds
        eliminated_ = count_votes(ballots, eliminated)
        print "eliminated this time:", ", ".join(eliminated_)
        eliminated = eliminated.union(set(eliminated_))
        left_over = [name for name in names if name not in eliminated]
        if len(left_over) == 0:
            # Ties can wipe out the whole field in one round.
            print "Something went wrong"
            print "Everyone has been eliminated"
            raise NoCandidatesLeftError()
        rounds += 1
        if rounds > 50:
            # Safety valve against a non-terminating election.
            print "Something went wrong"
            print "After 50 rounds of voting no winner could be found"
            print 'left over', left_over
            raise TooManyRoundsError(left_over)
if __name__ == "__main__":
    # Pipeline: load ballots, drop spoilt ones, drop multi-voters, keep only
    # authorised tokens, then run the election.  (Python 2: filter() returns
    # a list here, which len() below relies on.)
    votes = load_votes()
    print "Loaded a total of %i ballots"%(len(votes))
    valid_ballots = filter(unspoilt_ballot, votes)
    print "Unspoilt ballots %i"%(len(valid_ballots))
    valid_ballots = remove_multi_voters(valid_ballots)
    print "Single vote ballots %i"%(len(valid_ballots))
    VALID_TOKENS = set(line.strip()
                       for line in open("valid_tokens.txt").readlines())
    valid_token_ = lambda x: valid_token(VALID_TOKENS, x)
    valid_ballots = filter(valid_token_, valid_ballots)
    print "Valid token ballots %i"%(len(valid_ballots))
    if not valid_ballots:
        print "No valid ballots left."
        sys.exit(1)
    # Strip the leading token column before tallying rankings.
    determine_winner(ballot[1:] for ballot in valid_ballots)
| StarcoderdataPython |
9734199 | <reponame>swetasingh-tudip/training-data-analyst
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# BigQuery DDL that materialises the CLV training table: RFM-style features
# computed up to the cutoff date, the 3-month target monetary value, and a
# deterministic 80/10/10 TRAIN/VALIDATE/TEST split keyed on customer_id.
# @ML_TABLE_ID / @CLEAN_TABLE_ID are substituted by the caller before running.
dataset_ml_query = """
CREATE OR REPLACE TABLE `@ML_TABLE_ID`
AS (
WITH
  -- Calculate features before CUTOFF_DATE date.
  features AS (
  SELECT
    customer_id,
    customer_country,
    COUNT(n_purchases) AS n_purchases,
    AVG(order_qty) AS avg_purchase_size,
    AVG(revenue) AS avg_purchase_revenue,
    DATE_DIFF(MAX(order_date), MIN(order_date), DAY) AS customer_age,
    DATE_DIFF(DATE('2011-09-01'), MAX(order_date), DAY) AS days_since_last_purchase
  FROM
    `@CLEAN_TABLE_ID`
  WHERE
    order_date <= DATE('2011-09-01')
  GROUP BY
    customer_id,
    customer_country),
  -- Calculate customer target monetary value over historical period + 3M future period.
  label AS (
  SELECT
    customer_id,
    SUM(revenue) AS target_monetary_value_3M
  FROM
    `@CLEAN_TABLE_ID`
  WHERE
    order_date < DATE('2011-12-01')
  GROUP BY
    customer_id
  )
SELECT
  features.customer_id,
  features.customer_country,
  features.n_purchases, -- frequency
  features.avg_purchase_size,
  features.avg_purchase_revenue,
  features.customer_age,
  features.days_since_last_purchase, --recency
  label.target_monetary_value_3M, --monetary
  CASE
    WHEN MOD(ABS(FARM_FINGERPRINT(CAST(features.customer_id AS STRING))), 10) < 8
      THEN 'TRAIN'
    WHEN MOD(ABS(FARM_FINGERPRINT(CAST(features.customer_id AS STRING))), 10) = 9
      THEN 'VALIDATE'
  ELSE
    'TEST' END AS data_split
FROM
  features
INNER JOIN label
ON features.customer_id = label.customer_id
);
"""
3430269 | from urllib.parse import quote
from nonebot import on_shell_command
from nonebot.rule import ArgumentParser
from nonebot.typing import T_State
from nonebot.adapters import Bot, Event
# Shell-style argument parser for the command: the first positional argument
# is the top line of the generated image; the optional second positional is
# the bottom line (defaulting to the canonical meme text).
choyen_parser = ArgumentParser()
choyen_parser.add_argument('top')
choyen_parser.add_argument('bottom', nargs='?', default='欲しい!')
# Register the command under both the 'choyen' and '5000' names.
choyen = on_shell_command('choyen', aliases={'5000'}, parser=choyen_parser)
@choyen.handle()
async def _(bot: Bot, event: Event, state: T_State):
    """Reply with a 5000choyen-style two-line image built from the parsed args.

    Silently does nothing when either line is missing/empty.
    """
    top = getattr(state['args'], 'top', None)
    bottom = getattr(state['args'], 'bottom', None)
    if not top or not bottom:
        return
    # URL-encode both lines so they survive embedding in the query string.
    top = quote(top)
    bottom = quote(bottom)
    # FIX: removed a leftover debug print(top, bottom) statement.
    await choyen.finish([
        {"type": "image", "data": {
            "file": f'http://zhangjunbo.top:18003/api/v1/gen?top={top}&bottom={bottom}'
        }},
    ])
| StarcoderdataPython |
11356179 | # pylint: disable=C0301,W0201
from __future__ import annotations
from struct import Struct
from typing import Union, Any, TYPE_CHECKING
import numpy as np
from pyNastran.op2.op2_helper import polar_to_real_imag
from pyNastran.op2.op2_interface.op2_reader import mapfmt
if TYPE_CHECKING: # pragma: no cover
from pyNastran.op2.op2 import OP2
def read_real_table_static(op2: OP2, obj: Any, flag: str,
                           data: bytes, nnodes: int, ntotal: int) -> int:
    """Unpack `nnodes` real static records (id, grid type, 6 components)
    from `data` into `obj` and return the number of bytes consumed."""
    dt = np.nan  # static results carry no time/mode step
    record = Struct(mapfmt(op2._endian + b'2i6f', op2.size))
    offset = 0
    for _ in range(nnodes):
        out = record.unpack(data[offset:offset + ntotal])
        eid_device, grid_type, tx, ty, tz, rx, ry, rz = out
        eid = eid_device // 10
        if op2.is_debug_file:
            op2.binary_debug.write('  %s=%i; %s\n' % (flag, eid, str(out)))
        obj.add_sort1(dt, eid, grid_type, tx, ty, tz, rx, ry, rz)
        offset += ntotal
    return offset
def read_real_table_sort1(op2: OP2, obj: Any, dt: Union[int, float], flag: str,
                          data: bytes, nnodes: int, ntotal: int) -> int:
    """Unpack `nnodes` real SORT1 records for step `dt` into `obj` and
    return the number of bytes consumed."""
    assert nnodes > 0, nnodes
    record = Struct(mapfmt(op2._endian + b'2i6f', op2.size))
    offset = 0
    for _ in range(nnodes):
        out = record.unpack(data[offset:offset + ntotal])
        eid_device, grid_type, tx, ty, tz, rx, ry, rz = out
        assert grid_type != 1065353216, out  # caused by an op2 writer bug with int64 numbers being downcast directly to float32
        eid = eid_device // 10
        if op2.is_debug_file:
            op2.binary_debug.write('  %s=%i; %s\n' % (flag, eid, str(out)))
        obj.add_sort1(dt, eid, grid_type, tx, ty, tz, rx, ry, rz)
        offset += ntotal
    return offset
def read_complex_table_sort1_mag(op2: OP2, obj: Any, dt: Union[int, float], flag: str,
                                 data: bytes, nnodes: int, ntotal: int) -> int:
    """Unpack `nnodes` complex SORT1 records stored as magnitude/phase and
    add them to `obj`; returns the number of bytes consumed."""
    record = Struct(mapfmt(op2._endian + b'2i12f', op2.size))
    offset = 0
    for _ in range(nnodes):
        out = record.unpack(data[offset:offset + ntotal])
        eid = out[0] // 10  # out[0] is eid_device
        if op2.is_debug_file:
            op2.binary_debug.write('  %s=%i %s\n' % (flag, eid, str(out)))
        # out[2:8] are the magnitude parts, out[8:14] the phase parts.
        tx, ty, tz, rx, ry, rz = [polar_to_real_imag(mag, phase)
                                  for mag, phase in zip(out[2:8], out[8:14])]
        obj.add_sort1(dt, eid, out[1], tx, ty, tz, rx, ry, rz)
        offset += ntotal
    return offset
def read_complex_table_sort1_imag(op2: OP2, obj: Any, dt: Union[int, float], flag: str,
                                  data: bytes, nnodes: int, ntotal: int) -> int:
    """Unpack `nnodes` complex SORT1 records stored as real/imaginary pairs
    and add them to `obj`; returns the number of bytes consumed."""
    record = Struct(mapfmt(op2._endian + b'2i12f', op2.size))
    assert op2.obj is not None
    assert nnodes > 0
    offset = 0
    for _ in range(nnodes):
        out = record.unpack(data[offset:offset + ntotal])
        eid = out[0] // 10  # out[0] is eid_device
        if op2.is_debug_file:
            op2.binary_debug.write('  %s=%i %s\n' % (flag, eid, str(out)))
        # out[2:8] are the real parts, out[8:14] the imaginary parts.
        tx, ty, tz, rx, ry, rz = [complex(real_part, imag_part)
                                  for real_part, imag_part in zip(out[2:8], out[8:14])]
        obj.add_sort1(dt, eid, out[1], tx, ty, tz, rx, ry, rz)
        offset += ntotal
    return offset
def read_complex_table_sort2_imag(op2: OP2, obj: Any, node_id: int,
                                  flag: str, flag_type: str,
                                  data: bytes, nnodes: int, ntotal: int) -> int:
    """Unpack `nnodes` complex SORT2 (real/imaginary) records for one node
    and add them to `obj`; returns the number of bytes consumed."""
    record = Struct(mapfmt(op2._endian + op2._analysis_code_fmt + b'i12f', op2.size))
    assert op2.obj is not None
    assert nnodes > 0
    debug_fmt = '  %s=%s %%s\n' % (flag, flag_type)
    offset = 0
    for _ in range(nnodes):
        out = record.unpack(data[offset:offset + ntotal])
        step, grid_type = out[0], out[1]  # step is the freq/time/mode value
        if op2.is_debug_file:
            op2.binary_debug.write(debug_fmt % (step, str(out)))
        # out[2:8] are the real parts, out[8:14] the imaginary parts.
        tx, ty, tz, rx, ry, rz = [complex(real_part, imag_part)
                                  for real_part, imag_part in zip(out[2:8], out[8:14])]
        obj.add_sort2(step, node_id, grid_type, tx, ty, tz, rx, ry, rz)
        offset += ntotal
    return offset
def read_complex_table_sort2_mag(op2: OP2, obj: Any, node_id: int,
                                 flag: str, flag_type: str,
                                 data: bytes, nnodes: int, ntotal: int) -> int:
    """Unpack `nnodes` complex SORT2 (magnitude/phase) records for one node
    and add them to `obj`; returns the number of bytes consumed."""
    record = Struct(mapfmt(op2._endian + op2._analysis_code_fmt + b'i12f', op2.size))
    debug_fmt = '  %s=%s %%s\n' % (flag, flag_type)
    offset = 0
    for _ in range(nnodes):
        out = record.unpack(data[offset:offset + ntotal])
        step, grid_type = out[0], out[1]  # step is the freq/time/mode value
        if op2.is_debug_file:
            op2.binary_debug.write(debug_fmt % (step, str(out)))
        # out[2:8] are the magnitude parts, out[8:14] the phase parts.
        tx, ty, tz, rx, ry, rz = [polar_to_real_imag(mag, phase)
                                  for mag, phase in zip(out[2:8], out[8:14])]
        obj.add_sort2(step, node_id, grid_type, tx, ty, tz, rx, ry, rz)
        offset += ntotal
    return offset
def read_real_table_sort2(self: OP2, obj: Any, flag: str, nid: int,
                          data: bytes, nnodes: int, ntotal: int) -> int:
    # NOTE(review): the first parameter is named `self` although this is a
    # free function (siblings call it `op2`); renamed only in docs to avoid
    # breaking any keyword callers.
    """Unpack `nnodes` real SORT2 records for node `nid` into `obj` and
    return the number of bytes consumed."""
    n = 0
    assert nnodes > 0
    fmt = mapfmt(self._endian + self._analysis_code_fmt + b'i6f', self.size)
    structi = Struct(fmt)
    #psds = ('CRM2', 'NO2', 'PSD2', 'RMS2')
    #print('sort_method=%s' % self.sort_method)
    #if self.table_name_str.endswith(psds):
    for unused_inode in range(nnodes):
        edata = data[n:n+ntotal]
        out = structi.unpack(edata)
        # First field is the step value (time/freq/mode), per _analysis_code_fmt.
        (dt, grid_type, tx, ty, tz, rx, ry, rz) = out
        if self.is_debug_file:
            self.binary_debug.write(
                f'  nid={nid} {flag}={dt} ({type(dt)}); {str(out)}\n')
        obj.add_sort2(dt, nid, grid_type, tx, ty, tz, rx, ry, rz)
        n += ntotal
    return n
| StarcoderdataPython |
4925728 | <filename>Amazon/count_ways_to_Nth_stairs.py<gh_stars>1-10
import sys
import io
import atexit
class Solution:
    """Counting ways to climb stairs with steps of 1 and 2, order ignored."""

    #Function to count number of ways to reach the nth stair
    #when order does not matter.
    def countWays(self, s):
        """Return the number of ways to reach stair `s` with steps of 1 and 2
        when the order of the steps does not matter.

        Each way is fully determined by how many 2-steps are taken
        (anywhere from 0 to s // 2), giving s // 2 + 1 ways.
        """
        # FIX: removed the unused `mod` constant; the closed-form result is
        # returned directly.
        return (s // 2) + 1
#{
# Driver Code Starts
#Initial Template for Python 3
# Contributed by : <NAME>
if __name__ == '__main__':
    # First input line: number of test cases; each case supplies one stair count.
    test_cases = int(input())
    for cases in range(test_cases):
        m = int(input())
        ob = Solution()
        print(ob.countWays(m))
# } Driver Code Ends
| StarcoderdataPython |
3541287 | <filename>pyquil/api/quantum_computer.py
from math import pi
from typing import List
import numpy as np
from pyquil.api._qam import QAM
from pyquil.device import AbstractDevice
from pyquil.gates import MEASURE, RX
from pyquil.quil import Program, get_classical_addresses_from_program
from pyquil.quilbase import Measurement, Pragma
def _get_flipped_protoquil_program(program: Program):
    """Return a copy of *program* whose measured qubits are bit-flipped
    (via RX(pi)) immediately before measurement, for readout symmetrization.

    Forest 1.3 is really picky about where the measure instructions happen.
    It has to be at the end!
    """
    flipped = Program(program.instructions)  # work on a copy
    measurements = []
    # Peel the trailing MEASURE instructions off the program.
    while len(flipped) > 0:
        last = flipped.instructions[-1]
        if not isinstance(last, Measurement):
            break
        flipped.pop()
        measurements.append((last.qubit, last.classical_reg))
    flipped += Pragma('PRESERVE_BLOCK')
    for qubit, _addr in reversed(measurements):
        flipped += RX(pi, qubit)
    flipped += Pragma('END_PRESERVE_BLOCK')
    # Re-append the measurements in their original order.
    for qubit, addr in reversed(measurements):
        flipped += Measurement(qubit=qubit, classical_reg=addr)
    return flipped
class QuantumComputer:
    def __init__(self, *, name: str, qam: QAM, device: AbstractDevice, symmetrize_readout=False):
        """
        A quantum computer for running quantum programs.
        A quantum computer has various characteristics like supported gates, qubits, qubit
        topologies, gate fidelities, and more. A quantum computer also has the ability to
        run quantum programs.
        A quantum computer can be a real Rigetti QPU that uses superconducting transmon
        qubits to run quantum programs, or it can be an emulator like the Rigetti QVM with
        noise models and mimicked topologies.
        :param name: A string identifying this particular quantum computer.
        :param qam: A quantum abstract machine which handles executing quantum programs. This
            dispatches to a QVM or QPU.
        :param device: A collection of connected qubits and associated specs and topology.
        :param symmetrize_readout: Whether to apply readout error symmetrization. See
            :py:func:`run_symmetrized_readout` for a complete description.
        """
        self.name = name
        self.qam = qam
        self.device = device
        # Default used by run()/run_async() when symmetrize_readout is None.
        self.symmetrize_readout = symmetrize_readout
    def qubit_topology(self):
        """Return the connectivity graph of the underlying device's qubits."""
        return self.device.qubit_topology()
    def get_isa(self, oneq_type='Xhalves', twoq_type='CZ'):
        """Return the device's instruction set architecture, built from the
        given one-qubit and two-qubit gate families."""
        return self.device.get_isa(oneq_type=oneq_type, twoq_type=twoq_type)
def run(self, program, classical_addresses, trials, symmetrize_readout=None) -> np.ndarray:
"""
Run a quil program.
:param program: The program to run. You probably want to put MEASURE instructions
in your program somewhere (like at the end) because qubits are not automatically
measured
:param classical_addresses: The addresses of the classical bits to return. These don't
necessarily correspond to qubit indices; rather they are the second argument to
any MEASURE instructions you've added to your program
:param trials: The number of times to run the program.
:param symmetrize_readout: Whether to apply readout error symmetrization. If not
specified, the instance attribute ``symmetrize_readout`` will be used. See
:py:func:`run_symmetrized_readout` for a complete description.
:return: A numpy array of shape (trials, len(classical_addresses)) that contains 0s and 1s
"""
if symmetrize_readout is None:
symmetrize_readout = self.symmetrize_readout
if not classical_addresses:
classical_addresses = get_classical_addresses_from_program(program)
if symmetrize_readout:
return self.run_symmetrized_readout(program, classical_addresses, trials)
return self.qam.run(program, classical_addresses, trials)
def run_async(self, program, classical_addresses, trials, symmetrize_readout=None) -> str:
    """
    Queue a quil program for running, but return immediately with a job id.

    Use :py:func:`QuantumComputer.wait_for_job` to get the actual job results, probably
    after queueing up a whole batch of jobs.

    See :py:func:`run` for this function's parameter descriptions.
    :returns: a job id
    :raises NotImplementedError: if readout symmetrization is requested (not
        supported asynchronously).
    """
    do_symmetrize = self.symmetrize_readout if symmetrize_readout is None else symmetrize_readout
    addresses = classical_addresses or get_classical_addresses_from_program(program)
    if do_symmetrize:
        raise NotImplementedError("Async symmetrized readout isn't supported")
    return self.qam.run_async(program, addresses, trials)
def run_symmetrized_readout(self, program, classical_addresses, trials):
    """
    Run a quil program in such a way that the readout error is made collectively symmetric.

    This means the probability of a bitstring ``b`` being mistaken for a bitstring ``c`` is
    the same as the probability of ``not(b)`` being mistaken for ``not(c)``.

    A more general symmetrization would guarantee that the probability of ``b`` being
    mistaken for ``c`` depends only on which bit of ``c`` are different from ``b``. This
    would require choosing random subsets of bits to flip.

    In a noisy device, the probability of accurately reading the 0 state might be higher
    than that of the 1 state. This makes correcting for readout more difficult. This
    function runs the program normally ``(trials//2)`` times. The other half of the time,
    it will insert an ``X`` gate prior to any ``MEASURE`` instruction and then flip the
    measured classical bit back.

    See :py:func:`run` for this function's parameter descriptions.

    :raises ValueError: if ``trials`` is odd (the two halves must be equal).
    """
    # Validate *before* doing any work: the original built the flipped
    # program first, wasting effort (and deferring the error) on odd trials.
    if trials % 2 != 0:
        raise ValueError("Using symmetrized measurement functionality requires that you "
                         "take an even number of trials.")
    flipped_program = _get_flipped_protoquil_program(program)
    half_trials = trials // 2
    samples = self.run(program, classical_addresses, half_trials, symmetrize_readout=False)
    flipped_samples = self.run(flipped_program, classical_addresses, half_trials,
                               symmetrize_readout=False)
    # Un-flip the measured bits from the X-prefixed half.
    double_flipped_samples = np.logical_not(flipped_samples).astype(int)
    results = np.concatenate((samples, double_flipped_samples), axis=0)
    # Shuffle so callers can't rely on the ordering of the two halves.
    np.random.shuffle(results)
    return results
def run_and_measure(self, program: Program, qubits: List[int], trials: int,
                    symmetrize_readout=None):
    """
    Run the provided state preparation program and measure all qubits contained in the program.

    .. note::
        In contrast to :py:class:`QVMConnection.run_and_measure`, this method simulates
        noise correctly for noisy QVMs. However, this method is slower for ``trials > 1``.
        For faster noise-free simulation, consider
        :py:class:`WavefunctionSimulator.run_and_measure`.

    :param program: The state preparation program to run and then measure.
    :param qubits: Qubit indices to measure.
    :param trials: The number of times to run the program.
    :param symmetrize_readout: Whether to apply readout error symmetrization. If not specified,
        the class attribute ``symmetrize_readout`` will be used. See
        :py:func:`run_symmetrized_readout` for a complete description.
    :return: A numpy array of shape (trials, len(qubits)) that contains 0s and 1s
    """
    # Wrap the caller's program in a fresh Program so we never mutate it.
    measured_prog = Program().inst(program)
    # Classical address i receives the measurement of qubits[i].
    for addr, qubit in enumerate(qubits):
        measured_prog += MEASURE(qubit, addr)
    return self.run(program=measured_prog,
                    classical_addresses=list(range(len(qubits))),
                    trials=trials, symmetrize_readout=symmetrize_readout)
def wait_for_job(self, job_id):
    """Block until the queued job finishes; delegated to the QAM."""
    return self.qam.wait_for_job(job_id)
def __str__(self):
    """Human-readable identifier: the computer's name."""
    return self.name
| StarcoderdataPython |
3409858 | from __future__ import absolute_import
# Copyright (c) 2010-2019 openpyexcel
import pytest
import datetime
from io import BytesIO
from zipfile import ZipFile
from lxml.etree import iterparse, fromstring
from openpyexcel import load_workbook
from openpyexcel.compat import unicode
from openpyexcel.xml.constants import SHEET_MAIN_NS
from openpyexcel.utils.indexed_list import IndexedList
from openpyexcel.worksheet import Worksheet
from openpyexcel.worksheet.pagebreak import Break, PageBreak
from openpyexcel.packaging.relationship import Relationship, RelationshipList
from openpyexcel.utils.datetime import CALENDAR_WINDOWS_1900, CALENDAR_MAC_1904
def test_get_xml_iter():
    """_get_xml_iter must accept bytes, text, file objects and zip members."""
    #1 file object
    #2 stream (file-like)
    #3 string
    #4 zipfile
    from openpyexcel.reader.worksheet import _get_xml_iter
    from tempfile import TemporaryFile
    FUT = _get_xml_iter
    s = b""
    stream = FUT(s)
    assert isinstance(stream, BytesIO), type(stream)
    u = unicode(s)
    stream = FUT(u)
    assert isinstance(stream, BytesIO), type(stream)
    f = TemporaryFile(mode='rb+', prefix='openpyexcel.', suffix='.unpack.temp')
    stream = FUT(f)
    assert stream == f
    f.close()
    t = TemporaryFile()
    z = ZipFile(t, mode="w")
    z.writestr("test", "whatever")
    stream = FUT(z.open("test"))
    assert hasattr(stream, "read")
    try:
        z.close()
    except IOError:
        # you can't just close zipfiles in Windows
        z.close()  # python 2.7
@pytest.fixture
def Workbook():
    """Fixture: a minimal stand-in workbook with styles and one shared string."""
    from openpyexcel.styles.styleable import StyleArray
    from openpyexcel.styles import numbers

    # NOTE(review): DummyStyle appears unused within this fixture — confirm
    # whether it can be removed.
    class DummyStyle:
        number_format = numbers.FORMAT_GENERAL
        font = ""
        fill = ""
        border = ""
        alignment = ""
        protection = ""
        def copy(self, **kw):
            return self

    class DummyWorkbook:
        guess_types = False
        data_only = False
        _colors = []
        encoding = "utf8"
        epoch = CALENDAR_WINDOWS_1900

        def __init__(self):
            self._differential_styles = []
            self.shared_strings = IndexedList()
            self.shared_strings.add("hello world")
            self._fonts = IndexedList()
            self._fills = IndexedList()
            self._number_formats = IndexedList()
            self._borders = IndexedList()
            self._alignments = IndexedList()
            self._protections = IndexedList()
            self._cell_styles = IndexedList()
            self.vba_archive = None
            # 29 dummy styles plus one with specific fill/border/alignment ids.
            for i in range(29):
                self._cell_styles.add((StyleArray([i]*9)))
            self._cell_styles.add(StyleArray([0,4,6,0,0,1,0,0,0])) #fillId=4, borderId=6, alignmentId=1))
            self.sheetnames = []

        def create_sheet(self, title):
            return Worksheet(self)

    return DummyWorkbook()
@pytest.fixture
def WorkSheetParser(Workbook):
    """Setup a parser instance with an empty source"""
    from .. worksheet import WorkSheetParser
    ws = Workbook.create_sheet('sheet')
    # Third argument maps shared-string index 0 -> 'a'.
    return WorkSheetParser(ws, None, {0:'a'})
@pytest.fixture
def WorkSheetParserKeepVBA(Workbook):
    """Setup a parser instance with an empty source"""
    # Simulate a workbook opened with keep_vba=True.
    Workbook.vba_archive=True
    from .. worksheet import WorkSheetParser
    ws = Workbook.create_sheet('sheet')
    return WorkSheetParser(ws, {0:'a'}, {})
def test_col_width(datadir, WorkSheetParser):
    """Column dimensions (width, customWidth) are parsed from <col> elements."""
    datadir.chdir()
    parser = WorkSheetParser
    ws = parser.ws
    with open("complex-styles-worksheet.xml", "rb") as src:
        cols = iterparse(src, tag='{%s}col' % SHEET_MAIN_NS)
        for _, col in cols:
            parser.parse_column_dimensions(col)
    assert set(ws.column_dimensions) == set(['A', 'C', 'E', 'I', 'G'])
    assert ws.column_dimensions['A'].style_id == 0
    assert dict(ws.column_dimensions['A']) == {'max': '1', 'min': '1',
                                               'customWidth': '1',
                                               'width': '31.1640625'}
def test_hidden_col(datadir, WorkSheetParser):
    """Hidden columns keep their 'hidden' attribute after parsing."""
    datadir.chdir()
    parser = WorkSheetParser
    ws = parser.ws
    with open("hidden_rows_cols.xml", "rb") as src:
        cols = iterparse(src, tag='{%s}col' % SHEET_MAIN_NS)
        for _, col in cols:
            parser.parse_column_dimensions(col)
    assert 'D' in ws.column_dimensions
    assert dict(ws.column_dimensions['D']) == {'customWidth': '1', 'hidden':
                                               '1', 'max': '4', 'min': '4'}
def test_styled_col(datadir, WorkSheetParser):
    """Columns carrying a style index expose it via style_id and 'style'."""
    datadir.chdir()
    parser = WorkSheetParser
    ws = parser.ws
    with open("complex-styles-worksheet.xml", "rb") as src:
        cols = iterparse(src, tag='{%s}col' % SHEET_MAIN_NS)
        for _, col in cols:
            parser.parse_column_dimensions(col)
    assert 'I' in ws.column_dimensions
    cd = ws.column_dimensions['I']
    assert cd.style_id == 28
    assert dict(cd) == {'customWidth': '1', 'max': '9', 'min': '9', 'width': '25', 'style':'28'}
def test_hidden_row(datadir, WorkSheetParser):
    """Hidden rows keep their 'hidden' attribute after parsing."""
    datadir.chdir()
    parser = WorkSheetParser
    ws = parser.ws
    with open("hidden_rows_cols.xml", "rb") as src:
        rows = iterparse(src, tag='{%s}row' % SHEET_MAIN_NS)
        for _, row in rows:
            parser.parse_row(row)
    assert 2 in ws.row_dimensions
    assert dict(ws.row_dimensions[2]) == {'hidden': '1'}
def test_styled_row(datadir, WorkSheetParser):
    """Rows carrying a style index expose it via style_id / 's' / customFormat."""
    datadir.chdir()
    parser = WorkSheetParser
    ws = parser.ws
    # Widen the shared-string table so every referenced index resolves.
    parser.shared_strings = dict((i, i) for i in range(30))
    with open("complex-styles-worksheet.xml", "rb") as src:
        rows = iterparse(src, tag='{%s}row' % SHEET_MAIN_NS)
        for _, row in rows:
            parser.parse_row(row)
    assert 23 in ws.row_dimensions
    rd = ws.row_dimensions[23]
    assert rd.style_id == 28
    assert dict(rd) == {'s':'28', 'customFormat':'1'}
def test_sheet_protection(datadir, WorkSheetParser):
    """sheetProtection attributes map onto the worksheet's protection object."""
    datadir.chdir()
    parser = WorkSheetParser
    ws = parser.ws
    with open("protected_sheet.xml", "rb") as src:
        tree = iterparse(src, tag='{%s}sheetProtection' % SHEET_MAIN_NS)
        for _, tag in tree:
            parser.parse_sheet_protection(tag)
    # NOTE(review): '<PASSWORD>' looks like a sanitization placeholder; the
    # real fixture presumably carries a hashed password value — confirm.
    assert dict(ws.protection) == {
        'autoFilter': '0', 'deleteColumns': '0',
        'deleteRows': '0', 'formatCells': '0', 'formatColumns': '0', 'formatRows':
        '0', 'insertColumns': '0', 'insertHyperlinks': '0', 'insertRows': '0',
        'objects': '0', 'password': '<PASSWORD>', 'pivotTables': '0', 'scenarios': '0',
        'selectLockedCells': '0', 'selectUnlockedCells': '0', 'sheet': '1', 'sort':
        '0'
    }
def test_formula_without_value(WorkSheetParser):
    """A formula cell with an empty <v> still parses as a formula."""
    parser = WorkSheetParser
    ws = parser.ws
    src = """
    <x:c r="A1" xmlns:x="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
    <x:f>IF(TRUE, "y", "n")</x:f>
    <x:v />
    </x:c>
    """
    element = fromstring(src)
    parser.parse_cell(element)
    assert ws['A1'].data_type == 'f'
    assert ws['A1'].value == '=IF(TRUE, "y", "n")'
def test_formula(WorkSheetParser):
    """String-typed formula cells parse as formulas (not cached strings)."""
    parser = WorkSheetParser
    ws = parser.ws
    src = """
    <x:c r="A1" t="str" xmlns:x="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
    <x:f>IF(TRUE, "y", "n")</x:f>
    <x:v>y</x:v>
    </x:c>
    """
    element = fromstring(src)
    parser.parse_cell(element)
    assert ws['A1'].data_type == 'f'
    assert ws['A1'].value == '=IF(TRUE, "y", "n")'
def test_formula_data_only(WorkSheetParser):
    """With data_only=True the cached numeric value is used, not the formula."""
    parser = WorkSheetParser
    ws = parser.ws
    parser.data_only = True
    src = """
    <x:c r="A1" xmlns:x="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
    <x:f>1+2</x:f>
    <x:v>3</x:v>
    </x:c>
    """
    element = fromstring(src)
    parser.parse_cell(element)
    assert ws['A1'].data_type == 'n'
    assert ws['A1'].value == 3
def test_string_formula_data_only(WorkSheetParser):
    """With data_only=True the cached string value is used, not the formula."""
    parser = WorkSheetParser
    ws = parser.ws
    parser.data_only = True
    src = """
    <x:c r="A1" t="str" xmlns:x="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
    <x:f>IF(TRUE, "y", "n")</x:f>
    <x:v>y</x:v>
    </x:c>
    """
    element = fromstring(src)
    parser.parse_cell(element)
    assert ws['A1'].data_type == 's'
    assert ws['A1'].value == 'y'
def test_number(WorkSheetParser):
    """Untyped <v> content parses as a number."""
    parser = WorkSheetParser
    ws = parser.ws
    src = """
    <x:c r="A1" xmlns:x="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
    <x:v>1</x:v>
    </x:c>
    """
    element = fromstring(src)
    parser.parse_cell(element)
    assert ws['A1'].data_type == 'n'
    assert ws['A1'].value == 1
def test_datetime(WorkSheetParser):
    """t='d' cells parse ISO timestamps into datetime objects."""
    parser = WorkSheetParser
    ws = parser.ws
    src = """
    <x:c r="A1" t="d" xmlns:x="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
    <x:v>2011-12-25T14:23:55</x:v>
    </x:c>
    """
    element = fromstring(src)
    parser.parse_cell(element)
    assert ws['A1'].data_type == 'd'
    assert ws['A1'].value == datetime.datetime(2011, 12, 25, 14, 23, 55)
def test_mac_date():
    """Serial dates are interpreted against the Mac 1904 epoch when set."""
    from openpyexcel.styles.styleable import StyleArray
    from openpyexcel.styles import numbers

    # Local dummy workbook (not the fixture) so the epoch can differ.
    class DummyWorkbook:
        guess_types = False
        data_only = False
        _colors = []
        encoding = "utf8"
        epoch = CALENDAR_MAC_1904

        def __init__(self):
            self._differential_styles = []
            self.shared_strings = IndexedList()
            self._fonts = IndexedList()
            self._fills = IndexedList()
            self._number_formats = IndexedList()
            self._borders = IndexedList()
            self._alignments = IndexedList()
            self._protections = IndexedList()
            self._cell_styles = IndexedList()
            self.vba_archive = None
            # Style 0 uses number-format id 14 (a date format).
            self._cell_styles.add(StyleArray([0, 0, 0, 14, 0, 0, 0, 0, 0]))
            self.sheetnames = []

        def create_sheet(self, title):
            return Worksheet(self)

    from .. worksheet import WorkSheetParser
    ws = DummyWorkbook().create_sheet('sheet')
    parser = WorkSheetParser(ws, None, {0:'a'})
    src = """
    <x:c r="A1" s="0" t="n" xmlns:x="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
    <x:v>41184</x:v>
    </x:c>
    """
    element = fromstring(src)
    parser.parse_cell(element)
    assert ws['A1'].value == datetime.datetime(2016, 10, 3, 0, 0)
def test_string(WorkSheetParser):
    """t='s' cells resolve their value through the shared-string table."""
    parser = WorkSheetParser
    ws = parser.ws
    src = """
    <x:c r="A1" t="s" xmlns:x="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
    <x:v>0</x:v>
    </x:c>
    """
    element = fromstring(src)
    parser.parse_cell(element)
    assert ws['A1'].data_type == 's'
    assert ws['A1'].value == "a"
def test_boolean(WorkSheetParser):
    """t='b' cells parse '1' as the Python boolean True."""
    parser = WorkSheetParser
    ws = parser.ws
    src = """
    <x:c r="A1" t="b" xmlns:x="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
    <x:v>1</x:v>
    </x:c>
    """
    element = fromstring(src)
    parser.parse_cell(element)
    assert ws['A1'].data_type == 'b'
    assert ws['A1'].value is True
def test_inline_string(WorkSheetParser, datadir):
    """Inline strings (no shared-string indirection) parse as strings."""
    parser = WorkSheetParser
    ws = parser.ws
    datadir.chdir()
    with open("Table1-XmlFromAccess.xml") as src:
        sheet = fromstring(src.read())
    element = sheet.find("{%s}sheetData/{%s}row/{%s}c" % (SHEET_MAIN_NS, SHEET_MAIN_NS, SHEET_MAIN_NS))
    parser.parse_cell(element)
    assert ws['A1'].data_type == 's'
    assert ws['A1'].value == "ID"
def test_inline_richtext(WorkSheetParser, datadir):
    """Inline rich-text runs are flattened into a single string value."""
    parser = WorkSheetParser
    ws = parser.ws
    datadir.chdir()
    with open("jasper_sheet.xml", "rb") as src:
        sheet = fromstring(src.read())
    element = sheet.find("{%s}sheetData/{%s}row[2]/{%s}c[18]" % (SHEET_MAIN_NS, SHEET_MAIN_NS, SHEET_MAIN_NS))
    assert element.get("r") == 'R2'
    parser.parse_cell(element)
    cell = ws['R2']
    assert cell.data_type == 's'
    assert cell.value == "11 de September de 2014"
def test_legacy_drawing(datadir):
    """keep_vba=True preserves per-sheet legacy VML drawing paths."""
    datadir.chdir()
    wb = load_workbook("legacy_drawing.xlsm", keep_vba=True)
    sheet1 = wb['Sheet1']
    assert sheet1.legacy_drawing == 'xl/drawings/vmlDrawing1.vml'
    sheet2 = wb['Sheet2']
    assert sheet2.legacy_drawing == 'xl/drawings/vmlDrawing2.vml'
def test_cell_style(WorkSheetParser, datadir):
    """A cell's 's' attribute binds it to the matching workbook style."""
    datadir.chdir()
    parser = WorkSheetParser
    ws = parser.ws
    parser.shared_strings[1] = "Arial Font, 10"
    with open("complex-styles-worksheet.xml") as src:
        sheet = fromstring(src.read())
    element = sheet.find("{%s}sheetData/{%s}row[2]/{%s}c[1]" % (SHEET_MAIN_NS, SHEET_MAIN_NS, SHEET_MAIN_NS))
    assert element.get('r') == 'A2'
    assert element.get('s') == '2'
    parser.parse_cell(element)
    assert ws['A2']._style == parser.styles[2]
    assert ws['A2'].style_id == 2
def test_cell_exotic_style(WorkSheetParser, datadir):
    """pivotButton/quotePrefix flags from the style array reach the cell."""
    datadir.chdir()
    parser = WorkSheetParser
    ws = parser.ws
    # Style index 2 sets the pivotButton and quotePrefix slots.
    parser.styles = [None, None, [0,0,0,0,0,0,1,1,0]]
    src = """
    <x:c xmlns:x="http://schemas.openxmlformats.org/spreadsheetml/2006/main" r="D4" s="2">
    </x:c>
    """
    sheet = fromstring(src)
    parser.parse_cell(sheet)
    assert ws['A1'].pivotButton is False
    cell = ws['D4']
    assert cell.pivotButton is True
    assert cell.quotePrefix is True
def test_sheet_views(WorkSheetParser, datadir):
    """Sheet view settings (zoom, selections) survive a full parse."""
    datadir.chdir()
    parser = WorkSheetParser
    with open("frozen_view_worksheet.xml") as src:
        sheet = src.read()
    parser.source = sheet
    parser.parse()
    ws = parser.ws
    view = ws.sheet_view
    assert view.zoomScale == 200
    assert len(view.selection) == 3
def test_legacy_document_keep(WorkSheetParserKeepVBA, datadir):
    """With a VBA archive present, legacyDrawing relationships are kept."""
    parser = WorkSheetParserKeepVBA
    datadir.chdir()
    with open("legacy_drawing_worksheet.xml") as src:
        sheet = fromstring(src.read())
    element = sheet.find("{%s}legacyDrawing" % SHEET_MAIN_NS)
    parser.parse_legacy_drawing(element)
    assert parser.ws.legacy_drawing == 'rId3'
def test_legacy_document_no_keep(WorkSheetParser, datadir):
    """Without a VBA archive, legacyDrawing relationships are dropped."""
    parser = WorkSheetParser
    datadir.chdir()
    with open("legacy_drawing_worksheet.xml") as src:
        sheet = fromstring(src.read())
    element = sheet.find("{%s}legacyDrawing" % SHEET_MAIN_NS)
    parser.parse_legacy_drawing(element)
    assert parser.ws.legacy_drawing is None
@pytest.fixture
def Translator():
    """Fixture: the formula Translator class (for shared-formula expansion)."""
    from openpyexcel.formula import translate
    return translate.Translator
def test_shared_formula(WorkSheetParser, Translator):
    """Cells referencing a shared-formula master get a translated formula."""
    parser = WorkSheetParser
    src = """
    <x:c r="A9" t="str" xmlns:x="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
    <x:f t="shared" si="0"/>
    <x:v>9</x:v>
    </x:c>
    """
    element = fromstring(src)
    # Master formula anchored at A1; A9 should receive the A9-relative form.
    parser.shared_formula_masters['0'] = Translator("=A4*B4", "A1")
    parser.parse_cell(element)
    assert parser.ws['A9'].value == "=A12*B12"
# Module-level side effect: force all warnings to be shown so recwarn-based
# tests below can observe them.
import warnings
warnings.simplefilter("always") # so that tox doesn't suppress warnings.
def test_extended_conditional_formatting(WorkSheetParser, datadir, recwarn):
    """Unsupported extLst conditional formatting emits a UserWarning."""
    datadir.chdir()
    parser = WorkSheetParser
    with open("extended_conditional_formatting_sheet.xml") as src:
        sheet = fromstring(src.read())
    element = sheet.find("{%s}extLst" % SHEET_MAIN_NS)
    parser.parse_extensions(element)
    w = recwarn.pop()
    assert issubclass(w.category, UserWarning)
def test_row_dimensions(WorkSheetParser):
    """Rows with only layout attributes don't create a RowDimension entry."""
    src = """<row r="2" spans="1:6" x14ac:dyDescent="0.3" xmlns:x14ac="http://schemas.microsoft.com/office/spreadsheetml/2009/9/ac" />"""
    element = fromstring(src)
    parser = WorkSheetParser
    parser.parse_row(element)
    assert 2 not in parser.ws.row_dimensions
def test_shared_formulae(WorkSheetParser, datadir):
datadir.chdir()
parser = WorkSheetParser
ws = parser.ws
parser.shared_strings = ["Whatever"] * 7
with open("worksheet_formulae.xml") as src:
parser.source = src.read()
parser.parse()
assert set(ws.formula_attributes) == set(['C10'])
# Test shared forumlae
assert ws['B7'].data_type == 'f'
assert ws['B7'].value == '=B4*2'
assert ws['C7'].value == '=C4*2'
assert ws['D7'].value == '=D4*2'
assert ws['E7'].value == '=E4*2'
# Test array forumlae
assert ws['C10'].data_type == 'f'
assert ws.formula_attributes['C10']['ref'] == 'C10:C14'
assert ws['C10'].value == '=SUM(A10:A14*B10:B14)'
def test_cell_without_coordinates(WorkSheetParser, datadir):
    """Cells missing 'r' attributes are placed by implicit position."""
    datadir.chdir()
    with open("worksheet_without_coordinates.xml", "rb") as src:
        xml = src.read()
    sheet = fromstring(xml)
    el = sheet.find(".//{%s}row" % SHEET_MAIN_NS)
    parser = WorkSheetParser
    parser.shared_strings = ["Whatever"] * 10
    parser.parse_row(el)
    assert parser.ws.max_row == 1
    assert parser.ws.max_column == 5
def test_external_hyperlinks(WorkSheetParser):
    """r:id hyperlinks resolve their target through worksheet relationships."""
    src = """
    <sheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
    <hyperlink xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships"
    display="http://test.com" r:id="rId1" ref="A1"/>
    </sheet>
    """
    from openpyexcel.packaging.relationship import Relationship, RelationshipList
    r = Relationship(type="hyperlink", Id="rId1", Target="../")
    rels = RelationshipList()
    rels.append(r)
    parser = WorkSheetParser
    parser.source = src
    parser.ws._rels = rels
    parser.parse()
    assert parser.ws['A1'].hyperlink.target == "../"
def test_local_hyperlinks(WorkSheetParser):
    """Location-only hyperlinks (in-workbook) parse without relationships."""
    src = """
    <sheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" >
    <hyperlinks>
    <hyperlink ref="B4:B7" location="'STP nn000TL-10, PKG 2.52'!A1" display="STP 10000TL-10"/>
    </hyperlinks>
    </sheet>
    """
    parser = WorkSheetParser
    parser.source = src
    parser.parse()
    assert parser.ws['B4'].hyperlink.location == "'STP nn000TL-10, PKG 2.52'!A1"
def test_merge_cells(WorkSheetParser):
    """mergeCell ranges accumulate onto the worksheet's merged_cells."""
    src = """
    <sheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
    <mergeCells>
    <mergeCell ref="C2:F2"/>
    <mergeCell ref="B19:C20"/>
    <mergeCell ref="E19:G19"/>
    </mergeCells>
    </sheet>
    """
    parser = WorkSheetParser
    parser.source = src
    parser.parse()
    assert parser.ws.merged_cells == "C2:F2 B19:C20 E19:G19"
# NOTE(review): function name has a typo ("conditonal"); kept because pytest
# discovers tests by name and renaming would alter reported test ids.
def test_conditonal_formatting(WorkSheetParser):
    """cfRule dxfId indices resolve into workbook differential styles."""
    src = """
    <sheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
    <conditionalFormatting sqref="S1:S10">
    <cfRule type="top10" dxfId="25" priority="12" percent="1" rank="10"/>
    </conditionalFormatting>
    <conditionalFormatting sqref="T1:T10">
    <cfRule type="top10" dxfId="24" priority="11" bottom="1" rank="4"/>
    </conditionalFormatting>
    </sheet>
    """
    from openpyexcel.styles.differential import DifferentialStyle
    parser = WorkSheetParser
    dxf = DifferentialStyle()
    parser.differential_styles = [dxf] * 30
    parser.source = src
    parser.parse()
    assert parser.ws.conditional_formatting['T1:T10'][-1].dxf == dxf
def test_sheet_properties(WorkSheetParser):
    """sheetPr (codeName, tabColor) maps onto worksheet sheet_properties."""
    src = """
    <sheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
    <sheetPr codeName="Sheet3">
    <tabColor rgb="FF92D050"/>
    <outlinePr summaryBelow="1" summaryRight="1"/>
    <pageSetUpPr/>
    </sheetPr>
    </sheet>
    """
    parser = WorkSheetParser
    parser.source = src
    parser.parse()
    assert parser.ws.sheet_properties.tabColor.rgb == "FF92D050"
    assert parser.ws.sheet_properties.codeName == "Sheet3"
def test_sheet_format(WorkSheetParser):
    """sheetFormatPr attributes map onto worksheet sheet_format."""
    src = """
    <sheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
    <sheetFormatPr defaultRowHeight="14.25" baseColWidth="15"/>
    </sheet>
    """
    parser = WorkSheetParser
    parser.source = src
    parser.parse()
    assert parser.ws.sheet_format.defaultRowHeight == 14.25
    assert parser.ws.sheet_format.baseColWidth == 15
def test_tables(WorkSheetParser):
    """tablePart r:id references resolve to table XML targets via rels."""
    src = """
    <sheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main"
    xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
    <tableParts count="1">
    <tablePart r:id="rId1"/>
    </tableParts>
    </sheet>
    """
    parser = WorkSheetParser
    r = Relationship(type="table", Id="rId1", Target="../tables/table1.xml")
    rels = RelationshipList()
    rels.append(r)
    parser.ws._rels = rels
    parser.source = src
    parser.parse()
    assert parser.tables == ["../tables/table1.xml"]
def test_auto_filter(WorkSheetParser):
    """autoFilter (with nested sortState) attaches to the worksheet."""
    src = """
    <sheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
    <autoFilter ref="A1:AK3237">
    <sortState ref="A2:AM3269">
    <sortCondition ref="B1:B3269"/>
    </sortState>
    </autoFilter>
    </sheet>
    """
    parser = WorkSheetParser
    parser.source = src
    parser.parse()
    ws = parser.ws
    assert ws.auto_filter.ref == "A1:AK3237"
    assert ws.auto_filter.sortState.ref == "A2:AM3269"
    # Sheet-level sort_state must stay empty; sorting lives on the filter.
    assert ws.sort_state.ref is None
@pytest.mark.xfail
def test_sort_state(WorkSheetParser):
    """Top-level sortState parsing — known-failing (hence xfail)."""
    src = """
    <sheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
    <sortState ref="A2:AM3269">
    <sortCondition ref="B1:B3269"/>
    </sortState>
    </sheet>
    """
    parser = WorkSheetParser
    parser.source = src
    parser.parse()
    ws = parser.ws
    assert ws.sort_state.ref == "A2:AM3269"
def test_page_break(WorkSheetParser):
    """rowBreaks/brk elements build an equivalent PageBreak object."""
    src = """
    <sheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
    <rowBreaks count="1" manualBreakCount="1">
    <brk id="15" man="1" max="16383" min="0"/>
    </rowBreaks>
    </sheet>
    """
    expected_pagebreak = PageBreak()
    expected_pagebreak.append(Break(id=15))
    parser = WorkSheetParser
    parser.source = src
    parser.parse()
    ws = parser.ws
    assert ws.page_breaks == expected_pagebreak
| StarcoderdataPython |
1659107 | import socket
import pickle
import struct
import sys
from typing import Iterable
import traceback
'''
ModelBridgeServer (MODEL SIDE):
- Receives:
- MOOSDB Subscribed to (mainly for state construction)
- Sends:
- Heartbeat?
- Actions:
- IvP function actions (speed, course)
- MOOSDB actions (var, value) pairs to post
ModelBridgeClient (MOOSDB SIDE):
- Receives:
- Actions:
- See above
- Sends:
- Heartbeat?
- MOOSDB variables
'''
# Socket Helpers ===========================
# Message-type tags for the bridge protocol.
# NOTE(review): send_full validates the type but never puts it on the wire;
# the framing is length-prefix only, so receivers cannot distinguish types
# from the bytes alone — confirm whether this is intentional.
TYPE_CTRL=0
TYPE_ACTION=1
TYPE_MUST_POST=2
TYPE_STATE=3
TYPES = (TYPE_CTRL, TYPE_ACTION, TYPE_MUST_POST, TYPE_STATE)
HEADER_SIZE=4       # bytes in the big-endian '>i' length prefix
MAX_BUFFER_SIZE=8192  # max bytes pulled per recv() call
def recv_full(connection, timeout=None, return_read=False):
    """Receive and de-frame all complete messages currently on *connection*.

    Each message is framed as a 4-byte big-endian length header followed by
    the payload (see send_full). The first recv honors *timeout* (None blocks
    indefinitely); after that the function drains the receive queue and keeps
    reading until every started message is complete.

    :param connection: a connected socket.
    :param timeout: seconds for the initial recv, or None to block.
    :param return_read: when True, also return the total byte count read.
    :returns: list of raw payload bytes objects (un-pickled), optionally
        paired with the total bytes read.
    :raises socket.timeout: if nothing arrives within *timeout*.
    """
    messages = []
    current_len = None   # payload length of the message being assembled
    last_read = None
    total_read = None
    # Create byte string for storing the header in
    tmp_data = b''
    # Attempt first receive with timeout
    try:
        connection.settimeout(timeout)
        tmp_data = connection.recv(MAX_BUFFER_SIZE)
    finally:
        # Cleanup regardless
        connection.settimeout(None)
    last_read = len(tmp_data)
    total_read = last_read
    # If buffer read was full, call until not full
    # Attempt to empty the recv queue
    while last_read == MAX_BUFFER_SIZE:
        print('WARNING: Got max buffer attempting to clear queue...')
        try:
            # Non blocking, just checking if there is more in queue
            connection.settimeout(0.001)
            tmp_data += connection.recv(MAX_BUFFER_SIZE)
            last_read = len(tmp_data)
            total_read += last_read
        except socket.timeout:
            last_read = 0
        finally:
            connection.settimeout(None)
    # While we have data to process into messages
    while len(tmp_data) != 0:
        # Get more data if message is incomplete
        if (current_len is None and len(tmp_data) < HEADER_SIZE) or (current_len is not None and len(tmp_data) < current_len):
            # Blocking read: a partial header/payload means more is coming.
            tmp_data += connection.recv(MAX_BUFFER_SIZE)
            last_read = len(tmp_data)
            total_read += last_read
        if current_len is None:
            # We should be looking for a header
            if len(tmp_data) >= HEADER_SIZE:
                # We can construct a header (current_len)
                current_len = struct.unpack('>i', tmp_data[:HEADER_SIZE])[0]
                # Remove header data from our data store
                tmp_data = tmp_data[HEADER_SIZE:]
        # Not else b/c previous clause might have constructed it
        if current_len is not None:
            # We should be looking for a message
            if len(tmp_data) >= current_len:
                # We can construct a packed
                messages.append(tmp_data[:current_len])
                # Remove the packet just constructed from out data store
                tmp_data = tmp_data[current_len:]
                current_len = None # Signal we are looking for another header
    if return_read:
        return messages, total_read
    return messages
def send_full(connection, data, type):
    """Send *data* on *connection*, framed with a 4-byte big-endian length.

    :param connection: a connected socket.
    :param data: payload bytes (typically a pickle).
    :param type: one of TYPES; validated only — NOTE(review): the type tag is
        never transmitted, so receivers cannot distinguish message kinds from
        the wire format alone.
    :raises AssertionError: if *type* is not a known message type.
    """
    # Original check was `type < 5`, which wrongly accepted 4 (and any
    # negative value); TYPES is the authoritative set of tags.
    assert type in TYPES
    # Create C struct (in python bytes)
    # '>i' specifies a big-endian encoded integer (a standard size of 4 bytes)
    # '>ii' does two big-endian numbers
    packed_size = struct.pack('>i', len(data))
    # Concat the size (our 4-byte header) and data then send
    result = connection.sendall(packed_size+data)
    assert result is None  # sendall returns None on success
# Assertion Helpers ===========================
def checkFloat(var, error_string):
    """Coerce *var* to float, raising ValueError(*error_string*) on failure."""
    try:
        value = float(var)
    except ValueError:
        raise ValueError(error_string)
    return value
def checkInstruction(instr):
    """Validate (and normalize) an instruction dict in place.

    Required keys: 'speed' and 'course' (coerced to float), 'posts' (dict of
    MOOSDB var->value pairs to publish), 'ctrl_msg' (str control string).
    :raises AssertionError: on a missing key or wrong container type.
    :raises ValueError: when speed/course cannot be coerced to float.
    """
    assert isinstance(instr, dict), "Instruction must be a dict"
    assert "speed" in instr, "Instruction must have key 'speed'"
    instr['speed'] = checkFloat(instr['speed'], "Instruction['speed'] must be a float")
    assert "course" in instr, "Action must have key 'course'"
    instr['course'] = checkFloat(instr['course'], "Instruction['course'] must be a float")
    assert "posts" in instr, "Instruction must have key 'posts'"
    assert isinstance(instr["posts"], dict), "posts must be a dict"
    # Fixed message: it previously named the wrong key ('ctrl_str').
    assert "ctrl_msg" in instr, "Instruction must have key 'ctrl_msg'"
    assert isinstance(instr['ctrl_msg'], str), 'ctrl_msg must be string'
def checkState(state):
    """Validate that *state* is a dict carrying at least NAV_X and NAV_Y."""
    assert isinstance(state, dict), "State must be dict"
    for required in ("NAV_X", "NAV_Y"):
        assert required in state, "State must have '%s' key" % required
def checkMustPost(must_post):
    """Validate that *must_post* is a dict of MOOSDB (var, value) pairs.

    :raises AssertionError: if *must_post* is not a dict.
    """
    # Fixed message: it previously read "TYPE_MUST_POSTs must have type",
    # which did not describe the actual check (dict-ness).
    assert isinstance(must_post, dict), "must_post must be a dict"
class ModelBridgeServer:
    """Model-side endpoint of the bridge.

    Accepts exactly one MOOSDB-side client at a time, sends pickled
    instruction dicts and receives pickled state dicts.
    NOTE(review): pickle over a socket is only safe between trusted peers —
    never expose this port to untrusted hosts.
    """

    def __init__(self, hostname="localhost", port=57722):
        self.host = hostname
        self.port = port
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Line below reuses the socket address if previous socket closed but improperly
        self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self._socket.bind((self.host, self.port))
        self._client = None    # connected client socket (at most one)
        self._address = None   # client's (host, port) tuple
        self.last_read = 0     # bytes read by the most recent listen_state()

    def __enter__(self):
        return self

    def accept(self):
        """Block until a client connects, replacing any existing connection."""
        # Close current connection if we have one
        if self._client is not None:
            self._client.close()
            self._client = None
        self._socket.listen(0) # Only accept one connection
        # Wait for client
        self._client, self._address = self._socket.accept()
        print(f"Client connected from {self._address}")

    def send_instr(self, instr):
        """Send an instruction dict; return False if no/lost client, else True."""
        # Test submitted instruction
        checkInstruction(instr)
        # Fail if no client connected
        if self._client is None:
            return False
        try:
            send_full(self._client, pickle.dumps(instr), TYPE_ACTION)
        except ConnectionResetError:
            # Client has left
            self.close_client()
            return False
        return True

    def listen_state(self, timeout=None):
        """Receive one state dict; return it, or False on no client/timeout."""
        if self._client is None:
            return False
        try:
            msgs, self.last_read = recv_full(self._client, timeout=timeout, return_read=True)
        except socket.timeout:
            return False
        assert len(msgs) == 1, 'State should only come one at a time'
        state = pickle.loads(msgs[0])
        checkState(state)
        return state

    def close_client(self):
        """Close the client connection, if any (socket attribute is kept)."""
        if self._client is not None:
            self._client.close()

    def close(self):
        """Close both the client connection and the listening socket."""
        self.close_client()
        if self._socket is not None:
            self._socket.close()

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
class ModelBridgeClient:
    """MOOSDB-side endpoint of the bridge.

    Connects to a ModelBridgeServer, sends pickled state dicts and listens
    for pickled instruction dicts.
    NOTE(review): pickle over a socket is only safe between trusted peers.
    """

    def __init__(self, hostname="localhost", port=57722):
        self.host = hostname
        self.port = port
        self._socket = None  # None until connect() succeeds

    def __enter__(self):
        return self

    def connect(self, timeout=1):
        """Try to connect once; return True on success, False on timeout/refusal."""
        if self._socket is not None:
            raise RuntimeError("Clients should not be connect more than once")
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Attempt connection with timeout
        try:
            self._socket.settimeout(timeout)
            self._socket.connect((self.host, self.port))
            # Dont hold onto the timeout if we succeed
            self._socket.settimeout(None)
        except (socket.timeout, ConnectionRefusedError) as e:
            # Clean up socket
            self._socket.close()
            self._socket = None
            # Signal failure in event of timeout
            return False
        # Return status
        return True

    def send_state(self, state):
        """Send a state dict; return False if unconnected or server gone."""
        if self._socket is None:
            return False
        # Test submitted action
        checkState(state)
        try:
            send_full(self._socket, pickle.dumps(state), TYPE_STATE)
        except BrokenPipeError:
            # Server has disconnected, reset
            self.close()
            return False
        return True

    def listen(self, timeout=0.0005):
        """Receive one instruction dict; return it, or False if none arrives."""
        if self._socket is None:
            return False
        try:
            msgs = recv_full(self._socket, timeout=timeout)
        except socket.timeout:
            return False
        assert len(msgs) == 1, 'Instructions should only come one at a time'
        instr = pickle.loads(msgs[0])
        checkInstruction(instr)
        return instr

    def close(self):
        """Close the socket if open. NOTE(review): _socket is not reset to
        None here, so a later connect() would raise — confirm intended."""
        if self._socket is not None:
            self._socket.close()

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
167816 | #!/usr/bin/env python
"""The setup script."""
from setuptools import setup, find_packages
# Long description is assembled from README + changelog.
with open('README.rst') as readme_file:
    readme = readme_file.read()

with open('HISTORY.rst') as history_file:
    history = history_file.read()

# Fully pinned runtime dependencies.
requirements = [
    "numpy==1.20.3",
    "scipy==1.7.0",
    "Cython==0.29.23",
    "ConfigSpace==0.4.18",
]

setup_requirements = ['pytest-runner', ]

test_requirements = ['pytest>=3', "scikit-learn"]

setup(
    author="<NAME>",
    author_email='<EMAIL>',
    python_requires='>=3.5',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
    description="Bayesian Optimization by Density-Ratio Estimation",
    install_requires=requirements,
    # Optional backends: HpBandSter integration and TensorFlow (CPU/GPU).
    extras_require={"hpbandster": ["hpbandster==0.7.4"],
                    "tf": ["tensorflow==2.5.0"],
                    "tf-gpu": ["tensorflow-gpu==2.5.0"]},
    license="MIT license",
    long_description=readme + '\n\n' + history,
    include_package_data=True,
    keywords='bore',
    name='bore',
    packages=find_packages(include=['bore', 'bore.*']),
    setup_requires=setup_requirements,
    test_suite='tests',
    tests_require=test_requirements,
    url='https://github.com/ltiao/bore',
    version='1.5.0',
    zip_safe=False,
)
| StarcoderdataPython |
6517359 | <reponame>fraank/kaggle-birdclef-2021
import torch
from pathlib import Path
import librosa
import numpy as np
from torch.utils.data import Dataset, DataLoader
import json
import pandas as pd
import os
import math
from PIL import Image
import warnings
from helpers.audio_utils import *
from dataloaders.imbalanced_dataset_sampler import ImbalancedDatasetSampler
warnings.filterwarnings("ignore")
class AudioDataset(Dataset):
    """Audio dataset yielding mel-spectrogram snippets per recording.

    Each __getitem__ reads one audio file, cuts ``num_splits`` fixed-length
    snippets from it (random offsets when training, evenly spaced otherwise),
    optionally mixes snippets with ones buffered from earlier items (a
    mixup-style multi-label augmentation) and converts each snippet into a
    colour spectrogram image.
    """

    def __init__(self, root_dir, csv_dir, conf, bird_code, mem_size=32, file_type="ogg", num_splits=5, apply_mix_aug = False, isTraining=True, transform=None):
        # root_dir is indexed by the first character of a label in
        # __getitem__ — presumably a mapping from that character to a base
        # directory; TODO confirm against the caller.
        self.root_dir = root_dir
        self.data = list(pd.read_csv(csv_dir)[["filename", "primary_label"]].to_dict('index').values())
        self.transform = transform
        self.conf = conf  # audio config object: sampling_rate, duration, ...
        self.num_splits = num_splits
        self.isTraining = isTraining
        self.apply_mix_aug = apply_mix_aug
        self.bird_code = bird_code  # label name -> class index
        self.length = len(self.data)
        self.memory_buffer = []  # recently seen snippets used by mix_aug
        self.mem_size = mem_size
        self.file_type = file_type
        self.additional_loader_params = {
            "worker_init_fn": self.init_workers_fn
        }
        self.sampler = ImbalancedDatasetSampler

    def get_label(self, dataset, idx):
        # Label accessor used by ImbalancedDatasetSampler.
        return dataset.data[idx]["primary_label"]

    def init_workers_fn(self, worker_id):
        # Re-seed numpy in each DataLoader worker so workers do not share
        # identical RNG state (and thus identical "random" snippet offsets).
        new_seed = int.from_bytes(os.urandom(4), byteorder='little')
        np.random.seed(new_seed)

    def __len__(self):
        return self.length

    def mix_aug(self, y_snippets, label):
        """Mix incoming snippets with buffered snippets from other items.

        Returns (possibly mixed) snippets plus, per snippet, the list of all
        labels that contributed to it.  Side effect: with 90% probability a
        random subset of the raw snippets is pushed onto self.memory_buffer
        for future mixes.
        """
        labels = [[label]] * len(y_snippets)
        mixed_snippets = y_snippets[:]
        for index, m_snip in enumerate(mixed_snippets):
            # Only start mixing once the buffer is reasonably full (>80%).
            if len(self.memory_buffer)>self.mem_size*0.8:
                num_to_mix = np.random.randint(0,min(4, len(self.memory_buffer)))
                mem_y_snippets = [m_snip]
                mem_label_snippets = [label]
                for i in range(num_to_mix):
                    memory_item = self.memory_buffer.pop(0)
                    mem_y_snippets.append(memory_item["snippet"])
                    mem_label_snippets.append(memory_item["label"])
                if len(mem_y_snippets)>1:
                    # Average the waveforms; keep every contributing label.
                    mixed_snippets[index]=np.array(mem_y_snippets).sum(0)/len(mem_y_snippets)
                    labels[index] = mem_label_snippets
        if np.random.random()>0.1:
            if len(self.memory_buffer)<self.mem_size:
                # Stash 1..num_splits distinct raw snippets for later mixes.
                mem_choice = np.random.randint(1,len(y_snippets)+1)
                choices_to_add = np.random.choice(list(range(0,len(y_snippets))), mem_choice, replace=False)
                for ch in choices_to_add:
                    self.memory_buffer.append({
                        "snippet": y_snippets[ch],
                        "label": label
                    })
        return mixed_snippets, labels

    def __getitem__(self, idx):
        item = self.data[idx]
        label = item["primary_label"]
        filename = item["filename"]
        # NOTE(review): f"(unknown)" looks like a redacted filename
        # component — presumably this path should end in the actual file
        # name; confirm against the original repository.
        file_dir = self.root_dir[label[0]]/f"{label}"/f"(unknown)"
        y, duration = read_audio(file_dir, self.conf)
        # Snippet length in samples.
        snip_duration = self.conf.sampling_rate*self.conf.duration
        if self.isTraining:
            # Training: random snippet offsets (all zero when the clip is
            # shorter than one snippet).
            if duration-snip_duration <= 0:
                indices = [0]*self.num_splits
            else:
                indices = np.random.randint(0,duration-snip_duration,self.num_splits)
        else:
            # Evaluation: evenly spaced offsets covering the clip.
            if (duration-snip_duration)<=0 or math.ceil((duration-snip_duration)/self.num_splits)<=0:
                indices = [0]*self.num_splits
            else:
                indices = list(range(0, duration-snip_duration,math.ceil((duration-snip_duration)/self.num_splits)))
        y_snippets = get_snippets(y, snip_duration, indices)
        if self.apply_mix_aug:
            y_snippets, list_of_labels = self.mix_aug(y_snippets, label)
        else:
            list_of_labels = [[label]] * len(y_snippets)
        list_of_images = []
        for y_snip in y_snippets:
            image = audio_to_melspectrogram(y_snip, self.conf)
            image = mono_to_color(image)
            list_of_images.append(image)
        # Multi-hot encode the (possibly mixed) labels for each snippet.
        coded_labels = np.zeros((len(list_of_images),len(self.bird_code)))
        for index, temp_labels in enumerate(list_of_labels):
            for temp_label in temp_labels:
                label_index = self.bird_code[temp_label]
                coded_labels[index][label_index] = 1
        list_of_images = np.array(list_of_images)
        if self.transform:
            list_of_images = self.transform(list_of_images)
        return {
            "filenames":filename,
            "images": list_of_images,
            "coded_labels": torch.tensor(coded_labels).float()
        }
3335075 | <gh_stars>1-10
# Generated by Django 2.2.2 on 2019-06-13 10:26
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the KlanadTranslations model.

    Each user-facing string is stored once per language via suffixed
    columns (_de, _en, _nl, _fr); the unsuffixed field holds the
    default-language value.  Auto-generated by `makemigrations`.
    """

    dependencies = [("klanad", "0009_auto_20190613_1003")]

    operations = [
        migrations.CreateModel(
            name="KlanadTranslations",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "welcome_title",
                    models.CharField(
                        help_text="Title of the welcome message.", max_length=255
                    ),
                ),
                (
                    "welcome_title_de",
                    models.CharField(
                        help_text="Title of the welcome message.",
                        max_length=255,
                        null=True,
                    ),
                ),
                (
                    "welcome_title_en",
                    models.CharField(
                        help_text="Title of the welcome message.",
                        max_length=255,
                        null=True,
                    ),
                ),
                (
                    "welcome_title_nl",
                    models.CharField(
                        help_text="Title of the welcome message.",
                        max_length=255,
                        null=True,
                    ),
                ),
                (
                    "welcome_title_fr",
                    models.CharField(
                        help_text="Title of the welcome message.",
                        max_length=255,
                        null=True,
                    ),
                ),
                (
                    "welcome_message",
                    models.TextField(help_text="Message shown on the landing page."),
                ),
                (
                    "welcome_message_de",
                    models.TextField(
                        help_text="Message shown on the landing page.", null=True
                    ),
                ),
                (
                    "welcome_message_en",
                    models.TextField(
                        help_text="Message shown on the landing page.", null=True
                    ),
                ),
                (
                    "welcome_message_nl",
                    models.TextField(
                        help_text="Message shown on the landing page.", null=True
                    ),
                ),
                (
                    "welcome_message_fr",
                    models.TextField(
                        help_text="Message shown on the landing page.", null=True
                    ),
                ),
                (
                    "footer_email_me",
                    models.CharField(
                        help_text="Message to email the owner of the site.",
                        max_length=100,
                    ),
                ),
                (
                    "footer_email_me_de",
                    models.CharField(
                        help_text="Message to email the owner of the site.",
                        max_length=100,
                        null=True,
                    ),
                ),
                (
                    "footer_email_me_en",
                    models.CharField(
                        help_text="Message to email the owner of the site.",
                        max_length=100,
                        null=True,
                    ),
                ),
                (
                    "footer_email_me_nl",
                    models.CharField(
                        help_text="Message to email the owner of the site.",
                        max_length=100,
                        null=True,
                    ),
                ),
                (
                    "footer_email_me_fr",
                    models.CharField(
                        help_text="Message to email the owner of the site.",
                        max_length=100,
                        null=True,
                    ),
                ),
            ],
        )
    ]
| StarcoderdataPython |
3264446 | <gh_stars>0
from flask.views import MethodView
from flask import request, session
from flask import jsonify
from orm.fields import HasField
from orm.exceptions import FieldNotValidError, ObjectNotFoundError
from .login import api_login_required
'''
These two classes implement a basic JSON REST API.
'''
class APIListView(MethodView):
    """JSON REST collection endpoint.

    GET lists the logged-in user's objects; POST creates a new one owned
    by that user.  Subclasses supply the ORM model via getModel().
    """

    methods = ['GET', 'POST']
    decorators = [api_login_required]

    def getModel(self):
        # Subclasses must override this to return their ORM model class.
        raise Exception("Model not defined")

    def get(self):
        owned = self.getModel().filter(owner__exact=session['logged_in']).serialize()
        response = jsonify({"data": owned})
        response.status_code = 200
        return response

    def post(self):
        model_cls = self.getModel()
        try:
            # Deserialize the JSON payload, force ownership, persist.
            instance = model_cls.deserialize(request.json, True)
            instance.owner = session['logged_in']
            instance.save()
            response = jsonify(instance.serialize())
            response.status_code = 201  # created
            return response
        except FieldNotValidError as e:
            response = jsonify({"error": "Field '" + str(e) + "' is not valid!", "code": 400})
            response.status_code = 400
            return response
        except Exception as e:
            response = jsonify({"error": str(e), "code": 400})
            response.status_code = 400
            return response
class APIDetailView(MethodView):
    """JSON REST detail endpoint for a single owned object.

    GET returns the object (or one of its HasField relations when ``field``
    is given), PUT updates it, DELETE removes it.  Every verb requires a
    login and only allows access to objects owned by the logged-in user.
    """

    methods = ['GET', 'PUT', 'DELETE']
    decorators = [api_login_required]

    def getModel(self):
        # Subclasses must override this to return their ORM model class.
        raise Exception("Model not defined")

    def get(self, pk, field=None):
        try:
            obj = self.getModel().get(pk)
            # Ownership failures are reported as "not found" so the API
            # does not leak which ids exist.
            if obj.owner.pk != session['logged_in']:
                raise ObjectNotFoundError("Access denied")
        except ObjectNotFoundError as e:
            resp = jsonify({"error": "Object not found", "code": 404})
            resp.status_code = 404
            return resp
        # Optional relation expansion: only attributes backed by a HasField
        # descriptor (stored on the instance as _<field>) may be expanded.
        if field and hasattr(obj, field) and hasattr(obj, '_'+field) and isinstance(getattr(obj, '_'+field), HasField):
            ret = {}
            ret['data'] = getattr(obj, field).serialize()
            resp = jsonify(ret)
            resp.status_code = 200
            return resp
        resp = jsonify(obj.serialize())
        resp.status_code = 200
        return resp

    def put(self, pk, field=None):
        try:
            obj = self.getModel().get(pk)
            if obj.owner.pk != session['logged_in']:
                raise ObjectNotFoundError("Access denied")
            # NOTE(review): the payload is deserialized without reference to
            # ``pk`` (the fetched ``obj`` is only used for the ownership
            # check) — confirm the ORM takes the pk from request.json.
            populated_model = self.getModel().deserialize(request.json)
            populated_model.owner = session['logged_in']
            populated_model.save()
            resp = jsonify(populated_model.serialize())
            resp.status_code = 200
            return resp
        except FieldNotValidError as e:
            resp = jsonify({"error": "Field '" + str(e) + "' is not valid!", "code": 400})
            resp.status_code = 400
            return resp
        except Exception as e:
            # NOTE(review): unlike get(), a missing object or failed
            # ownership check lands here and is reported as 400, not 404.
            resp = jsonify({"error": str(e), "code": 400})
            resp.status_code = 400
            return resp

    def delete(self, pk, field=None):
        try:
            obj = self.getModel().get(pk)
            if obj.owner.pk != session['logged_in']:
                raise ObjectNotFoundError("Access denied")
            # Serialize before deleting so the response can echo the object.
            resp = jsonify(obj.serialize())
            obj.delete()
            resp.status_code = 200
            return resp
        except Exception as e:
            resp = jsonify({"error": str(e), "code": 400})
            resp.status_code = 400
            return resp
3220807 | import torch
import torch.nn as nn
class Discriminator2(nn.Module):
    """Bilinear discriminator scoring node embeddings against a summary.

    Produces logits for positive (h_pl) and negative (h_mi) embeddings,
    concatenated along dim 1.
    """

    def __init__(self, n_h):
        super(Discriminator2, self).__init__()
        self.f_k = nn.Bilinear(n_h, n_h, 1)
        for module in self.modules():
            self.weights_init(module)

    def weights_init(self, m):
        """Xavier-initialise Bilinear weights and zero the bias if present."""
        if not isinstance(m, nn.Bilinear):
            return
        torch.nn.init.xavier_uniform_(m.weight.data)
        if m.bias is not None:
            m.bias.data.fill_(0.0)

    def forward(self, c, h_pl, h_mi, s_bias1=None, s_bias2=None):
        # c is used as-is (no broadcasting of a per-graph summary here).
        summary = c
        score_pos = self.f_k(h_pl, summary).squeeze(2)
        score_neg = self.f_k(h_mi, summary).squeeze(2)
        if s_bias1 is not None:
            score_pos = score_pos + s_bias1
        if s_bias2 is not None:
            score_neg = score_neg + s_bias2
        return torch.cat((score_pos, score_neg), 1)
| StarcoderdataPython |
3466165 | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Functions for getting commit information from Gitiles."""
from dashboard.services import request
def GetNumbering(number, numbering_identifier, numbering_type, project, repo):
  """Fetch commit-position numbering information from the crrev API."""
  return request.RequestJson(
      'https://cr-rev.appspot.com/_ah/api/crrev/v1/get_numbering',
      'GET',
      number=number,
      numbering_identifier=numbering_identifier,
      numbering_type=numbering_type,
      project=project,
      repo=repo)
| StarcoderdataPython |
1669658 | <reponame>joey66666/Codeyard
#
# @lc app=leetcode.cn id=240 lang=python3
#
# [240] 搜索二维矩阵 II
#
# https://leetcode-cn.com/problems/search-a-2d-matrix-ii/description/
#
# algorithms
# Medium (47.92%)
# Likes: 767
# Dislikes: 0
# Total Accepted: 176.5K
# Total Submissions: 366.7K
# Testcase Example: '[[1,4,7,11,15],[2,5,8,12,19],[3,6,9,16,22],[10,13,14,17,24],[18,21,23,26,30]]\n' +
# '5'
#
# 编写一个高效的算法来搜索 m x n 矩阵 matrix 中的一个目标值 target 。该矩阵具有以下特性:
#
#
# 每行的元素从左到右升序排列。
# 每列的元素从上到下升序排列。
#
#
#
#
# 示例 1:
#
#
# 输入:matrix =
# [[1,4,7,11,15],[2,5,8,12,19],[3,6,9,16,22],[10,13,14,17,24],[18,21,23,26,30]],
# target = 5
# 输出:true
#
#
# 示例 2:
#
#
# 输入:matrix =
# [[1,4,7,11,15],[2,5,8,12,19],[3,6,9,16,22],[10,13,14,17,24],[18,21,23,26,30]],
# target = 20
# 输出:false
#
#
#
#
# 提示:
#
#
# m == matrix.length
# n == matrix[i].length
# 1 <= n, m <= 300
# -10^9 <= matrix[i][j] <= 10^9
# 每行的所有元素从左到右升序排列
# 每列的所有元素从上到下升序排列
# -10^9 <= target <= 10^9
#
#
#
# @lc code=start
"""
1. Solution1, 单行双指针, Time: (mlogn), Space: O(1), Runtime: 85%
- 纵向遍历,每行再左右双指针往中间遍历
"""
class Solution:
    def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:
        """Search a row/column-sorted matrix for target.

        Performs a genuine binary search within each candidate row, so the
        complexity really is O(m log n) — the previous two-pointer scan was
        O(n) per row despite the docstring's claim.
        """
        for row in matrix:
            # A row can only contain target if it lies within its bounds.
            if not (row[0] <= target <= row[-1]):
                continue
            lo, hi = 0, len(row) - 1
            while lo <= hi:
                mid = (lo + hi) // 2
                if row[mid] == target:
                    return True
                if row[mid] < target:
                    lo = mid + 1
                else:
                    hi = mid - 1
        return False
"""
2. Solution2, 行列双指针, Time: O(logmlogn), Space: O(1), Runtime: 97%
- 纵向确定上下range,再对range内每行左右双指针往中间遍历
"""
class Solution:
    def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:
        """Narrow the candidate row range using the first column, then scan
        each remaining row from both ends toward the middle."""
        n_rows, n_cols = len(matrix), len(matrix[0])
        top, bottom = 0, n_rows - 1
        # Rows whose smallest element already exceeds target cannot match.
        while matrix[top][0] > target and top < n_rows - 1:
            top += 1
        while matrix[bottom][0] > target and bottom > 0:
            bottom -= 1
        for r in range(top, bottom + 1):
            row = matrix[r]
            if row[0] <= target <= row[n_cols - 1]:
                lo, hi = 0, n_cols - 1
                while lo <= hi:
                    if row[lo] == target or row[hi] == target:
                        return True
                    lo += 1
                    hi -= 1
        return False
"""
3. Solution3, Z型遍历, Time: O(m + n), Space: O(1), Runtime: 93%
- 从右上角往左下角遍历
- `matrix[i][j] == target: return True`
- `matrix[i][j] > target`,因为每列为ascending,所以`matrix[i][j]`所在列都 `> target`, 所以向左移一列
- `matrix[i][j] < target`,因为每行为ascending,所以`matrix[i][j]`所在行都 `< target`, 所以向下移一行
"""
class Solution:
    def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:
        """Staircase search from the top-right corner: O(m + n).

        Moving left discards a too-large column; moving down discards a
        too-small row.
        """
        row, col = 0, len(matrix[0]) - 1
        while row < len(matrix) and col >= 0:
            value = matrix[row][col]
            if value == target:
                return True
            if value > target:
                col -= 1
            else:
                row += 1
        return False
# @lc code=end
| StarcoderdataPython |
6430481 | <reponame>mprhode/ICS_advml_workshop<filename>models.py
from sklearn.metrics import mean_squared_error, confusion_matrix, f1_score, accuracy_score
from sklearn.covariance import EllipticEnvelope
from sklearn.ensemble import IsolationForest, RandomForestClassifier, AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier, plot_tree
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import LocalOutlierFactor
from sklearn import svm
#import xgboost as xgb
import numpy as np
import pickle as pkl
from pathlib import Path
import yaml
import matplotlib.pyplot as plt
from zipfile import ZipFile
import pandas as pd
from utils import classification_cols, df_handle_categorical, ignore
from download_data import pcap_to_df
# Fixed seed so balance_data's subsampling is reproducible across runs.
np.random.seed(1)

# All saved models live under Models/<name>/ relative to the working dir.
model_folder = Path("Models")
# float64 machine epsilon, used to avoid division by zero in rate metrics.
eps = np.finfo('float64').eps

# unzip any models that need it
for item in model_folder.iterdir():
    if item.suffix == ".zip":
        if not (model_folder/item.stem).exists():
            z = ZipFile(item)
            z.extractall(model_folder)
            z.close()
class Model():
    """Base wrapper around a scikit-learn style packet-detection model.

    Handles loading/saving the fitted estimator and its config (feature
    list, problem type, contamination) under Models/<name>/, converting
    pcaps / DataFrames / ndarrays into feature matrices, and computing
    confusion-matrix based metrics.
    """

    def __init__(self, features, save_model_name=None, verbose=True):
        # If a save name is given and the directory exists, the estimator
        # and its config (including the feature list) are loaded from disk
        # and the `features` argument is ignored.
        self.save_model_path = None if save_model_name is None else model_folder / save_model_name
        self.model_exists = False if save_model_name is None else self.save_model_path.exists()
        if self.model_exists:
            if verbose:
                print("save_model_path exists, loading model and config....")
            self.load_model()
            if verbose:
                print(self.call_model)
                print(self.features)
        else:
            # Drop label columns and explicitly ignored columns.
            self.features = [f for f in features if not (f in classification_cols + ignore)]
            self.config = {
                "features": self.features
            }
        # Filled in by test(): true-positive rate, true-negative rate and
        # the confusion matrix of the last evaluation.
        self.tpr = None
        self.tnr = None
        self.cm = None

    def parse_pcap(self, filename):
        """Convert a pcap file into a DataFrame with categorical columns encoded."""
        data = pcap_to_df(filename, filename.replace(".pcap", ".csv"))
        data = df_handle_categorical(data)
        return data

    def balance_data(self, data):
        """Downsample to (roughly) equal benign/malicious counts.

        NOTE(review): min_class is min(count) - 1, so one usable sample per
        class is always discarded; the inline comment claims this eases the
        np.random.choice usage — confirm it is intentional.
        """
        ben_rows = data[data["malicious"] == 0].index
        mal_rows = data[data["malicious"] == 1].index
        min_class = min(len(ben_rows), len(mal_rows)) - 1 # -1 to allow easy coding for np.random.choice() below
        ben_rows = np.random.choice(ben_rows, size=min_class, replace=False)
        mal_rows = np.random.choice(mal_rows, size=min_class, replace=False)
        return data[data.index.isin(ben_rows) | data.index.isin(mal_rows)]

    def prep_data(self, data, train=False, malicious=None):
        """Normalise input into a (feature matrix, labels) pair.

        Accepts an (x, labels) tuple, a path to a .pcap file (``malicious``
        supplies its labels), a DataFrame, or a bare ndarray of features.
        """
        if type(data) is tuple:
            x, labels = data
        else:
            if (type(data) is str) and (".pcap" in data):
                assert malicious is not None, ("Must provide malicious labels (int or iterable) if using pcap, not None")
                data = self.parse_pcap(data)
                data["malicious"] = malicious
            elif type(data) is np.ndarray:
                data = pd.DataFrame(data, columns=self.features)
            if not ("malicious" in data.columns.values):
                # Unlabelled data is assumed malicious.
                data["malicious"] = 1
            if not self.anomaly and train:
                # Supervised training path: balance the classes first.
                data = self.balance_data(data)
            else:
                # Prediction path: ensure every expected feature column exists.
                for f in self.features:
                    if not(f in data.columns.values):
                        data[f] = 0
            x = data[self.features].values
            labels = data["malicious"].values.astype(int)
        assert len(x) == len(labels)
        return x, labels

    def get_predictions(self, x):
        return self.call_model.predict(x)

    def fit_model(self, x, labels):
        # Anomaly detectors fit unsupervised; classifiers need the labels.
        if self.anomaly:
            self.call_model.fit(x)
        else:
            self.call_model.fit(x, labels)

    def train(self, data, verbose=True, continue_training=False):
        """Fit the model (unless already saved) and report training metrics."""
        if (not self.save_model_path.exists()) or continue_training:
            # Class balancing is skipped when continuing training.
            x, labels = self.prep_data(data, train=(True and not(continue_training)))
            self.fit_model(x, labels)
            if not(self.save_model_path is None):
                self.save_model()
        self.test(data, dataset_name="Training", verbose=verbose)

    def test(self, data, verbose=True, dataset_name="Testing", malicious=None, return_x_y_preds=False):
        """Predict on *data*; store tpr/tnr/confusion matrix, optionally print."""
        x, labels = self.prep_data(data, malicious=malicious)
        predictions = self.get_predictions(x)
        predictions = predictions.round()
        self.cm = confusion_matrix(labels, predictions)
        # handle one-class perfect predictions: confusion_matrix collapses
        # to 1x1 when only one class appears, so rebuild the 2x2 layout.
        if (labels.var() == 0) and np.equal(labels, predictions).all():
            self.cm = np.zeros((2, 2))
            label_value = labels.max().astype(int)
            self.cm[label_value, label_value] = len(labels)
        tn, fp, fn, tp = self.cm.ravel().astype(np.float32)
        # eps keeps the rates defined when a class is absent.
        self.tpr = tp / (tp + fn + eps)
        self.tnr = tn / (tn + fp + eps)
        if verbose:
            print("-----")
            print("{} acc: {:.2f}, f1: {:.2f}, tpr: {:.2f}, tnr {:.2f}".format(
                dataset_name, accuracy_score(labels, predictions), f1_score(labels, predictions), self.tpr, self.tnr))
            print(self.cm)
            print("-----")
        if return_x_y_preds:
            return x, labels, predictions

    def load_model(self):
        """Restore the pickled estimator and its YAML config from disk."""
        with open(self.save_model_path / "model.pkl", "rb") as f:
            self.call_model = pkl.load(f)
        with open(self.save_model_path / "config.yml", "r") as f:
            self.config = yaml.load(f, Loader=yaml.FullLoader)
        self.features = list(self.config["features"])
        # A stored contamination ratio marks the model as an anomaly detector.
        if "contamination" in self.config.keys():
            self.contamination = self.config["contamination"]
            self.anomaly = True
        else:
            self.anomaly = False

    def save_model(self):
        """Persist estimator + config; refuses to overwrite an existing model."""
        if self.model_exists:
            print("not saving model as model already exists")
            return
        # save model and config details
        print("saving model...")
        self.save_model_path.mkdir()
        with open(self.save_model_path/"model.pkl", "wb") as f:
            pkl.dump(self.call_model, f)
        with open(self.save_model_path/"config.yml", "w") as f:
            yaml.dump(self.config, f)

    def get_classifier(self):
        return self.call_model
class BlackBoxModel(Model):
    """Pre-trained model whose feature list is hidden from the caller.

    Loads an existing saved model (features come from its config), hides
    the feature list in a name-mangled attribute, and only exposes a
    detection-rate style test(); training and saving are no-ops.
    """

    def __init__(self, save_model_name=None, verbose=False):
        super(BlackBoxModel, self).__init__(None, save_model_name=save_model_name, verbose=verbose)
        # Keep a private copy of the loaded feature list and blank the
        # public attribute so the inputs are not trivially inspectable.
        self.__features = [f for f in self.features]
        self.features = None

    def train(self):
        """No-op: the black-box model ships already trained."""
        return self.call_model

    def save_model(self):
        """No-op: the black-box model is already persisted."""
        return self.call_model

    def prep_data(self, data, train=False, malicious=1):
        """Turn a pcap path / DataFrame / ndarray / (x, labels) tuple into arrays."""
        if type(data) is tuple:
            x, labels = data
        else:
            if (type(data) is str) and (".pcap" in data):
                data = self.parse_pcap(data)
            elif type(data) is np.ndarray:
                # Fixed: was columns=self.features, which is deliberately
                # set to None here — that produced integer column names and
                # a KeyError on the feature selection below.
                data = pd.DataFrame(data, columns=self.__features)
            if not ("malicious" in data.columns.values):
                # Unlabelled data is assumed malicious.
                data["malicious"] = 1
            if not self.anomaly and train:
                data = self.balance_data(data)
            else:
                # Ensure every expected feature column exists.
                for f in self.__features:
                    if not (f in data.columns.values):
                        data[f] = 0
            x = data[self.__features].values
            labels = data["malicious"].values.astype(int)
        assert len(x) == len(labels)
        return x, labels

    def test(self, data):
        """Predict on *data* (assumed malicious) and print the detection rate."""
        # The dead `if True: #try:` scaffold and its commented-out except
        # branch were removed; behaviour is unchanged.
        x, labels = self.prep_data(data, malicious=1)
        predictions = self.get_predictions(x)
        predictions = predictions.round()
        total_detect = predictions.sum()
        print("{:.2f}% ({}) packets detected".format(total_detect*100/len(predictions), total_detect))
        # todo breakdown by packet type
        return predictions

    def __str__(self):
        return "No model data"
modelA = BlackBoxModel(save_model_name="modelB")
modelB = BlackBoxModel(save_model_name="modelC")
class AnomalyModel(Model):
    """Base class for unsupervised (anomaly-detection) models.

    Stores the expected contamination ratio in the config; its presence in
    a saved config is what marks a model as an anomaly detector on reload
    (see Model.load_model).
    """

    def __init__(self, features, contamination=None, save_model_name=None):
        super(AnomalyModel, self).__init__(features, save_model_name=save_model_name)
        self.model_name = "anomaly detection"
        self.anomaly = True
        self.config["problem"] = "anomaly"
        # NOTE(review): float(None) raises TypeError, so despite the
        # default, callers must always pass a contamination ratio.
        self.config["contamination"] = float(contamination)
        self.contamination = self.config["contamination"]

    def get_predictions(self, x):
        # transform (-1, 1) to (0, 1): sklearn anomaly detectors predict
        # -1/+1, while the metric code in Model.test expects 0/1.
        preds = (self.call_model.predict(x) + 1) / 2
        return preds
class OneClassSVM(AnomalyModel):
    """One-class SVM anomaly detector (nu = expected contamination)."""

    def __init__(self, features, contamination=None, save_model_name=None):
        super(OneClassSVM, self).__init__(features, contamination=contamination, save_model_name=save_model_name)
        # model_exists is False both for a fresh save name and when no name
        # was given; the old `self.save_model_path.exists()` raised
        # AttributeError in the latter case (save_model_path is None).
        if not self.model_exists:
            self.call_model = svm.OneClassSVM(nu=self.config["contamination"])
class ISOF(AnomalyModel):
    """Isolation Forest anomaly detector."""

    def __init__(self, features, contamination=None, save_model_name=None):
        super(ISOF, self).__init__(features, contamination=contamination, save_model_name=save_model_name)
        # None-safe replacement for `self.save_model_path.exists()`, which
        # raised AttributeError when no save_model_name was given.
        if not self.model_exists:
            self.call_model = IsolationForest(contamination=self.config["contamination"])
class LOF(AnomalyModel):
    """Local Outlier Factor detector.

    LOF caps contamination at 0.5, so the requested ratio is clipped; when
    the cap is hit, training data is balanced down to match it.
    """

    def __init__(self, features, contamination=None, save_model_name=None):
        # always default to max of 0.5 for LOF as 0.5 is its maximum and the
        # dataset is more malicious than benign.
        super(LOF, self).__init__(features, contamination=min(0.5, contamination), save_model_name=save_model_name)
        # None-safe replacement for `self.save_model_path.exists()`, which
        # raised AttributeError when no save_model_name was given.
        if not self.model_exists:
            self.call_model = LocalOutlierFactor(contamination=self.config["contamination"])

    def prep_data(self, data, train=False, malicious=None):
        """DataFrame -> (x, labels); balances classes when contamination hit the cap.

        Accepts (and ignores) the ``malicious`` keyword because Model.test()
        passes it — previously this override raised TypeError from test().
        Unlike the base class, only DataFrame input is supported here.
        """
        # balance data for LOF because contamination must be 0.5 or less
        if (self.contamination == 0.5) and train:
            data = self.balance_data(data)
        x = data[self.features].values
        labels = data["malicious"].values.astype(int)
        return x, labels

    def fit_model(self, x, labels):
        # LOF has no separate fit()/predict() in this mode; fit_predict
        # computes the neighbourhood structure.
        self.call_model.fit_predict(x)

    def get_predictions(self, x):
        # fit_predict returns -1/+1; map onto 0/1 for the metric code.
        preds = (self.call_model.fit_predict(x) + 1) / 2
        return preds
class SupervisedModel(Model):
    """Base class for label-trained (non-anomaly) classifiers."""

    def __init__(self, features, save_model_name=None):
        super(SupervisedModel, self).__init__(features, save_model_name=save_model_name)
        self.anomaly = False
        self.config["problem"] = "classification"
class RandomForest(SupervisedModel):
    """Random-forest classifier with default sklearn hyper-parameters."""

    def __init__(self, features, save_model_name=None):
        super(RandomForest, self).__init__(features, save_model_name=save_model_name)
        # None-safe replacement for `self.save_model_path.exists()`, which
        # raised AttributeError when no save_model_name was given.
        if not self.model_exists:
            self.call_model = RandomForestClassifier()
class SVM(SupervisedModel):
    """Support-vector classifier."""

    def __init__(self, features, save_model_name=None):
        super(SVM, self).__init__(features, save_model_name=save_model_name)
        # None-safe replacement for `self.save_model_path.exists()`, which
        # raised AttributeError when no save_model_name was given.
        if not self.model_exists:
            self.call_model = svm.SVC()
        # Name the model for reporting regardless of whether it was loaded
        # from disk or freshly constructed.
        self.model_name = "SVM"
class XGBoost(SupervisedModel):
    """Gradient-boosted tree classifier backed by the xgboost package.

    NOTE(review): the module-level ``import xgboost as xgb`` is commented
    out at the top of this file; this class raises NameError until it is
    restored.
    """

    def __init__(self, features, save_model_name=None):
        super(XGBoost, self).__init__(features, save_model_name=save_model_name)
        # call_model holds the xgb module; trained_model holds the fitted
        # Booster after fit_model()/load_model().
        self.call_model = xgb
        self.config["param"] = {'max_depth': 2, 'eta': 1, 'objective': 'binary:logistic'}
        self.config["num_round"] = 10
        self.model_name = "XGB"

    def fit_model(self, dataset, labels):
        self.trained_model = self.call_model.train(self.config["param"], dataset, self.config["num_round"])

    def prep_data(self, data, train=False, malicious=None):
        """DataFrame -> (DMatrix, labels).

        Accepts (and ignores) the ``malicious`` keyword because
        Model.test() passes it — previously a TypeError.
        """
        if train:
            data = self.balance_data(data)
        x = data[self.features].values
        labels = data["malicious"].values.astype(int)
        dataset = xgb.DMatrix(x, label=labels)
        return dataset, labels

    def load_model(self):
        """Restore the Booster and its YAML config from disk."""
        # Read the config first: the old code consulted self["config"]
        # (a TypeError — Model is not subscriptable) before any config
        # had been loaded.
        with open(self.save_model_path / "config.yml", "r") as f:
            self.config = yaml.load(f, Loader=yaml.FullLoader)
        bst = xgb.Booster({'nthread': self.config.get("nthread", 4)})  # init model
        # Booster.load_model mutates bst in place and returns None; the
        # old code stored that None as the trained model.
        bst.load_model(str(self.save_model_path / "model.bin"))  # load data
        self.trained_model = bst

    def save_model(self):
        """Persist Booster + config; refuses to overwrite an existing model."""
        if self.model_exists:
            print("not saving model as model already exists")
            return
        # save model and config details
        print("saving model...")
        self.save_model_path.mkdir()
        # Booster's method is save_model; the old saveModel call raised
        # AttributeError.
        self.trained_model.save_model(str(self.save_model_path / "model.bin"))
        with open(self.save_model_path / "config.yml", "w") as f:
            yaml.dump(self.config, f)

    def get_predictions(self, x):
        return self.trained_model.predict(x)

    def get_classifier(self):
        return self.trained_model
class MLP(SupervisedModel):
    """Multi-layer perceptron classifier with default sklearn settings."""

    def __init__(self, features, save_model_name=None):
        super(MLP, self).__init__(features, save_model_name=save_model_name)
        # None-safe replacement for `self.save_model_path.exists()`, which
        # raised AttributeError when no save_model_name was given.
        if not self.model_exists:
            self.call_model = MLPClassifier()
class AdaBoost(SupervisedModel):
    """AdaBoost classifier with default sklearn settings."""

    def __init__(self, features, save_model_name=None):
        super(AdaBoost, self).__init__(features, save_model_name=save_model_name)
        # None-safe replacement for `self.save_model_path.exists()`, which
        # raised AttributeError when no save_model_name was given.
        if not self.model_exists:
            self.call_model = AdaBoostClassifier()
class DecisionTree(SupervisedModel):
    """Decision-tree classifier; show() renders the fitted tree."""

    def __init__(self, features, save_model_name=None):
        super(DecisionTree, self).__init__(features, save_model_name=save_model_name)
        # None-safe replacement for `self.save_model_path.exists()`, which
        # raised AttributeError when no save_model_name was given.
        if not self.model_exists:
            self.call_model = DecisionTreeClassifier()

    def show(self):
        """Plot the fitted tree with matplotlib (blocks until the window closes)."""
        plot_tree(self.call_model)
        plt.show()
if __name__ == "__main__":
from utils import get_testing_data, get_training_data
import numpy as np
train_data = get_training_data(nrows=None)
features = [c for c in train_data.columns.values if not(c in classification_cols)]
contamination = train_data["malicious"].sum() / len(train_data)
test_data = get_testing_data(nrows=None)
AD_models = [LOF, ISOF, OneClassSVM]
print("contamination ratio", contamination)
# time model
for model_name, mc in [("dt", DecisionTree), ("MLP", MLP), ("rf", RandomForest), ("ISOF", ISOF), ("svm", SVM),
("OneClassSVM", OneClassSVM),
("LOF", LOF)]:
for feat_name, mini_feat in [
("time_model", ["time_delta", "IP__ttl"] + [x for x in features if ( "Ethernet__type" in x) or ("IP__proto" in x)]),
("src_dst_features", [x for x in features if ("src" in x) or ("dst" in x) or ("port" in x)]),
("all", features),
("all_except_src_dst", [x for x in features if not("src" in x) and not("dst" in x) and not("port" in x)]),
("IP_features", [x for x in features if "IP__" in x]),
("tcp_udp_modbus_icmp_boot", [x for x in features if ("TCP_" in x) or ("UDP_" in x) or ("MODBUS_" in x) or ("ICMP" in x) or ("BOOT" in x)])
]:
print("\n", mc, mini_feat)
if mc in AD_models:
model = mc(mini_feat, save_model_name="{}_{}".format(feat_name, model_name), contamination=contamination)
else:
model = mc(mini_feat, save_model_name="{}_{}".format(feat_name, model_name))
if model.model_exists:
continue
if "svm" in model_name.lower():
model.train(train_data[::50])
else:
model.train(train_data)
model.test(test_data)
| StarcoderdataPython |
6548971 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Parser that extracts the folder structure in the master remote storage and keeps only xml (catalogue) files.
It produces a csv file of report, which can be checked in order to integrate missing xml files, and a folder with the structure of the
xml materials, plus adding the provenance in all xml files. This folder structure will be used by the next step of the metadata_importer
"""
__author__ = """<NAME>"""
import os, codecs, csv, logging, argparse
logging.basicConfig(filename="logs/xml_parser.log", level=logging.INFO)
from bs4 import BeautifulSoup
from supporting_functions import walklevel
def xml_parser(in_folder,out_folder,csv_file):
    """
    Parse the Linked Books storage: copy each XML metadata file (with a
    <provenance> tag added) into out_folder, and write a CSV report of all
    volumes with a missing XML file, without images, unexpectedly containing
    folders, or with more than one XML file per level.

    :param in_folder: Folder pointing to the Linked Books main storage.
    :param out_folder: Folder where XML files are stored, mimicking the structure of the storage. This folder is NOT overwritten at every run, but simply updated.
    :param csv_file: CSV file where problems are reported for further action. This file is overwritten at every run.
    :return: None.
    """
    # basic checks
    assert os.path.isdir(in_folder)
    if not os.path.isdir(out_folder):
        try:
            os.makedirs(out_folder, exist_ok=True)
        except:
            logging.warning("Unable to create the out_folder: %s"%out_folder)
    assert os.path.isdir(out_folder)
    logging.info("Initial checks OK.")
    with codecs.open(csv_file, "w", "utf-8") as f:
        csv_writer = csv.writer(f, delimiter=';', quotechar='"', quoting=csv.QUOTE_NONE)
        csv_writer.writerow(["bid", "typology", "directory", "has_images", "has_many_xml", "has_subfolders", "has_meta", "provenance"])
        # Hierarchy levels: 2 = book/journal volume folders, 3 = issue
        # folders inside a journal.  Folder names are "<provenance>_<bid>".
        for root, dirs, files, level in walklevel(in_folder, 3):
            for d in dirs:
                if level == 2:
                    has_meta_bid = False
                    if len(d.split("_")) == 2: # check provenance info is there
                        provenance, bid = d.split("_")
                    else:
                        continue
                elif level == 3:
                    has_meta_issue = False
                    if len(root.split("/")[-1].split("_")) == 2: # check provenance info is there
                        provenance, bid = root.split("/")[-1].split("_")
                    else:
                        continue
                else:
                    continue
                for fil in os.listdir(os.path.join(root,d)):
                    if ".xml" in fil:
                        if level == 2:
                            has_meta_bid = True
                        if level == 3:
                            has_meta_issue = True
                        # read metadata and add provenance
                        try:
                            metadata = codecs.open(os.path.join(root, os.path.join(d,fil)), "r", "utf-8").read()
                        except:
                            # NOTE(review): on a decode error `metadata`
                            # keeps the previous iteration's value (or is
                            # undefined on the very first file) and
                            # processing continues — confirm intended.
                            logging.warning("Encoding error in %s"%(d+fil))
                        soup = BeautifulSoup(metadata, "html.parser")
                        if soup.find("provenance"): # if provenance is already there, just overwrite it
                            soup.find("provenance").string = provenance
                        else:
                            pr_tag = soup.new_tag("provenance")
                            pr_tag.string = provenance
                            # Attach under <dc> when present, else at the root.
                            if len(soup.find_all("dc")) > 0:
                                soup.dc.append(pr_tag)
                            else:
                                soup.append(pr_tag)
                        # make new dir and store new metadata in out_folder
                        new_dir = root.replace(in_folder,out_folder)
                        new_dir = os.path.join(new_dir,d)
                        if not os.path.isdir(new_dir):
                            try:
                                os.makedirs(new_dir, exist_ok=True)
                            except:
                                logging.warning("Unable to create the new_dir: %s" % new_dir)
                        try:
                            file_out = fil
                            if level == 2:
                                # Book-level metadata is normalised to <bid>.xml.
                                file_out = bid + ".xml"
                            f_out = open(os.path.join(new_dir, file_out), "wb")
                            f_out.write(soup.encode('utf-8', formatter="minimal"))
                            f_out.close()
                            logging.info("Wrote the xml file for: %s" % new_dir)
                        except:
                            logging.warning("Unable to write the xml file for: %s" % new_dir)
                # report problematic cases in csv files
                # check the presence of subfolders
                has_subfolders = False
                if len([x for x in os.listdir(os.path.join(root, d)) if os.path.isdir(os.path.join(root, d, x))]) > 0:
                    has_subfolders = True
                # check the presence of several metadata files per folder
                has_many_xml = False
                if len([x for x in os.listdir(os.path.join(root, d)) if ".xml" in x]) > 1:
                    has_many_xml = True
                # check the presence of images
                has_images = False
                if len([x for x in os.listdir(os.path.join(root, d)) if ".jpg" in x]) > 0:
                    has_images = True
                if level == 2:
                    type_doc = "book"
                    if "journals" in root:
                        type_doc = "journal"
                    # Journals may legitimately have subfolders (issues) and
                    # no images, so those checks only apply to books.
                    if ((type_doc == "book") and has_subfolders) or has_many_xml or ((type_doc == "book") and not has_images) or not has_meta_bid:
                        csv_writer.writerow([bid,type_doc,d,has_images,has_many_xml,has_subfolders,has_meta_bid,provenance])
                elif level == 3:
                    type_doc = "issue"
                    if has_subfolders or has_many_xml or not has_images or not has_meta_issue:
                        csv_writer.writerow([bid,type_doc,d,has_images,has_many_xml,has_subfolders,has_meta_issue,provenance])
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Parser the Linked Books storage and exports xml metadata files.')
parser.add_argument('--in_folder', dest='in_folder',
help='The root folder of the Linked Books storage.')
parser.add_argument('--out_folder', dest='out_folder', nargs='?', default="xml_output",
help='The folder where xml files are to be stored.')
parser.add_argument('--csv_file', dest='csv_file', nargs='?', default="xml_parser_out.csv",
help='The CSV file where the result of parsing is saved.')
args = parser.parse_args()
xml_parser(args.in_folder, args.out_folder, args.csv_file) | StarcoderdataPython |
335663 | # -*- coding: utf-8 -*-
"""
Ketchup test config
"""
import superset_patchup
from flask_appbuilder.security.manager import AUTH_OAUTH
from superset import config as ketchup_config
# Disable CSRF and enable Flask testing mode for the test run.
ketchup_config.WTF_CSRF_ENABLED = False
ketchup_config.TESTING = True
ketchup_config.SECRET_KEY = 'abc'  # throwaway key, tests only

# Apply the superset-patchup ("ketchup") customisations on top of the
# stock Superset config imported above.
superset_patchup.add_ketchup(ketchup_config)

SECRET_KEY = ketchup_config.SECRET_KEY

# Authenticate via OAuth against the OnaData staging server; the consumer
# key/secret placeholders are filled in per deployment.
AUTH_TYPE = AUTH_OAUTH
OAUTH_PROVIDERS = [
    {
        'name': 'onadata',
        'remote_app': {
            'client_id': 'consumer key goes here',
            'client_secret': 'consumer secret goes here',
            'api_base_url': 'https://stage-api.ona.io/',
            'access_token_url': 'https://stage-api.ona.io/o/token/',
            'authorize_url': 'https://stage-api.ona.io/o/authorize/',
        }
    }
]

# In-memory SQLite keeps the test database hermetic.
SQLALCHEMY_DATABASE_URI = "sqlite:///:memory:"
SQLALCHEMY_TRACK_MODIFICATIONS = False
| StarcoderdataPython |
1900538 | <gh_stars>1-10
from math import radians, sin, cos, acos
import sqlite3
from numpy import searchsorted, sort
import pandas as pd
# Shared SQLite connection.  check_same_thread=False allows use from Flask
# worker threads; access is not otherwise synchronised.
con = sqlite3.connect('api/database/data.sqlite', check_same_thread=False)
# helper functions
def great_circle(lon1, lat1, lon2, lat2):
    '''
    Great-circle distance in miles between two (lon, lat) points given in
    degrees, using the spherical law of cosines with an Earth radius of
    3958.756 mi.
    https://medium.com/@petehouston/calculate-distance-of-two-locations-on-earth-using-python-1501b1944d97
    '''
    lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
    cos_angle = sin(lat1) * sin(lat2) + cos(lat1) * cos(lat2) * cos(lon1 - lon2)
    # Clamp to acos's domain: floating-point rounding can push the value
    # marginally outside [-1, 1] for identical or antipodal points, which
    # previously raised a math domain error.
    cos_angle = max(-1.0, min(1.0, cos_angle))
    return 3958.756 * acos(cos_angle)
def ecdf(x):
    """Return the empirical CDF of sample *x* as a callable.

    The returned function maps v -> Pr(X <= v) over the sample.
    """
    x = sort(x)
    n = len(x)

    def _ecdf(v):
        # side='right' counts every sample point <= v; dividing by n keeps
        # the result in [0, 1].  The previous "+ 1" in the numerator pushed
        # the value above 1 at the sample maximum — not a valid CDF.
        return searchsorted(x, v, side='right') / n

    return _ecdf
# sql query things
def address_candidates_query(region, st_num):
    """Return all rows of table *region* whose st_num matches *st_num*.

    Table names cannot be bound as SQL parameters, so *region* must come from
    a trusted whitelist upstream; the street number is user input and is bound
    as a parameter to prevent SQL injection (it was previously spliced
    directly into the query string).
    """
    return pd.read_sql('SELECT * FROM ' + region + ' WHERE st_num = ?',
                       con, params=(st_num,))
def get_pin(region, pin):
    """Look up a single parcel by its PIN in the given region's table.

    region : 'detroit' or 'cook' (selects the region-specific PIN column)
    pin    : parcel identifier (user input; bound as a SQL parameter)
    """
    if region == 'detroit':
        pin_name = 'parcel_num'
    elif region == 'cook':
        pin_name = 'PIN'
    else:
        # Previously an unknown region fell through to an UnboundLocalError;
        # fail with a clear message instead.
        raise ValueError('Unknown region: %s' % region)
    # Table/column names come from the trusted branches above; the PIN is
    # bound instead of being spliced into the SQL string.
    qs = 'SELECT * FROM ' + region + ' WHERE ' + pin_name + ' = ?'
    return pd.read_sql(qs, con, params=(pin,))
def query_on(col, val, range_val, filter_type):
    '''
    Build a SQL "AND ..." filter fragment for column *col*.

    filter_type:
        1 -> categorical: exact match, emitted only when range_val == 'Match'
        3 -> continuous:  val +/- range_val inclusive band

    Raises Exception for any other combination.
    '''
    if (filter_type == 1) and (range_val == 'Match'):
        return ' AND "{}" = "{}"'.format(col, val)
    if filter_type == 3:
        lower = val - range_val
        upper = val + range_val
        return ' AND "{}" >= {} AND "{}" <= {}'.format(col, lower, col, upper)
    raise Exception('Query On Error')
def run_comps_query(query, val, range_val):
    # Execute a comparables SQL query and keep only rows within *range_val*
    # miles of a reference point.
    #   query     : SQL selecting candidate rows (must expose Longitude and
    #               Latitude columns when more than one row comes back)
    #   val       : reference point as (lon, lat) -- val[0]=lon, val[1]=lat
    #   range_val : maximum great-circle distance (miles) to keep
    data = pd.read_sql(query, con)
    if(data.shape[0] > 1):
        data['Distance'] = data.apply(
            lambda x: great_circle(val[0], val[1], x.Longitude, x.Latitude), axis=1)
    else:
        # NOTE(review): with <= 1 row, Distance is None and the filter below
        # compares None < range_val -- presumably meant to drop the row, but
        # this may raise under some pandas versions; confirm.
        data['Distance'] = None
    return data[data['Distance'] < range_val]
| StarcoderdataPython |
3564227 | import re
def recursive_split(text):
    # Recursively parse an "="-marked parse-tree dump into nested lists.
    # Sibling chunks start at lines beginning with a word character; children
    # are found by stripping one leading non-word marker column per level.
    eq_sub = re.compile(r'^\W', flags=re.M)
    # while re.search(r'^=',text):
    #     text = re.sub(eq_sub, '', text)
    current_level = []
    # Split into sibling chunks: a new sibling begins at a line whose first
    # character is a word character.
    contents = re.split(r'\n(?=\w)',text)
    for i in contents:
        if re.search('=', i):
            try:
                # nodes[1] is the node label line, nodes[2] the nested body.
                nodes = re.split(r'(^\w.+\n)', i)
                next_level = nodes[2]
                # Peel marker columns off the child block before recursing.
                while re.search(r'=',next_level):
                    next_level = re.sub(eq_sub, '', next_level)
                current_level.append([nodes[1], recursive_split(next_level)])
            except:
                # NOTE(review): bare except silently stops descending on any
                # error (e.g. IndexError from nodes[2], or recursion limits);
                # consider narrowing and logging -- kept as-is to preserve
                # behavior.
                print('its time to stop')
        else:
            current_level.append([i])
    return current_level
def list_tree(path):
    """Read the parsed-tree companion of *path* and return a list of
    ['UTT', subtree] entries, one per utterance found in the dump.

    For an input ``essay.txt`` the companion file ``essay_parsed_.txt`` is
    read (same basename, ``_parsed_`` suffix, ``.txt`` extension).
    """
    import os  # was referenced here without being imported at module level
    split_path = os.path.splitext(path)
    parsed_path = "%s_parsed_%s" % (split_path[0], '.txt')
    utt_list = []
    with open(parsed_path, mode='r', encoding='utf8') as file:
        # Normalize tree markers before splitting the dump into sentences.
        trees = re.sub(r'=-', '==', file.read(), flags=re.M)
        trees = re.sub(r'^=+\"=+', '', trees, flags=re.M)
        trees = re.sub(r'^\n\W+,', '', trees, flags=re.M)
        utts = re.split(r'\nsentence\n', trees)
    for i in utts:
        if re.search('UTT', i):
            # NOTE (translated from the original Portuguese comment): the
            # essays contain more than just UTT nodes -- verify.
            half = re.split('UTT.+\n', i)[1]
            half = re.split(r'\n\.', half)[0]
            arvere = ['UTT', recursive_split(half)]
            utt_list.append(arvere)
    return utt_list
def adjust_conll(path):
    """Convert a CoNLL-ish file into a cleaned copy named
    ``<base>_adaptado.conll``:

    * XML-ish lines (starting with '<') are dropped;
    * token lines (starting with a digit) get two trailing '_' columns;
    * runs of blank lines are collapsed to a single blank line;
    * sentences containing any non-token line are dropped entirely.
    """
    import os  # was referenced here without being imported at module level
    split_path = os.path.splitext(path)
    new_conll_path = "%s_adaptado%s" % (split_path[0], '.conll')
    # Pass 1: normalize individual lines into the output file.  The source is
    # only read, so open it read-only (it was previously opened 'r+').
    with open(path, 'r') as source, open(new_conll_path, 'w+') as sink:
        for line in source.readlines():
            if line.startswith('<'):
                continue
            elif re.search(r'^\d', line):
                cols = line.strip().split('\t')
                cols.extend(['_', '_'])
                sink.write('\t'.join(cols) + '\n')
            else:
                sink.write(line)
    # Pass 2: collapse runs of blank lines to a single blank separator.
    with open(new_conll_path, 'r') as f:
        text = re.sub(r'\n\s*\n', '\n\n', f.read())
    # Pass 3: keep only sentences made purely of token lines.
    with open(new_conll_path, 'w') as f:
        for sent in text.strip().split('\n\n'):
            lines = sent.strip().split('\n')
            if re.search(r'^\D', sent, flags=re.M):
                continue
            for line in lines:
                f.write(line + '\n')
            f.write('\n\n')
    return
def train_test_conll(path):
    """Randomly split the blank-line-separated sentences of a CoNLL file into
    ``<base>_train.conll`` and ``<base>_test.conll``.

    The original called sklearn's ``train_test_split``, which was never
    imported (NameError at runtime), as was ``os``.  This version keeps the
    same 75/25 shuffled split using only the standard library.
    """
    import os
    import random
    split_path = os.path.splitext(path)
    train_path = "%s_train%s" % (split_path[0], '.conll')
    test_path = "%s_test%s" % (split_path[0], '.conll')
    with open(path, 'r', encoding='utf-8') as f:
        sentences = f.read().strip().split('\n\n')
    # Shuffle a copy, then reserve 25% (at least one sentence) for test.
    shuffled = list(sentences)
    random.shuffle(shuffled)
    n_test = max(1, int(round(len(shuffled) * 0.25)))
    test, train = shuffled[:n_test], shuffled[n_test:]
    with open(train_path, 'w', encoding='utf-8') as f:
        f.write('\n\n'.join(train))
    with open(test_path, 'w', encoding='utf-8') as f:
        f.write('\n\n'.join(test))
def parsed_search(path, path_list):
    """Return 1 if the parsed companion file of *path* (``<base>_parsed_.txt``)
    is present in *path_list*, else 0.

    Kept as int 1/0 (not bool) for backward compatibility with callers.
    """
    import os  # was referenced here without being imported at module level
    split_path = os.path.splitext(path)
    path_to_search = "%s_parsed_%s" % (split_path[0], '.txt')
    return 1 if path_to_search in path_list else 0
| StarcoderdataPython |
82549 | <filename>tools/pydev/movifier.py
# ******************************************************************************************************************
# **************************************************** movefier ****************************************************
# ******************************************************************************************************************
def moveifier_init():
    """Interactively collect the header/implementation folder, file basename
    and output folder, validate each, then hand off to moveifier_scan_files().

    (Python 2 code: uses print statements and raw_input; relies on the
    module-level `os` import defined elsewhere in this file.)
    """
    print "This tool will move function definitions and the"
    print "corresponding implementation to another file."
    print ""
    folderloc = raw_input("Please input the folder containing the hpp and cpp files: ").strip()
    if not os.path.exists(folderloc):
        print folderloc
        print "Path is invalid: directory does not exist."
        return
    filenameer = raw_input("Please type the filename (excluding extension): ").strip()
    # Both <name>.hpp and <name>.cpp must exist in the chosen folder.
    header_file = os.path.join(folderloc, filenameer + ".hpp")
    if not os.path.isfile(header_file):
        print header_file
        print "Header file is invalid: file not found."
        return
    else:
        print "Header: " + header_file + " OK!"
    implementation_file = os.path.join(folderloc, filenameer + ".cpp")
    if not os.path.isfile(implementation_file):
        print implementation_file
        print "Implementation file is invalid: file not found."
        return
    else:
        print "Implementation: " + implementation_file + " OK!"
    print "OK! Header and implementation found."
    dest = raw_input("Please input the script output location: ").strip()
    if not os.path.exists(dest):
        print dest
        print "Script output folder is invalid: folder does not exist."
        return
    else:
        print "Output: " + dest + " OK!"
    moveifier_scan_files(header_file, implementation_file, dest)
def moveifier_scan_files(headerf, implf, dest):
    """Scan the header for function prototypes, pair each with its brace-
    delimited implementation in the cpp file, back up everything unmatched to
    "_unsorted.hpp"/"_unsorted.cpp" in *dest*, then run the interactive move
    loop.

    headerf : path to the .hpp file
    implf   : path to the .cpp file
    dest    : output folder for the generated file pairs

    (Python 2 code; relies on module-level helpers `peek_line`, `clearscr`,
    `os` and `re` defined elsewhere in this file.)
    """
    # Very loose C++ prototype matcher: identifier chars/colons, "(...)", ";".
    funpat = re.compile('[\w_ \:]+\(.*\)\;')
    function_definitions = []
    flag_verbose = True
    # Find all function signatures.
    with open(headerf) as f:
        for line in f:
            cleanline = line.strip()
            if funpat.match(cleanline):
                # Commented-out prototypes go straight to the unsorted backup.
                if (cleanline.startswith("\\") or cleanline.startswith("\*") or cleanline.startswith("*") or cleanline.startswith("\**")):
                    moveifier_print_to_unsorted(cleanline, os.path.join(dest, "_unsorted.hpp"))
                    continue
                function_definitions.append(cleanline)
                if flag_verbose:
                    print "Found definition: " + cleanline
            else:
                moveifier_print_to_unsorted(cleanline, os.path.join(dest, "_unsorted.hpp"))
    print "Function definitions found: " + str(len(function_definitions))
    # Find the function implementation
    implementationcounter = 0
    f_arr = []
    with open(implf) as f:
        fcache = f.readlines()
        f.seek(0)
        clineno = 0
        for definit in function_definitions:
            while True:
                line = f.readline()
                clineno = clineno + 1
                if not line: break
                if definit.strip(';') in line.strip():
                    # Track brace nesting to locate the end of the body.
                    open_braces = line.strip().count("{")
                    if open_braces < 1:
                        print "NO open braces on line" + line
                        if (peek_line(f).strip().count("{") < 1):
                            print "NO open braces on peeked line."
                            continue
                    open_braces = open_braces - line.strip().count("}")
                    function_imp = line;
                    # Blank out consumed lines so the "_unsorted.cpp" backup
                    # below only contains what was NOT matched.
                    # NOTE(review): fcache is indexed with the post-increment
                    # counter, which looks off by one -- confirm against a
                    # file whose match is on the first line.
                    fcache[clineno] = ""
                    while (open_braces > 0):
                        thisline = f.readline()
                        fcache[clineno] = ""
                        clineno = clineno + 1
                        function_imp = function_imp + "\n" + thisline
                        open_braces = open_braces + thisline.strip().count("{")
                        open_braces = open_braces - thisline.strip().count("}")
                    print "Found implementation for " + definit.strip(';')
                    f_pair = [definit, function_imp]
                    f_arr.append(f_pair)
                    implementationcounter = implementationcounter + 1
                    continue;
            # Rewind the implementation file for the next signature search.
            clineno = 0
            f.seek(0)
    print "Function implemenations found: " + str(implementationcounter)
    print "Writing unsorted backup... please wait."
    for line in fcache:
        moveifier_print_to_unsorted(line, os.path.join(dest, "_unsorted.cpp"))
    print ""
    print ""
    print " Source file parsing completed successfully. "
    print ""
    print " Header: " + headerf
    print " Implementation: " + headerf
    print " Outputting to: "
    print ""
    print " Function definitions found: " + str(len(function_definitions))
    print " Function implemenations found: " + str(implementationcounter)
    print ""
    print " Functions to be moveed: " + str(len(f_arr))
    print ""
    print ""
    print ""
    print " The next stage will move the detected functions into files."
    print " To view the manual before moving, type 'man', otherwise type 'yes' to continue or 'no' to cancel."
    print ""
    # Prompt until the user gives one of the recognized answers.
    while (True):
        ask_result = raw_input(" Do you wish to begin moving the functions? [yes/no/man]: ").strip()
        if ask_result == "yes":
            moveifier_do_move(f_arr, dest)
            clearscr()
            print ""
            print " moving finished!"
            print ""
            print " Summary: "
            print " Functions moveed: "
            print " Functions skipped: "
            print ""
            raw_input("Press enter to return to the main menu.")
            return
        elif ask_result == "no":
            return
        elif ask_result == "man":
            moveifier_show_manual(f_arr, dest)
            break
def moveifier_show_manual(f_arr, dest):
    """Print the tool manual, then offer to start the interactive move loop
    over the already-scanned (signature, implementation) pairs in *f_arr*.

    (Python 2 code; relies on the module-level `clearscr` helper.)
    """
    print ""
    print "==========================================================================="
    print ""
    print " Intercept pydev Move tool manual "
    print ""
    print " This tool is designed to assist in the fast movement of functions and their"
    print " implementation between files. The tool discovers defined functions and"
    print " attempts to match these definitions to implementations. Once this has been"
    print " done the user will then be presented with the series of functions which"
    print " were discovered and asked to provide a filename to move these functions to."
    print " "
    print " To move a function simply type the name (with no ext.) of the file you wish to move it into."
    print " "
    print " To skip a function just press enter with no filename entered. This will move the"
    print " function into a file named _skipped.xpp"
    print " "
    print " To stop moving functions press ctrl+c or close the command prompt."
    print " "
    print " "
    print " Note: Functions are moved immediatley, with no undo function, however, the original"
    print " source file from which functions come from are not modified in any way."
    print " "
    print " The next stage will move the detected functions into files."
    print " Type 'yes' to start moving functions or 'no' to cancel."
    print ""
    ask_result = raw_input(" Do you wish to begin moving the functions? [yes/no]: ").strip()
    if ask_result == "yes":
        moveifier_do_move(f_arr, dest)
        clearscr()
        print ""
        print " moving finished!"
        print ""
        print " Summary: "
        print " Functions moveed: "
        print " Functions skipped: "
        print ""
        raw_input("Press enter to return to the main menu.")
        return
    elif ask_result == "no":
        return
def moveifier_print_to_unsorted(line, dest):
    # Append *line* (newline-terminated) to the backup file at *dest*,
    # creating the file on first use.
    with open(dest, 'a') as sink:
        sink.write(line + "\n")
def moveifier_do_move(f_arr, dest):
    """Interactive move loop: for each (signature, implementation) pair ask
    the user for a destination basename, then append the signature to
    <dest>/<basename>.hpp and the implementation to <dest>/<basename>.cpp.
    An empty answer files the function under "_unsorted".

    (Python 2 code; relies on module-level `clearscr` and `os`.)
    """
    i = 0
    for func in f_arr:
        i = i + 1
        clearscr()
        print ""
        print ""
        print " --== moving Function #" + str(i) + "/" + str(len(f_arr)) + " ==--"
        print ""
        moveifier_print_func_info(func, False)
        dest_loc = raw_input(" Destination [blank to skip]: ").strip()
        if dest_loc == "":
            dest_loc = "_unsorted"
        header_op_to = os.path.join(dest, dest_loc + ".hpp")
        implementation_op_to = os.path.join(dest, dest_loc + ".cpp")
        with open(header_op_to,'a') as f: f.write(func[0] + "\n")
        with open(implementation_op_to,'a') as f: f.write(func[1] + "\n")
def moveifier_print_func_info(func, expand_impl):
    # Display one (signature, implementation) pair on the console.
    # func[0] = prototype line, func[1] = full implementation text.
    # NOTE(review): expand_impl is currently unused -- the implementation is
    # always printed in full.
    print ""
    print ""
    print " Function Signature: " + func[0]
    print ""
    print " Function Implementation:\n" + func[1]
    print ""
print "" | StarcoderdataPython |
3211819 | <reponame>scalabli/yadig<filename>examples/request_files/tutorial002.py
from typing import List
import citus
app = citus.App()
@app.post("/files/")
async def create_files(files: List[bytes] = citus.File(...)):
    """Accept multiple file uploads read fully into memory as bytes and
    return each upload's size in bytes."""
    return {"file_sizes": [len(file) for file in files]}
@app.post("/uploadfiles/")
async def create_upload_files(files: List[citus.UploadFile] = citus.File(...)):
    """Accept multiple file uploads as UploadFile objects and return their
    client-supplied filenames."""
    return {"filenames": [file.filename for file in files]}
@app.get("/")
async def main():
    """Serve a minimal HTML page with one multi-file upload form per upload
    endpoint (/files/ and /uploadfiles/).

    BUGFIX: the forms previously declared enctype="multiparse/form-data",
    which is not a valid MIME type; file uploads require
    multipart/form-data to be parsed by the server.
    """
    content = """
<body>
<form action="/files/" enctype="multipart/form-data" method="post">
<input name="files" type="file" multiple>
<input type="submit">
</form>
<form action="/uploadfiles/" enctype="multipart/form-data" method="post">
<input name="files" type="file" multiple>
<input type="submit">
</form>
</body>
    """
    return citus.responses.HTMLResponse(content=content)
| StarcoderdataPython |
257929 | <reponame>shhong/pycabnn
import numpy as np

# Wall-clock runtimes reported by five runs, as "<seconds>s" strings.
runtimes = ['791.78s',
            '819.38s',
            '899.83s',
            '827.53s',
            '807.54s']
# Drop the trailing unit and convert each entry to a float in one pass.
runtimes = [float(entry.rstrip('s')) for entry in runtimes]
print(np.mean(runtimes), np.std(runtimes))
3294379 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 23 15:13:33 2019
@author: ifenty
"""
from __future__ import division
import numpy as np
import matplotlib.pylab as plt
from .llc_array_conversion import llc_compact_to_tiles
from .llc_array_conversion import llc_compact_to_faces
from .llc_array_conversion import llc_faces_to_tiles
from .llc_array_conversion import llc_faces_to_compact
from .llc_array_conversion import llc_tiles_to_faces
from .llc_array_conversion import llc_tiles_to_compact
from .read_bin_llc import read_llc_to_compact, read_llc_to_faces, read_llc_to_tiles
from .tile_plot import plot_tiles
# Tests the read_bin_llc and llc_array_conversion routines
# %%
### Load model grid coordinates (longitude, latitude)
def run_read_bin_and_llc_conversion_test(llc_grid_dir, llc_lons_fname='XC.data',
                                         llc_hfacc_fname='hFacC.data', llc=90,
                                         llc_grid_filetype = '>f',
                                         make_plots=False):
    """
    Runs test on the read_bin_llc and llc_conversion routines

    Loads a 2D field (XC) and a 3D field (hFacC) in compact, faces and tiles
    form, converts each representation to every other one, and checks that
    all round trips reproduce the source arrays exactly.

    Parameters
    ----------
    llc_grid_dir : string
        A string with the directory of the binary file to open
    llc_lons_fname : string
        A string with the name of the XC grid file [XC.data]
    llc_hfacc_fname : string
        A string with the name of the hfacC grid file [hFacC.data]
    llc : int
        the size of the llc grid.  For ECCO v4, we use the llc90 domain
        so `llc` would be `90`.
        Default: 90
    llc_grid_filetype: string
        the file type, default is big endian (>) 32 bit float (f)
        alternatively, ('<d') would be little endian (<) 64 bit float (d)
        Deafult: '>f'
    make_plots : boolean
        A boolean specifiying whether or not to make plots
        (interactive: pauses for Enter after each figure)
        Deafult: False

    Returns
    -------
    1 : all tests passed
    0 : at least one test failed
    """
    # NOTE(review): raw_input below is Python 2 only; under Python 3 the
    # make_plots=True path raises NameError -- confirm the target interpreter.
    # SET TEST RESULT = 1 TO START
    TEST_RESULT = 1
    # %% ----------- TEST 1: 2D field XC FOM GRID FILE
    #%% 1a LOAD COMPACT
    # Load the same 2D field in all three memory layouts.
    tmpXC_c = read_llc_to_compact(llc_grid_dir, llc_lons_fname, llc=llc,
                                  filetype=llc_grid_filetype)
    tmpXC_f = read_llc_to_faces(llc_grid_dir, llc_lons_fname, llc=llc,
                                filetype=llc_grid_filetype)
    tmpXC_t = read_llc_to_tiles(llc_grid_dir, llc_lons_fname, llc=llc,
                                filetype=llc_grid_filetype)
    if make_plots:
        #plt.close('all')
        for f in range(1,6):
            plt.figure()
            plt.imshow(tmpXC_f[f]);plt.colorbar()
        plot_tiles(tmpXC_t)
        plt.draw()
        raw_input("Press Enter to continue...")
    #%% 1b CONVERT COMPACT TO FACES, TILES
    tmpXC_cf = llc_compact_to_faces(tmpXC_c)
    tmpXC_ct = llc_compact_to_tiles(tmpXC_c)
    # A conversion is exact iff the element-wise difference has the single
    # unique value 0 -- the same criterion is used for every check below.
    for f in range(1,6):
        tmp = np.unique(tmpXC_f[f] - tmpXC_cf[f])
        print ('unique diffs CF ', f, tmp)
        if len(tmp) != 1 or tmp[0] != 0:
            TEST_RESULT = 0
            print ('failed on 1b-1')
            return TEST_RESULT
    tmp = np.unique(tmpXC_ct - tmpXC_t)
    print ('unique diffs for CT ', tmp)
    if len(tmp) != 1 or tmp[0] != 0:
        TEST_RESULT = 0
        print ('failed on 1b-2')
        return TEST_RESULT
    #%% 1c CONVERT FACES TO TILES, COMPACT
    tmpXC_ft = llc_faces_to_tiles(tmpXC_f)
    tmpXC_fc = llc_faces_to_compact(tmpXC_f)
    # unique diff tests
    tmp = np.unique(tmpXC_t - tmpXC_ft)
    print ('unique diffs for FT ', tmp)
    if len(tmp) != 1 or tmp[0] != 0:
        TEST_RESULT = 0
        print ('failed on 1c-1')
        return TEST_RESULT
    tmp = np.unique(tmpXC_fc - tmpXC_c)
    print ('unique diffs FC', tmp )
    if len(tmp) != 1 or tmp[0] != 0:
        TEST_RESULT = 0
        print ('failed on 1c-2')
        return TEST_RESULT
    #%% 1d CONVERT TILES to FACES, COMPACT
    tmpXC_tf = llc_tiles_to_faces(tmpXC_t)
    tmpXC_tc = llc_tiles_to_compact(tmpXC_t)
    # unique diff tests
    for f in range(1,6):
        tmp = np.unique(tmpXC_f[f] - tmpXC_tf[f])
        print ('unique diffs for TF ', f, tmp)
        if len(tmp) != 1 or tmp[0] != 0:
            TEST_RESULT = 0
            print ('failed on 1d-1')
            return TEST_RESULT
    tmp = np.unique(tmpXC_tc - tmpXC_c)
    print ('unique diffs TC', tmp)
    if len(tmp) != 1 or tmp[0] != 0:
        TEST_RESULT = 0
        print ('failed on 1d-2')
        return TEST_RESULT
    #%% 1e CONVERT COMPACT TO FACES TO TILES TO FACES TO COMPACT
    # Full round trip: compact -> faces -> tiles -> faces -> compact.
    tmpXC_cftfc = llc_faces_to_compact(llc_tiles_to_faces(llc_faces_to_tiles(llc_compact_to_faces(tmpXC_c))))
    tmp = np.unique(tmpXC_cftfc - tmpXC_c)
    print ('unique diffs CFTFC', tmp)
    if len(tmp) != 1 or tmp[0] != 0:
        TEST_RESULT = 0
        print ('failed on 1e')
        return TEST_RESULT
    # %% ----------- TEST 2: 3D fields HFACC FOM GRID FILE
    #%% 2a LOAD COMPACT
    # Same battery of checks for a 3D field with nk=50 vertical levels.
    tmpHF_c = read_llc_to_compact(llc_grid_dir, llc_hfacc_fname, llc=llc,nk=50,
                                  filetype=llc_grid_filetype)
    tmpHF_f = read_llc_to_faces(llc_grid_dir, llc_hfacc_fname, llc=llc, nk=50,
                                filetype=llc_grid_filetype)
    tmpHF_t = read_llc_to_tiles(llc_grid_dir, llc_hfacc_fname, llc=llc, nk=50,
                                filetype=llc_grid_filetype)
    if make_plots:
        #plt.close('all')
        plt.imshow(tmpHF_c[0,:]);plt.colorbar()
        plot_tiles(tmpHF_t[:,0,:])
        plot_tiles(tmpHF_t[:,20,:])
        plt.draw()
        raw_input("Press Enter to continue...")
    #%% 2b CONVERT COMPACT TO FACES, TILES
    tmpHF_cf = llc_compact_to_faces(tmpHF_c)
    tmpHF_ct = llc_compact_to_tiles(tmpHF_c)
    # unique diff tests
    for f in range(1,6):
        tmp = np.unique(tmpHF_f[f] - tmpHF_cf[f])
        print ('unique diffs CF ', f, tmp)
        if len(tmp) != 1 or tmp[0] != 0:
            TEST_RESULT = 0
            print ('failed on 2b-1')
            return TEST_RESULT
    tmp = np.unique(tmpHF_ct - tmpHF_t)
    print ('unique diffs CT ', tmp)
    if len(tmp) != 1 or tmp[0] != 0:
        TEST_RESULT = 0
        print ('failed on 2b-2')
        return TEST_RESULT
    if make_plots:
        for k in [0, 20]:
            for f in range(1,6):
                plt.figure()
                plt.imshow(tmpHF_cf[f][k,:], origin='lower');plt.colorbar()
                plt.draw()
                raw_input("Press Enter to continue...")
    #%% 2c CONVERT FACES TO TILES, COMPACT
    tmpHF_ft = llc_faces_to_tiles(tmpHF_f)
    tmpHF_fc = llc_faces_to_compact(tmpHF_f)
    if make_plots:
        #plt.close('all')
        plot_tiles(tmpHF_ft[:,0,:])
        plot_tiles(tmpHF_ft[:,20,:])
        plt.draw()
        raw_input("Press Enter to continue...")
    # unique diff tests
    tmp = np.unique(tmpHF_t - tmpHF_ft)
    print ('unique diffs FT ', tmp)
    if len(tmp) != 1 or tmp[0] != 0:
        TEST_RESULT = 0
        print ('failed on 2c-1')
        return TEST_RESULT
    tmp = np.unique(tmpHF_fc - tmpHF_c)
    print ('unique diffs FC', tmp)
    if len(tmp) != 1 or tmp[0] != 0:
        TEST_RESULT = 0
        print ('failed on 2c-2')
        return TEST_RESULT
    #%% 2d CONVERT TILES to FACES, COMPACT
    tmpHF_tf = llc_tiles_to_faces(tmpHF_t)
    tmpHF_tc = llc_tiles_to_compact(tmpHF_t)
    if make_plots:
        #plt.close('all')
        for k in [0, 20]:
            for f in range(1,6):
                plt.figure()
                plt.imshow(tmpHF_tf[f][k,:], origin='lower');plt.colorbar()
                plt.draw()
                raw_input("Press Enter to continue...")
    # unique diff tests
    for f in range(1,6):
        tmp = np.unique(tmpHF_f[f] - tmpHF_tf[f])
        print ('unique diffs TF ', f, tmp)
        if len(tmp) != 1 or tmp[0] != 0:
            TEST_RESULT = 0
            print ('failed on 2d-1')
            return TEST_RESULT
    tmp = np.unique(tmpHF_tc - tmpHF_c)
    print ('unique diffs TC ', tmp)
    if len(tmp) != 1 or tmp[0] != 0:
        TEST_RESULT = 0
        # BUGFIX: this branch previously printed 'failed on 2d-1', the same
        # label as the tiles->faces check above, making failures ambiguous.
        print ('failed on 2d-2')
        return TEST_RESULT
    #%% 2e CONVERT COMPACT TO FACES TO TILES TO FACES TO COMPACT
    tmpHF_cftfc = llc_faces_to_compact(llc_tiles_to_faces(
        llc_faces_to_tiles(llc_compact_to_faces(tmpHF_c))))
    tmp = np.unique(tmpHF_cftfc - tmpHF_c)
    print ('unique diffs CFTFC ', tmp)
    if len(tmp) != 1 or tmp[0] != 0:
        TEST_RESULT = 0
        print ('failed on 2e')
        return TEST_RESULT
    print ('YOU MADE IT THIS FAR, TESTS PASSED!')
    return TEST_RESULT
####################### ###########################
#%%
if __name__ == "__main__":
    # Manual smoke test against a locally mounted ECCO v4r3 llc90 grid.
    import sys
    import matplotlib
    sys.path.append('/Users/ifenty/ECCOv4-py/')
    import ecco_v4_py as ecco
    import matplotlib.pylab as plt
    llc_grid_dir = '/Volumes/ECCO_BASE/ECCO_v4r3/grid_llc90/'
    # BUGFIX: these assignments previously ended with stray trailing commas,
    # silently turning each value into a one-element tuple.
    llc_lons_fname = 'XC.data'
    llc_hfacc_fname = 'hFacC.data'
    llc = 90
    llc_grid_filetype = '>f'
    make_plots = False
    #%%
    TEST_RESULT = ecco.run_read_bin_and_llc_conversion_test(llc_grid_dir, make_plots=True)
    print(TEST_RESULT)
| StarcoderdataPython |
1661704 | import torch
from torchtext.legacy import data
from torchtext.legacy.data import Field, BucketIterator
from io import BytesIO
from zipfile import ZipFile
from urllib.request import urlopen
import json
import pandas as pd
from .SeqDataset import SeqDataset
class CommonsenseQADataset(SeqDataset):
    """CommonsenseQA question/answer pairs exposed as a seq2seq dataset.

    Downloads one or more JSON-lines files; the question stem becomes the
    source sequence ('src') and the text of the gold answer choice the
    target ('trg').
    """
    def __init__(self, data_path, seed, batch_size, device, split_ratio=None):
        # split_ratio previously used a mutable default ([0.7, 0.3]); keep
        # the same effective default without sharing one list object across
        # instances.
        self.split_ratio = [0.7, 0.3] if split_ratio is None else split_ratio
        self.data_path = data_path
        self.seed = seed
        self.device = device
        self.batch_size = batch_size
        self.seq_data = self.load_data(self.data_path)
    def load_data(self, data_path):
        """Download *data_path* (one URL or a list of URLs) and return a
        DataFrame with columns 'src' (question stem) and 'trg' (answer text).
        """
        if not isinstance(data_path, list):
            data_path = [data_path]
        seq_data = pd.DataFrame()
        for url in data_path:
            # Close the HTTP response promptly instead of leaking the socket
            # (urlopen() was previously never closed).
            with urlopen(url) as resp:
                raw = resp.read().decode()
            data = pd.read_json(raw, lines=True)
            df = pd.json_normalize(data.to_dict(orient='records'))
            # The target is the choice whose label matches the gold answerKey.
            df['trg'] = df.apply(
                lambda r: [x for x in r['question.choices']
                           if x['label'] == r['answerKey']][0]['text'],
                axis=1)
            df = df[['question.stem', 'trg']]
            seq_data = pd.concat([seq_data, df]).reset_index(drop=True)
        seq_data.columns = ['src', 'trg']
        return seq_data
| StarcoderdataPython |
114029 | #
# Copyright (C) 2014 Dell, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import logging
import docker
import errno
import random
import six
import sys
import time
from . import audit
from .errors import AlreadyInitializedError
from .errors import EmbargoContainerConflictError
from .errors import EmbargoError
from .errors import DockerContainerNotFound
from .errors import InsufficientPermissionsError
from .net import NetworkState
from .state import EmbargoState
# TODO: configurable timeout
DEFAULT_KILL_TIMEOUT = 3
_logger = logging.getLogger(__name__)
class Embargo(object):
    def __init__(self, config, embargo_id=None, state=None,
                 network=None, docker_client=None):
        # config        : embargo configuration (containers, links, network)
        # embargo_id    : identifier used to namespace container/network names
        #                 (ignored when an explicit *state* is supplied)
        # state         : pre-built EmbargoState, mainly for tests
        # network       : network-manipulation backend
        # docker_client : docker API client; defaults to one configured from
        #                 the environment (DOCKER_HOST etc.)
        self.config = config
        self.state = state or EmbargoState(embargo_id=embargo_id)
        self.network = network
        try:
            self._audit = audit.EventAuditor(self.state.get_audit_file())
        except Exception as ex:
            # Log auditor construction failures, then re-raise: the object is
            # unusable without its audit trail.
            _logger.exception(ex)
            raise
        default_client = docker.APIClient(
            **docker.utils.kwargs_from_env(assert_hostname=False)
        )
        self.docker_client = docker_client or default_client
    def create(self, verbose=False, force=False):
        """Create and start every configured container, in sorted order.

        verbose : echo per-container progress to stdout
        force   : on a name conflict, remove the offending container and retry

        Returns a list of container descriptions.  Raises
        AlreadyInitializedError when a state file already exists, and
        EmbargoError when the user-defined network cannot be created.
        """
        container_state = {}
        num_containers = len(self.config.sorted_containers)
        # we can check if a state file already exists beforehand
        if self.state.exists():
            raise AlreadyInitializedError('a embargo already exists in here - '
                                          'you may want to destroy it first')
        def vprint(msg):
            # Progress writer; no-op unless verbose was requested.
            if verbose:
                sys.stdout.write(msg)
                sys.stdout.flush()
        if self.config.is_udn():
            # Create custom network to allow docker resolve container hostnames
            # via built-in DNS server.
            response = self.docker_client.create_network(
                self.state.embargo_net_name)
            if response['Warning']:
                raise EmbargoError("Error while creating network: '%s'" %
                                   (response['Warning']))
        for idx, container in enumerate(self.config.sorted_containers):
            name = container.name
            vprint("\r[%d/%d] Starting '%s' " % (idx+1, num_containers, name))
            # in case a startup delay is configured
            # we have to wait in here
            if container.start_delay > 0:
                vprint('(delaying for %s seconds)' % (container.start_delay))
                time.sleep(container.start_delay)
            container_id = self._start_container(container, force)
            container_state[name] = {'id': container_id}
        # clear progress line
        vprint('\r')
        # try to persist container states
        self.state.initialize(container_state)
        container_descriptions = []
        for container in self.config.sorted_containers:
            description = self._get_container_description(container.name)
            container_descriptions.append(description)
        return container_descriptions
def _get_device_id(self, container_id, container_name):
# next we have to determine the veth pair of host/container
# that we formerly could pass in via 'lxc_conf' which is
# deprecated since docker > 1.6
device = None
try:
device = self.network.get_container_device(self.docker_client, container_id)
except OSError as err:
if err.errno in (errno.EACCES, errno.EPERM):
msg = "Failed to determine network device of container '%s' [%s]" % (container_name, container_id)
raise InsufficientPermissionsError(msg)
raise
return device
def __get_container_links(self, container):
links = {}
for link, alias in container.links.items():
link_container = self.config.containers.get(link, None)
if not link_container:
raise EmbargoError("link '%s' of container '%s' does not exist" %
(link, container.name))
name = link_container.get_name(self.state.embargo_id)
links[name] = alias
return links
    def _start_container(self, container, force=False):
        """Create and start one docker container from its config entry.

        force : when the container name is already taken, remove the existing
                container and retry the creation once.

        Returns the new container id.  Raises EmbargoContainerConflictError
        on an unresolved name conflict (HTTP 409 from the docker API).
        """
        container_name = container.get_name(self.state.embargo_id)
        volumes = list(container.volumes.values()) or None
        links = self.__get_container_links(container)
        # the docker api for port bindings is `internal:external`
        port_bindings = dict((v, k) for k, v in container.publish_ports.items())
        if self.config.is_udn():
            network_mode = self.state.embargo_net_name
        else:
            network_mode = None
        host_config = self.docker_client.create_host_config(
            binds=container.volumes,
            dns=container.dns,
            port_bindings=port_bindings,
            network_mode=network_mode,
            ulimits=[{'name': 'core', 'soft': 3145728, 'hard': 4194304}],
            links=links,
            cap_add=container.cap_add)
        def create_container():
            # try to create container; labeled with the embargo id so the
            # group can be found again later.
            response = self.docker_client.create_container(
                container.image,
                command=container.command,
                name=container_name,
                ports=container.expose_ports,
                volumes=volumes,
                hostname=container.hostname,
                environment=container.environment,
                host_config=host_config,
                labels={"embargo.id": self.state.embargo_id})
            return response['Id']
        try:
            container_id = create_container()
        except docker.errors.APIError as err:
            if err.response.status_code == 409 and err.is_client_error():
                # if force is set we are retrying after removing the
                # container with that name first
                if force and self.__try_remove_container(container_name):
                    container_id = create_container()
                else:
                    raise EmbargoContainerConflictError(err)
            else:
                raise
        # start container
        self.docker_client.start(container_id)
        return container_id
def __try_remove_container(self, name):
try:
self.docker_client.remove_container(name, force=True)
return True
except Exception:
# TODO: log error?
return False
    def _get_container_description(self, name, network_state=True,
                                   ip_partitions=None):
        """Build a Container description for the state entry *name*.

        network_state : when True and the container is UP, also resolve its
                        network device and current network state
        ip_partitions : optional {ip: partition_id} map used to annotate the
                        description with its partition

        Returns a Container with status MISSING/UP/DOWN plus extras
        (ip_address, device, network_state, partition, neutral, holy).
        """
        self.state.load()
        state_container = self.state.containers[name]
        container_id = state_container['id']
        try:
            container = self._inspect_container(container_id)
        except DockerContainerNotFound:
            return Container(name, container_id, ContainerStatus.MISSING)
        state_dict = container.get('State')
        if state_dict and state_dict.get('Running'):
            container_status = ContainerStatus.UP
        else:
            container_status = ContainerStatus.DOWN
        extras = {}
        network = container.get('NetworkSettings')
        ip = None
        if network:
            ip = network.get('IPAddress')
            networks = network.get('Networks')
            if self.config.is_udn():
                # On a user-defined network the address lives under the
                # per-network settings, not the top-level IPAddress field.
                ip = networks.get(
                    self.state.embargo_net_name).get('IPAddress')
            elif networks and not ip:
                if len(networks) == 1:
                    ip = six.next(six.itervalues(networks)).get('IPAddress')
            if ip:
                extras['ip_address'] = ip
        if (network_state and name in self.state.containers
                and container_status == ContainerStatus.UP):
            device = self._get_device_id(container_id, name)
            extras['device'] = device
            extras['network_state'] = self.network.network_state(device)
            # include partition ID if we were provided a map of them
            if ip_partitions and ip:
                extras['partition'] = ip_partitions.get(ip)
        else:
            extras['network_state'] = NetworkState.UNKNOWN
            extras['device'] = None
        # lookup 'holy' and 'neutral' containers
        # TODO: this might go into the state as well..?
        cfg_container = self.config.containers.get(name)
        extras['neutral'] = cfg_container.neutral if cfg_container else False
        extras['holy'] = cfg_container.holy if cfg_container else False
        return Container(name, container_id, container_status, **extras)
    def destroy(self, force=False):
        """Stop and remove every embargo container, restore the network,
        delete the state file, and (for user-defined networks) remove the
        docker network if it still exists.

        NOTE(review): the *force* parameter is currently unused.
        """
        containers = self._get_embargo_docker_containers()
        for container in list(containers.values()):
            container_id = container['Id']
            self.docker_client.stop(container_id, timeout=DEFAULT_KILL_TIMEOUT)
            self.docker_client.remove_container(container_id)
        self.network.restore(self.state.embargo_id)
        self.state.destroy()
        if self.config.is_udn():
            try:
                self.docker_client.remove_network(self.state.embargo_net_name)
            except docker.errors.APIError as err:
                # A 404 means the network is already gone -- not an error.
                if err.response.status_code != 404:
                    raise
    # Get the containers that are part of the initial Embargo group
    def _get_embargo_docker_containers(self):
        """Return {state_name: docker_container} for all docker containers
        labeled with this embargo's id that are known to the state file."""
        self.state.load()
        containers = {}
        filters = {"label": ["embargo.id=" + self.state.embargo_id]}
        prefix = self.state.embargo_id + "_"
        for container in self.docker_client.containers(all=True, filters=filters):
            for name in container['Names']:
                # strip leading '/'
                name = name[1:] if name[0] == '/' else name
                # strip prefix. containers will have these UNLESS `container_name`
                # was specified in the config
                name = name[len(prefix):] if name.startswith(prefix) else name
                if name in self.state.containers:
                    containers[name] = container
                    break
        return containers
    def _get_docker_containers(self):
        """Return {state_name: docker_container} for every container the
        state file knows about, looking up by label first and by stored
        container id for anything the label search missed."""
        self.state.load()
        containers = self._get_embargo_docker_containers()
        # Search for and add any containers that were added to the state
        for state_container_name in self.state.containers:
            if state_container_name not in containers.keys():
                container_id = self.state.container_id(state_container_name)
                filters = {"id": container_id}
                for container in self.docker_client.containers(all=True, filters=filters):
                    containers[state_container_name] = container
        return containers
def _get_all_containers(self):
self.state.load()
containers = []
ip_partitions = self.network.get_ip_partitions(self.state.embargo_id)
docker_containers = self._get_docker_containers()
for name in docker_containers.keys():
container = self._get_container_description(name, ip_partitions=ip_partitions)
containers.append(container)
return containers
    def status(self):
        """Return a description object for every container of this embargo."""
        return self._get_all_containers()
    def _get_running_containers(self, container_names=None, select_random=False):
        # Containers currently UP (optionally a single one picked at random).
        return self._get_containers_with_state(container_names, select_random, ContainerStatus.UP)
    def _get_created_containers(self, container_names=None, select_random=False):
        # Containers that exist at all (UP or DOWN), i.e. created but
        # possibly stopped.
        return self._get_containers_with_state(container_names, select_random,
                                               ContainerStatus.UP, ContainerStatus.DOWN)
def _get_containers_with_state(self, container_names, select_random, *container_states):
containers = self._get_all_containers()
candidates = dict((c.name, c) for c in containers
if c.status in container_states)
if select_random and candidates:
return [random.choice(list(candidates.values()))]
if container_names is None:
return list(candidates.values())
found = []
for name in container_names:
container = candidates.get(name)
if not container:
raise EmbargoError("Container %s is not found or not any of %s"
% (name, container_states))
found.append(container)
return found
    def _get_running_container(self, container_name):
        # Convenience wrapper: resolve exactly one running container by name
        # (raises via _get_containers_with_state when it is not running).
        return self._get_running_containers((container_name,))[0]
    def __with_running_container_device(self, container_names, func, select_random=False):
        """Apply *func* to the network device of each named running container
        and audit-log the outcome (event name = func.__name__).

        Returns the list of affected container names; re-raises any failure
        after recording it in the audit log.
        """
        message = ""
        audit_status = "Success"
        try:
            containers = self._get_running_containers(container_names, select_random)
            container_names = [c.name for c in containers]
            for container in containers:
                device = self._get_device_id(container.container_id, container.name)
                func(device)
            return container_names
        except Exception as ex:
            audit_status = "Failed"
            message = str(ex)
            raise
        finally:
            # The audit record is written whether the operation succeeded or
            # not; container_names may still hold the raw request on failure.
            self._audit.log_event(func.__name__, audit_status, message,
                                  container_names)
    def flaky(self, container_names, select_random=False):
        # Apply the network backend's "flaky" treatment to the containers'
        # devices; outcome is audit-logged.
        return self.__with_running_container_device(container_names, self.network.flaky, select_random)
    def slow(self, container_names, select_random=False):
        # Apply the network backend's "slow" treatment to the containers'
        # devices; outcome is audit-logged.
        return self.__with_running_container_device(container_names, self.network.slow, select_random)
    def duplicate(self, container_names, select_random=False):
        # Apply the network backend's "duplicate" treatment to the
        # containers' devices; outcome is audit-logged.
        return self.__with_running_container_device(container_names, self.network.duplicate, select_random)
    def fast(self, container_names, select_random=False):
        # Apply the network backend's "fast" treatment -- presumably restores
        # normal network behavior (confirm against the network backend).
        return self.__with_running_container_device(container_names, self.network.fast, select_random)
def restart(self, container_names, select_random=False):
    """Stop, then start the selected running containers, auditing the outcome."""
    note = ""
    outcome = "Success"
    try:
        targets = self._get_running_containers(container_names, select_random)
        container_names = [c.name for c in targets]
        for target in targets:
            self._stop(target)
            self._start(target.name)
        return container_names
    except Exception as err:
        note = str(err)
        outcome = "Failed"
        raise
    finally:
        self._audit.log_event('restart', outcome, note,
                              container_names)
def kill(self, container_names, signal="SIGKILL", select_random=False):
    """Send *signal* to the selected running containers, auditing the outcome."""
    note = ''
    outcome = "Success"
    try:
        targets = self._get_running_containers(container_names, select_random)
        container_names = [c.name for c in targets]
        for target in targets:
            self._kill(target, signal)
        return container_names
    except Exception as err:
        note = str(err)
        outcome = "Failed"
        raise
    finally:
        self._audit.log_event('kill', outcome, note,
                              container_names)
def _kill(self, container, signal):
    # Deliver the given signal (e.g. SIGKILL) through the Docker API.
    self.docker_client.kill(container.container_id, signal)
def stop(self, container_names, select_random=False):
    """Stop the selected containers, auditing the outcome.

    Stopping an already stopped container is valid, so selection is done
    over all created (UP or DOWN) containers.
    """
    note = ''
    outcome = "Success"
    try:
        targets = self._get_created_containers(container_names, select_random)
        container_names = [c.name for c in targets]
        for target in targets:
            self._stop(target)
        return container_names
    except Exception as err:
        note = str(err)
        outcome = "Failed"
        raise
    finally:
        self._audit.log_event('stop', outcome, note,
                              container_names)
def _stop(self, container):
    # Stop via Docker, allowing DEFAULT_KILL_TIMEOUT seconds for a clean
    # shutdown before the daemon kills the container.
    self.docker_client.stop(container.container_id, timeout=DEFAULT_KILL_TIMEOUT)
def start(self, container_names, select_random=False):
    """Start the selected containers, auditing the outcome.

    Starting an already running container is valid, so selection is done
    over all created (UP or DOWN) containers.
    """
    note = ''
    outcome = "Success"
    try:
        targets = self._get_created_containers(container_names, select_random)
        container_names = [c.name for c in targets]
        for name in container_names:
            self._start(name)
        return container_names
    except Exception as err:
        note = str(err)
        outcome = "Failed"
        raise
    finally:
        self._audit.log_event('start', outcome, note,
                              container_names)
def _start(self, container):
    # Resolve the Docker id for this container name from persisted state;
    # silently skip names that are unknown to the state file.
    container_id = self.state.container_id(container)
    if container_id is None:
        return
    # TODO: determine between create and/or start?
    self.docker_client.start(container_id)
    # update state
    # NOTE(review): this mutates the dict returned by self.state.containers
    # but never calls self.state.update(), unlike add_container() below --
    # the change persists only if `containers` is a live reference; confirm.
    updated_containers = self.state.containers
    updated_containers[container] = {'id': container_id}
def random_partition(self):
    """Split the running, non-holy containers into a random number of groups.

    Chooses 1..N partitions; choosing a single partition means no split,
    in which case any existing partitions are joined instead. Returns the
    list of partitions created (empty when nothing was partitioned).
    """
    containers = [c.name for c in self._get_running_containers()
                  if not c.holy]
    # no containers to partition
    if not containers:
        return []
    num_containers = len(containers)
    num_partitions = random.randint(1, num_containers)
    # no partition at all -> join
    if num_partitions <= 1:
        self.join()
        return []

    def pick():
        # remove and return one random remaining container name
        return containers.pop(random.randint(0, len(containers) - 1))

    # pick at least one container for each partition
    # (range, not the Python-2-only xrange, so this works on Python 3)
    partitions = [[pick()] for _ in range(num_partitions)]
    # distribute the rest of the containers among the partitions
    for _ in range(len(containers)):
        random_partition = random.randint(0, num_partitions - 1)
        partitions[random_partition].append(pick())
    self.partition(partitions)
    return partitions
def partition(self, partitions):
    """Apply the given container partitions to the network, auditing the outcome."""
    note = ''
    outcome = "Success"
    try:
        running = self._get_running_containers()
        by_name = {c.name: c for c in running}
        partitions = expand_partitions(running, partitions)
        groups = [[by_name[name] for name in part] for part in partitions]
        self.network.partition_containers(self.state.embargo_id,
                                          groups)
    except Exception as err:
        note = str(err)
        outcome = "Failed"
        raise
    finally:
        self._audit.log_event('partition', outcome, note,
                              partitions)
def join(self):
    """Remove all network partitions, restoring full connectivity."""
    note = ''
    outcome = "Success"
    try:
        self.state.load()
        self.network.restore(self.state.embargo_id)
    except Exception as err:
        note = str(err)
        outcome = "Failed"
        raise
    finally:
        self._audit.log_event('join', outcome, note, [])
def logs(self, container_name):
    """Return the Docker logs of the named running container."""
    target = self._get_running_container(container_name)
    return self.docker_client.logs(target.container_id)
def _inspect_container(self, container_id):
    """Inspect a container, mapping a Docker 404 to DockerContainerNotFound."""
    try:
        return self.docker_client.inspect_container(container_id)
    except docker.errors.APIError as err:
        if err.response.status_code != 404:
            raise
        raise DockerContainerNotFound(
            "Aborting. Docker container not found: %s" % container_id)
# containers can be the Docker ID or name
def add_container(self, containers):
    """Register already-existing Docker containers in the persisted state.

    Each entry may be a Docker id (the inspected id starts with it) or a
    container name. Entries whose name is already present in the state
    file are skipped; the state is persisted once at the end.
    """
    if self.state.exists():
        self.state.load()
    updated_containers = self.state.containers
    for container in containers:
        container_info = self._inspect_container(container)
        container_id = container_info.get('Id')
        if container_id.startswith(container):
            # if container is the docker id, use the partial docker id
            name = container_id[:12]
        else:
            name = container
        # check if this name is already in the state file
        if self.state.container_id(name) is not None:
            continue
        updated_containers[name] = {'id': container_id}
    # persist the state
    self.state.update(updated_containers)
def get_audit(self):
    # Accessor for the audit log used to record embargo operations.
    return self._audit
class Container(object):
    """A managed container together with its observed network state."""

    # filled in lazily / by the network layer
    ip_address = None
    network_state = NetworkState.NORMAL
    partition = None

    def __init__(self, name, container_id, status, **kwargs):
        self.name = name
        self.container_id = container_id
        self.status = status
        self.holy = False      # holy containers must never appear in a partition
        self.neutral = False
        # any extra keyword arguments become attributes verbatim
        for attr, value in kwargs.items():
            setattr(self, attr, value)

    def to_dict(self):
        """Serialize the externally reported fields."""
        return {
            'name': self.name,
            'container_id': self.container_id,
            'status': self.status,
            'ip_address': self.ip_address,
            'network_state': self.network_state,
            'partition': self.partition,
        }
class ContainerStatus(object):
    """Possible container status values."""

    UP = "UP"            # container exists and is running
    DOWN = "DOWN"        # container exists but is stopped
    MISSING = "MISSING"  # container could not be found
def expand_partitions(containers, partitions):
    """Validate partitions and add an implicit one for leftover containers.

    Raises EmbargoError when a partition references an unknown or a holy
    container. Neutral containers that are not already covered by the
    implicit leftover partition receive a partition of their own.
    Returns the partitions as a list of frozensets of names.
    """
    # holy containers may not belong to any partition at all
    all_names = frozenset(c.name for c in containers if not c.holy)
    holy_names = frozenset(c.name for c in containers if c.holy)
    neutral_names = frozenset(c.name for c in containers if c.neutral)
    partitions = [frozenset(p) for p in partitions]

    unknown = set()
    holy = set()
    union = set()
    for part in partitions:
        unknown |= part - all_names - holy_names
        holy |= part - all_names
        union |= part
    if unknown:
        raise EmbargoError('Partitions contain unknown containers: %s' %
                           list(unknown))
    if holy:
        raise EmbargoError('Partitions contain holy containers: %s' %
                           list(holy))
    # put any leftover containers in an implicit partition
    leftover = all_names.difference(union)
    if leftover:
        partitions.append(leftover)
    # give the neutral containers an 'implicit' partition of their own
    # unless they are already fully contained in the leftover
    if not neutral_names.issubset(leftover):
        partitions.append(neutral_names)
    return partitions
| StarcoderdataPython |
3489399 |
"""
Wisconsin Autonomous - https://www.wisconsinautonomous.org
Copyright (c) 2021 wisconsinautonomous.org
All rights reserved.
Use of this source code is governed by a BSD-style license that can be found
in the LICENSE file at the top level of the repo
"""
import unittest
import numpy as np
# WA Simulator
import wa_simulator.utils as utils
# -----
# Tests
# -----
class TestWAUtils(unittest.TestCase):
    """Tests various package level things"""

    def test_data_directory(self):
        """Tests the global get_wa_data_directory() variable"""
        import os.path
        self.assertTrue(os.path.isdir(utils.get_wa_data_directory()))
        self.assertTrue(os.path.isfile(utils.get_wa_data_directory() + '/test/test.json'))  # noqa

    def test_get_wa_data_file(self):
        """Tests the get_wa_data_file method"""
        import os.path
        self.assertTrue(os.path.isfile(utils.get_wa_data_file('test/test.json')))  # noqa
        self.assertFalse(os.path.isfile(utils.get_wa_data_file('test/test2.json')))  # noqa

    def test_set_wa_data_directory(self):
        """Tests the set_wa_data_directory method"""
        import os
        old = utils.get_wa_data_directory()
        # Absolute
        utils.set_wa_data_directory("/TEST")
        self.assertTrue(utils.get_wa_data_directory() == "/TEST")
        self.assertTrue(old != "/TEST")
        # Relative
        utils.set_wa_data_directory("TEST")
        self.assertTrue(utils.get_wa_data_directory() == os.path.join(os.getcwd(), "TEST"))  # noqa
        self.assertTrue(utils.get_wa_data_file('test/test.json') == os.path.join(os.getcwd(), "TEST/test/test.json"))  # noqa
        self.assertFalse(utils.get_wa_data_file('test/test.json') == os.path.join(os.getcwd(), "TEST/test/test2.json"))  # noqa
        # Reset
        utils.set_wa_data_directory(old)
        self.assertTrue(utils.get_wa_data_directory() == old)

    def test_set_wa_data_directory_temp(self):
        """Tests the set_wa_data_diretory_temp method"""
        import os
        old = utils.get_wa_data_directory()
        # Absolute
        with utils.set_wa_data_directory_temp("/TEST"):
            self.assertTrue(utils.get_wa_data_directory() == "/TEST")
            self.assertTrue(old != "/TEST")
        self.assertFalse(utils.get_wa_data_directory() == "/TEST")
        with utils.set_wa_data_directory_temp("TEST"):
            self.assertTrue(utils.get_wa_data_directory() == os.path.join(os.getcwd(), "TEST"))  # noqa
            self.assertTrue(utils.get_wa_data_file('test/test.json') == os.path.join(os.getcwd(), "TEST/test/test.json"))  # noqa
            self.assertFalse(utils.get_wa_data_file('test/test.json') == os.path.join(os.getcwd(), "TEST/test/test2.json"))  # noqa
        self.assertTrue(utils.get_wa_data_directory() == old)

    def test_check_type(self):
        """Tests the check_type method"""
        with self.assertRaises(TypeError):
            utils._check_type("TEST", int, "test", "test")
        with self.assertRaises(TypeError):
            utils._check_type("TEST", bool, "test", "test")
        with self.assertRaises(TypeError):
            utils._check_type(True, str, "test", "test")
        # Doesn't raise
        try:
            utils._check_type("TEST", str, "test", "test")
            utils._check_type(True, bool, "test", "test")
        # was a bare `except:`, which also swallows SystemExit/KeyboardInterrupt
        except Exception:
            self.fail("Raise exception unexpectedly!")

    def test_load_json(self):
        """Tests the load_json method"""
        try:
            j = utils._load_json(utils.get_wa_data_file('test/test.json'))
        except Exception:
            self.fail("Raise exception unexpectedly!")
        self.assertTrue('Name' in j)
        self.assertTrue('Type' in j)
        self.assertTrue('Template' in j)
        self.assertTrue('Properties' in j)
        self.assertTrue(j['Name'] == 'Test GPS Sensor Model')
        self.assertTrue('Update Rate' in j['Properties'])
        self.assertTrue(isinstance(j['Properties'], dict))

    def test_check_field(self):
        """Tests the check_field method"""
        j = utils._load_json(utils.get_wa_data_file('test/test.json'))
        try:
            utils._check_field(j, 'Name')
            utils._check_field(j, 'Test', optional=True)
            utils._check_field(j, 'Template', value='GPS')
            utils._check_field(j, 'Properties', field_type=dict)
            utils._check_field(j['Properties']['Noise Model'], 'Noise Type',
                               field_type=str, allowed_values=['Normal', 'Test'])
        except Exception:
            self.fail("Raise exception unexpectedly!")
        with self.assertRaises(KeyError):
            utils._check_field(j, "Test")
        with self.assertRaises(TypeError):
            utils._check_field(j, "Name", field_type=bool)
        with self.assertRaises(ValueError):
            utils._check_field(j, "Name", value='Noise Model')
        with self.assertRaises(ValueError):
            utils._check_field(j, "Name", value='Noise Model', optional=True)

    def test_check_field_allowed_values(self):
        """Tests the check_field_allowed_values method"""
        j = utils._load_json(utils.get_wa_data_file('test/test.json'))
        try:
            utils._check_field_allowed_values(j, 'Name', ['Test GPS Sensor Model', 'Test', 'Type'])  # noqa
        except Exception:
            self.fail("Raise exception unexpectedly!")
        with self.assertRaises(ValueError):
            utils._check_field_allowed_values(j, 'Name', ['Test', 'Type'])  # noqa
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| StarcoderdataPython |
9635963 | # Generated by Django 2.2.10 on 2020-05-15 19:32
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: only adjusts TelegramBotLogs model options
    # (default ordering and human-readable names); no schema change.

    dependencies = [
        ('core', '0006_auto_20200515_2146'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='telegrambotlogs',
            options={'ordering': ('-created_at',), 'verbose_name': 'Telegram Bot Log', 'verbose_name_plural': 'Telegram Bot Logs'},
        ),
    ]
| StarcoderdataPython |
1661694 | """ User Creation main module """
import datetime
import pymysql.cursors
from app_head import get_head
from app_body import get_body
from app_page import set_page
from app_ogp import set_ogp
from app_footer import get_page_footer
from app_metatags import get_metatags
from app_title import get_title
from bootstrap import get_bootstrap
from app_loading import get_loading_head, get_loading_body
from app_stylesheet import get_stylesheet
from app_navbar import navbar
from font_awesome import get_font_awesome
from sa_func import get_random_str, get_broker_affiliate_link
from sa_func import go_to_url, get_hash_string, get_random_num
from sa_func import send_email_to_queue, is_ascii_chars
from app_cookie import set_sa_cookie, get_lang, get_refer_by_code, get_sa_theme
from googleanalytics import get_googleanalytics
from sa_db import sa_db_access
# Database connection settings, all read from the shared access object.
ACCESS_OBJ = sa_db_access()
DB_USR = ACCESS_OBJ.username()
# restored: this line held a garbled "<PASSWORD>" placeholder (syntax error)
DB_PWD = ACCESS_OBJ.password()
DB_NAME = ACCESS_OBJ.db_name()
DB_SRV = ACCESS_OBJ.db_server()
def set_nickname():
    """Build a random nickname of the form <part_one><part_two><NN>.

    Picks one random word from each half of the randwords table and
    appends a random number between 0 and 99.
    """
    part1 = ''
    part2 = ''
    connection = pymysql.connect(host=DB_SRV,
                                 user=DB_USR,
                                 # was a garbled "<PASSWORD>" placeholder
                                 password=DB_PWD,
                                 db=DB_NAME,
                                 charset='utf8mb4',
                                 cursorclass=pymysql.cursors.DictCursor)
    cursor = connection.cursor(pymysql.cursors.SSCursor)
    sql = "SELECT part_one FROM randwords ORDER BY RAND() LIMIT 1"
    cursor.execute(sql)
    for row in cursor.fetchall():
        part1 = row[0]
    sql = "SELECT part_two FROM randwords ORDER BY RAND() LIMIT 1"
    cursor.execute(sql)
    for row in cursor.fetchall():
        part2 = row[0]
    cursor.close()
    connection.close()
    num = str(get_random_num(99))
    return part1 + part2 + num
def send_email_notification(broker, name, username):
    """Queue a welcome email to the new user plus an admin notification.

    The welcome template for the (fixed) language is loaded from the
    email_templates table and its {fullname}/{email} placeholders filled.
    """
    lang = 'en'
    new_user_welcome_subject = ''
    new_user_welcome_content = ''
    # drop non-ASCII names so the templates stay well-formed
    if not is_ascii_chars(name):
        name = ''
    connection = pymysql.connect(host=DB_SRV,
                                 user=DB_USR,
                                 # was a garbled "<PASSWORD>" placeholder
                                 password=DB_PWD,
                                 db=DB_NAME,
                                 charset='utf8mb4',
                                 cursorclass=pymysql.cursors.DictCursor)
    cursor = connection.cursor(pymysql.cursors.SSCursor)
    # parameterized query instead of string interpolation
    sql = ('SELECT new_user_welcome_subject, new_user_welcome_content '
           'FROM email_templates WHERE lang=%s')
    cursor.execute(sql, (str(lang),))
    for row in cursor.fetchall():
        new_user_welcome_subject = row[0].replace('{fullname}',
                                                  name.title())
        new_user_welcome_content = row[1].replace(
            '{fullname}', name.title()).replace('{email}', username.lower())
    cursor.close()
    connection.close()
    send_email_to_queue(username, new_user_welcome_subject,
                        new_user_welcome_content, 2)
    send_email_to_queue('', 'A new user has been created ['+ str(broker) +']',
                        str(name)+'; '+str(username), 9)
def gen_createuser_page(uid, appname, burl, name, username,
                        password, from_ip, broker, username_broker,
                        terminal):
    """Render the signup form (uid == '0') or create the user account.

    On creation the password is hashed, the user row inserted, a welcome
    email queued, and a page returned that sets the session cookie and
    redirects to the next step. If the uid or username already exists,
    an error string is returned instead.
    """
    return_data = ''
    if uid == '0':
        # first step: no uid assigned yet, just render the signup page
        return_data = get_head(get_loading_head() +
                               get_googleanalytics() +
                               get_title(appname) +
                               get_metatags(burl) +
                               set_ogp(burl, 1, '', '') +
                               get_bootstrap(get_sa_theme(), burl) +
                               get_font_awesome() +
                               get_stylesheet(burl))
        return_data = return_data +\
                      get_body(get_loading_body(), navbar(burl, 0, terminal) +
                               get_user_creation_form(burl, broker) +
                               get_page_footer(burl, False), '')
        return_data = set_page(return_data)
    else:
        connection = pymysql.connect(host=DB_SRV,
                                     user=DB_USR,
                                     # was a garbled "<PASSWORD>" placeholder
                                     password=DB_PWD,
                                     db=DB_NAME,
                                     charset='utf8mb4',
                                     cursorclass=pymysql.cursors.DictCursor)
        cursor = connection.cursor(pymysql.cursors.SSCursor)
        # parameterized to close the SQL injection through uid/username
        sql = "SELECT username FROM users WHERE uid = %s OR username LIKE %s"
        cursor.execute(sql, (str(uid), str(username)))
        res = cursor.fetchall()
        check_exists = ''
        for row in res:
            check_exists = row[0]
        if check_exists == '':
            name = name.lower()
            nickname = set_nickname()
            date_today = datetime.datetime.now()
            date_today = date_today.strftime('%Y%m%d')
            referred_by_code = get_refer_by_code()
            avatar_id = get_random_num(19)
            email_subscription = 'ALL'
            password = get_hash_string(password)
            broker = str(broker)
            username_broker = str(username_broker)
            # parameterized INSERT instead of string-built SQL
            sql = ("INSERT INTO users(uid, name, nickname, username, "
                   "password, created_on, referred_by_code, avatar_id, "
                   "from_ip,lang,email_subscription,broker,username_broker) "
                   "VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)")
            cursor.execute(sql, (str(uid), str(name), str(nickname),
                                 str(username), str(password),
                                 str(date_today), str(referred_by_code),
                                 str(avatar_id), str(from_ip),
                                 str(get_lang()), str(email_subscription),
                                 str(broker), str(username_broker)))
            connection.commit()
            return_data = set_sa_cookie(uid,
                                        set_page(
                                            get_head('<meta http-equiv="refresh" content="0;URL=' +
                                                     burl + 'n/?step=c" />') +
                                            get_body('', '', '')))
            send_email_notification(broker, name, username)
        else:
            return_data = 'user already exists :P !'
        cursor.close()
        connection.close()
    return return_data
def get_user_ip_input():
    """Return JS that injects the visitor's public IP as a hidden form input.

    Uses the api.ipify.org JSONP endpoint; the callback document.writes a
    hidden 'from_ip' field into the signup form.
    """
    return_data = ''
    return_data = '' +\
    '<script type="application/javascript">'+\
    ' function getIP(json) {'+\
    ' document.write("<input type=\'hidden\' value=\' ", '+\
    'json.ip,"\' id=\'from_ip\' name=\'from_ip\' >");'+\
    ' }'+\
    '</script>'+\
    '<script type="application/javascript" '+\
    'src="https://api.ipify.org?format=jsonp&callback=getIP"></script>'
    return return_data
def get_broker_signin_spec_form(broker):
    """Return the broker-specific part of the signup form.

    A known broker yields an optional broker-username input plus hidden
    broker fields; an unknown broker yields a redirect-to-home snippet;
    no broker yields empty hidden fields.
    """
    return_data = ''
    if broker is not None:
        connection = pymysql.connect(host=DB_SRV,
                                     user=DB_USR,
                                     # was a garbled "<PASSWORD>" placeholder
                                     password=DB_PWD,
                                     db=DB_NAME,
                                     charset='utf8mb4',
                                     cursorclass=pymysql.cursors.DictCursor)
        cursor = connection.cursor(pymysql.cursors.SSCursor)
        # parameterized to close the SQL injection through the broker id
        sql = 'SELECT COUNT(*) FROM brokers WHERE broker_id = %s'
        cursor.execute(sql, (str(broker),))
        res = cursor.fetchall()
        broker_count = 0
        for row in res:
            broker_count = row[0]
        cursor.close()
        connection.close()
        if str(broker_count) == '1':
            # NOTE: broker is interpolated into the HTML below; this is only
            # acceptable because it was just validated against the brokers
            # table -- consider HTML-escaping as defense in depth.
            l_username_placeholder = str(broker) + ' username or id (optional)'
            return_data = ' '+\
            ' <div>'+\
            ' <div>'+\
            ' <div class="input-group input-group-lg">'+\
            ' <div class="input-group-prepend">'+\
            ' <span class="input-group-text" '+\
            'id="inputGroup-sizing-lg"><i class="fas fa-user-tie"></i></span>'+\
            ' </div>'+\
            ' <input type="text" id="username_broker" '+\
            'name="username_broker" class="form-control" aria-label="Large" '+\
            'aria-describedby="inputGroup-sizing-sm" placeholder="'+\
            l_username_placeholder +'" autofocus>'+\
            ' <input type="hidden" id="broker" '+\
            'name="broker" value="'+\
            str(broker) +'">'+\
            ' </div>'+\
            ' </div>'+\
            ' </div>'
        else:
            return_data = ' '+\
            '<script>'+\
            'window.location.replace("/");'+\
            '</script>'
    else:
        return_data = ' '+\
        '<input type="hidden" id="username_broker" '+\
        'name="username_broker" value="">'+\
        '<input type="hidden" id="broker" '+\
        'name="broker" value="">'
    return return_data
def get_user_creation_form(burl, broker):
    """Build the HTML for the account-creation form.

    When *broker* is given, the form also advertises opening a trading
    account at that broker (affiliate link) and embeds the broker
    inputs from get_broker_signin_spec_form(); otherwise a plain
    "Create your account" header is shown.
    """
    box_content = ''
    l_enter_name = "Enter Name"
    l_enter_email = "Enter Email"
    l_affiliate_submit_form = ''
    l_create_your_account = 'Create your account'
    if broker is not None:
        # broker-specific labels and affiliate link pieces
        l_create_broker_account = "If you do not have a trading account at "+\
        str(broker) + ", create one now to trade with Smartalpha: "
        l_create_broker_account_btn_label = 'Create' + ' ' +\
        str(broker) + ' ' +'account'
        uniqid = broker + '_affiliate_link'
        l_affiliate_href = go_to_url(get_broker_affiliate_link(broker,
        'affiliate'), 'link', uniqid)
        l_affiliate_submit_form = go_to_url(get_broker_affiliate_link(broker,
        'affiliate'), 'form', uniqid)
        l_create_broker_account_btn = '<a '+\
        l_affiliate_href + ' target="_blank" class="btn btn-success '+\
        'form-signin-btn" role="button">'+\
        l_create_broker_account_btn_label +' <i class="fas fa-external-link-alt"></i></a>'
    user_creation_header = ''
    if broker is None:
        user_creation_header = '<div style="text-align: center;">'+\
        '<h2>' +\
        l_create_your_account +\
        '</h2>' +\
        '</div>'
    else:
        user_creation_header = '<div style="text-align: center;">'+\
        l_create_broker_account + ' '+\
        l_create_broker_account_btn +'</div>'
    # assemble the full form markup
    box_content = l_affiliate_submit_form +\
    '<div class="box-top">' +\
    ' <div class="row">'+\
    ' <div class="col-lg-12 col-md-12 col-sm-12 col-xs-12">'+\
    ' <div class="box-part rounded">'+\
    ' <form method="POST" action="'+\
    burl +'n/?uid='+ get_random_str(99) +\
    '" style="width: 100%; max-width: 600px; padding: 2%; margin: auto;">'+\
    ' <div>'+\
    ' <div>'+\
    user_creation_header+\
    ' <hr>'+\
    ' </div>'+\
    ' </div>'+\
    get_broker_signin_spec_form(broker) +\
    ' <div>'+\
    ' <div>'+\
    ' <div class="input-group input-group-lg">'+\
    ' <div class="input-group-prepend">'+\
    ' <span class="input-group-text" '+\
    'id="inputGroup-sizing-lg"><i class="fa fa-user-alt" style="font-size: large;"></i></span>'+\
    ' </div>'+\
    ' <input type="text" id="name" name="name" '+\
    'class="form-control" aria-label="Large" '+\
    'aria-describedby="inputGroup-sizing-sm" placeholder="'+\
    l_enter_name +'" required autofocus>'+\
    ' </div>'+\
    ' </div>'+\
    ' </div>'+\
    ' <div>'+\
    ' <div>'+\
    ' <div class="input-group input-group-lg">'+\
    ' <div class="input-group-prepend">'+\
    ' <span class="input-group-text" '+\
    'id="inputGroup-sizing-lg"><i class="fa fa-at" style="font-size: large;"></i></span>'+\
    ' </div>'+\
    ' <input type="email" id="email" '+\
    'name="email" class="form-control" aria-label="Large" '+\
    'aria-describedby="inputGroup-sizing-sm" placeholder="'+\
    l_enter_email +'" required autofocus>'+\
    ' </div>'+\
    ' </div>'+\
    ' </div>'+\
    ' <div>'+\
    ' <div>'+\
    ' <div class="input-group input-group-lg">'+\
    ' <div class="input-group-prepend">'+\
    ' <span class="input-group-text" '+\
    'id="inputGroup-sizing-lg"><i class="fa fa-key" style="font-size: large;"></i></span>'+\
    ' </div>'+\
    ' <input type="password" '+\
    'id="password" name="password" class="form-control" aria-label="Large" '+\
    'aria-describedby="inputGroup-sizing-sm" placeholder="Password" required>'+\
    ' </div>'+\
    ' </div>'+\
    ' </div>'+\
    ' <div>'+\
    ' <div>'+\
    ' <div> </div>'+\
    get_user_ip_input() +\
    ' <button type="submit" '+\
    'class="btn btn-info btn-lg btn-block form-signin-btn">' +\
    'Next' + ' <i class="fas fa-arrow-right"></i></button>'+\
    ' </div>'+\
    ' </div>'+\
    '<div class="expl" style="text-align: center;">'+\
    '*We respect your privacy and will never share your email address '+\
    'with any person or organization.</div>'+\
    ' </form>'+\
    ' </div>'+\
    ' </div>'+\
    ' </div>'+\
    '</div>'
    return box_content
| StarcoderdataPython |
14590 | <filename>netvisor_api_client/schemas/sales_payments/__init__.py
from .list import SalesPaymentListSchema # noqa
| StarcoderdataPython |
11382258 | <gh_stars>0
import sys
from Sale_calc_auto import *
from PyQt4 import QtGui
class MyForm(QtGui.QDialog):
    """Main dialog window wrapping the generated Sale_calc_auto UI."""

    def __init__(self, parent=None):
        QtGui.QWidget.__init__(self, parent)
        # instantiate and attach the Qt-Designer-generated UI
        self.ui = Ui_Dialog()
        self.ui.setupUi(self)
        # this is where we will bind the event handlers
    # This is where we will insert the functions (defs)
if __name__ == "__main__":
    # Create the Qt application, show the dialog and run the event loop.
    app = QtGui.QApplication(sys.argv)
    myapp = MyForm()
    myapp.show()
    sys.exit(app.exec_())
| StarcoderdataPython |
1687358 | <reponame>idwagner/pynab<filename>pynab/openapi/api/payee_locations_api.py
# coding: utf-8
"""
YNAB API Endpoints
Our API uses a REST based design, leverages the JSON data format, and relies upon HTTPS for transport. We respond with meaningful HTTP response codes and if an error occurs, we include error details in the response body. API Documentation is at https://api.youneedabudget.com # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from pynab.openapi.api_client import ApiClient
from pynab.openapi.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class PayeeLocationsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
    # Fall back to a default-configured ApiClient when none is supplied.
    if api_client is None:
        api_client = ApiClient()
    self.api_client = api_client
def get_payee_location_by_id(self, budget_id, payee_location_id, **kwargs):  # noqa: E501
    """Single payee location  # noqa: E501

    Returns a single payee location  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_payee_location_by_id(budget_id, payee_location_id, async_req=True)
    >>> result = thread.get()

    :param budget_id: The id of the budget. \"last-used\" can be used to specify the last used budget and \"default\" can be used if default budget selection is enabled (see: https://api.youneedabudget.com/#oauth-default-budget). (required)
    :type budget_id: str
    :param payee_location_id: id of payee location (required)
    :type payee_location_id: str
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: PayeeLocationResponse
    """
    # Return only the deserialized body; the *_with_http_info variant also
    # exposes the status code and headers.
    kwargs['_return_http_data_only'] = True
    return self.get_payee_location_by_id_with_http_info(budget_id, payee_location_id, **kwargs)  # noqa: E501
def get_payee_location_by_id_with_http_info(self, budget_id, payee_location_id, **kwargs):  # noqa: E501
    """Single payee location  # noqa: E501

    Returns a single payee location  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_payee_location_by_id_with_http_info(budget_id, payee_location_id, async_req=True)
    >>> result = thread.get()

    :param budget_id: The id of the budget. \"last-used\" can be used to specify the last used budget and \"default\" can be used if default budget selection is enabled (see: https://api.youneedabudget.com/#oauth-default-budget). (required)
    :type budget_id: str
    :param payee_location_id: id of payee location (required)
    :type payee_location_id: str
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data without head status code
                                   and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for an a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(PayeeLocationResponse, status_code(int), headers(HTTPHeaderDict))
    """

    local_var_params = locals()

    all_params = [
        'budget_id',
        'payee_location_id'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth'
        ]
    )

    # reject any keyword argument the endpoint does not understand
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_payee_location_by_id" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'budget_id' is set
    if self.api_client.client_side_validation and ('budget_id' not in local_var_params or  # noqa: E501
                                                   local_var_params['budget_id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `budget_id` when calling `get_payee_location_by_id`")  # noqa: E501
    # verify the required parameter 'payee_location_id' is set
    if self.api_client.client_side_validation and ('payee_location_id' not in local_var_params or  # noqa: E501
                                                   local_var_params['payee_location_id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `payee_location_id` when calling `get_payee_location_by_id`")  # noqa: E501

    collection_formats = {}

    # both parameters are substituted into the URL path
    path_params = {}
    if 'budget_id' in local_var_params:
        path_params['budget_id'] = local_var_params['budget_id']  # noqa: E501
    if 'payee_location_id' in local_var_params:
        path_params['payee_location_id'] = local_var_params['payee_location_id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['bearer']  # noqa: E501

    # maps HTTP status codes to the model used to deserialize the body
    response_types_map = {
        200: "PayeeLocationResponse",
        404: "ErrorResponse",
    }

    return self.api_client.call_api(
        '/budgets/{budget_id}/payee_locations/{payee_location_id}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_types_map=response_types_map,
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=local_var_params.get('_request_auth'))
def get_payee_locations(self, budget_id, **kwargs):  # noqa: E501
    """List payee locations  # noqa: E501

    Returns all payee locations  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_payee_locations(budget_id, async_req=True)
    >>> result = thread.get()

    :param budget_id: The id of the budget. \"last-used\" can be used to specify the last used budget and \"default\" can be used if default budget selection is enabled (see: https://api.youneedabudget.com/#oauth-default-budget). (required)
    :type budget_id: str
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: PayeeLocationsResponse
    """
    # Return only the deserialized body; the *_with_http_info variant also
    # exposes the status code and headers.
    kwargs['_return_http_data_only'] = True
    return self.get_payee_locations_with_http_info(budget_id, **kwargs)  # noqa: E501
def get_payee_locations_with_http_info(self, budget_id, **kwargs):  # noqa: E501
    """List payee locations (raw HTTP variant).

    Returns all payee locations. Synchronous by default; pass
    ``async_req=True`` to receive a thread whose ``get()`` yields the result.

    :param budget_id: The id of the budget. "last-used" can be used to specify the last used budget and "default" can be used if default budget selection is enabled (see: https://api.youneedabudget.com/#oauth-default-budget). (required)
    :type budget_id: str
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data without head status code
                                   and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object is
                             returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: total request timeout in seconds, or a
                             (connection, read) tuple of timeouts.
    :param _request_auth: per-request override of the spec's auth settings.
    :type _request_auth: dict, optional
    :return: (PayeeLocationsResponse, status code, headers), or the request
             thread when called asynchronously.
    :rtype: tuple(PayeeLocationsResponse, status_code(int), headers(HTTPHeaderDict))
    """
    local_var_params = locals()

    # Everything this endpoint accepts: its own parameter plus the generic
    # per-request options shared by all generated methods.
    all_params = ['budget_id',
                  'async_req',
                  '_return_http_data_only',
                  '_preload_content',
                  '_request_timeout',
                  '_request_auth']

    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_payee_locations" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # verify the required parameter 'budget_id' is set
    if self.api_client.client_side_validation and local_var_params.get('budget_id') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `budget_id` when calling `get_payee_locations`")  # noqa: E501

    path_params = {'budget_id': local_var_params['budget_id']}
    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(['application/json']),  # noqa: E501
    }
    response_types_map = {
        200: "PayeeLocationsResponse",
        404: "ErrorResponse",
    }

    return self.api_client.call_api(
        '/budgets/{budget_id}/payee_locations', 'GET',
        path_params,
        [],              # query params (none for this endpoint)
        header_params,
        body=None,
        post_params=[],  # form params (none)
        files={},
        response_types_map=response_types_map,
        auth_settings=['bearer'],  # Authentication setting
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats={},
        _request_auth=local_var_params.get('_request_auth'))
def get_payee_locations_by_payee(self, budget_id, payee_id, **kwargs):  # noqa: E501
    """List locations for a payee.

    Returns all payee locations for a specified payee. Synchronous by
    default; pass ``async_req=True`` to receive a thread whose ``get()``
    yields the result.

    :param budget_id: The id of the budget. "last-used" can be used to specify the last used budget and "default" can be used if default budget selection is enabled (see: https://api.youneedabudget.com/#oauth-default-budget). (required)
    :type budget_id: str
    :param payee_id: id of payee (required)
    :type payee_id: str
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object is
                             returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: total request timeout in seconds, or a
                             (connection, read) tuple of timeouts.
    :return: PayeeLocationsResponse, or the request thread when called
             asynchronously.
    :rtype: PayeeLocationsResponse
    """
    # Delegate to the *_with_http_info variant, asking for payload only.
    params = dict(kwargs, _return_http_data_only=True)
    return self.get_payee_locations_by_payee_with_http_info(budget_id, payee_id, **params)  # noqa: E501
def get_payee_locations_by_payee_with_http_info(self, budget_id, payee_id, **kwargs):  # noqa: E501
    """List locations for a payee (raw HTTP variant).

    Returns all payee locations for a specified payee. Synchronous by
    default; pass ``async_req=True`` to receive a thread whose ``get()``
    yields the result.

    :param budget_id: The id of the budget. "last-used" can be used to specify the last used budget and "default" can be used if default budget selection is enabled (see: https://api.youneedabudget.com/#oauth-default-budget). (required)
    :type budget_id: str
    :param payee_id: id of payee (required)
    :type payee_id: str
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data without head status code
                                   and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object is
                             returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: total request timeout in seconds, or a
                             (connection, read) tuple of timeouts.
    :param _request_auth: per-request override of the spec's auth settings.
    :type _request_auth: dict, optional
    :return: (PayeeLocationsResponse, status code, headers), or the request
             thread when called asynchronously.
    :rtype: tuple(PayeeLocationsResponse, status_code(int), headers(HTTPHeaderDict))
    """
    local_var_params = locals()

    # Everything this endpoint accepts: its own parameters plus the generic
    # per-request options shared by all generated methods.
    all_params = ['budget_id',
                  'payee_id',
                  'async_req',
                  '_return_http_data_only',
                  '_preload_content',
                  '_request_timeout',
                  '_request_auth']

    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_payee_locations_by_payee" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # verify the required parameter 'budget_id' is set
    if self.api_client.client_side_validation and local_var_params.get('budget_id') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `budget_id` when calling `get_payee_locations_by_payee`")  # noqa: E501
    # verify the required parameter 'payee_id' is set
    if self.api_client.client_side_validation and local_var_params.get('payee_id') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `payee_id` when calling `get_payee_locations_by_payee`")  # noqa: E501

    path_params = {
        'budget_id': local_var_params['budget_id'],
        'payee_id': local_var_params['payee_id'],
    }
    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(['application/json']),  # noqa: E501
    }
    response_types_map = {
        200: "PayeeLocationsResponse",
        404: "ErrorResponse",
    }

    return self.api_client.call_api(
        '/budgets/{budget_id}/payees/{payee_id}/payee_locations', 'GET',
        path_params,
        [],              # query params (none for this endpoint)
        header_params,
        body=None,
        post_params=[],  # form params (none)
        files={},
        response_types_map=response_types_map,
        auth_settings=['bearer'],  # Authentication setting
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats={},
        _request_auth=local_var_params.get('_request_auth'))
| StarcoderdataPython |
3599862 | <gh_stars>1-10
"""
Topics to be explored:
- Lattice Approximations of Continuous Space Manifolds
- Finding an embedding of a neural network in R^3
- Neural Field Models for particle dynamics and stochastic
dynamics on neural manifolds
- Intrinsic Dimensionality of a Graph
An idea that occurred to me yesterday relates to the
"*planar dimensionality of a graph*" which means the
minimal number of dimensions necessary in which to
project the graph such that no edges intersect with
eachother. For example, the intrinsic dimensionality
of a planar graph is $2$. A graph for which intersections
only exist between one single node $n_i$ and any number
of other nodes $n_j, j\ne i$, embedding this graph in
3 dimensions will remove any line intersections simply
by the definition of a line emanating from a point
(because the only place the line segments representing
edges intersect is at the node itself and therefore
they intersect nowhere else).
Once you can find the dimensionality of a graph as well
as an appropriate embedding of the graph in those
dimensions (using some force-based spring-layout model) then things get interesting.
If the graph has intrinsic dimensionality $n$, by
projecting the graph into dimensions $n+1$ and
force laying out the graph in these dimensions
you obtain LATTICE APPROXIMATION OF A CONTINUOUS SPACE CURVE.
The position of a node along dimension $n+1$
converges such that the euclidean distance between
any two nodes in this $n+1$ space is exactly equal to
their edges distance.
**Now we have found the most perfect intrinsic spatial
embedding of a graph** because:
1. The distance between all the nodes in this space is
exactly equal to the weight of their edges
2. The space approximation created by the graph lattice is continuous.
*NOW* we can start playing with the physics of this high
dimensional graph manifold, for example, by fitting
a field function to the data
"""
import sys
import numpy as np
import scipy.cluster.hierarchy as sch
import pylab
import scipy
import matplotlib.pyplot as plt
import networkx as nx
import numpy.ma as ma
from scipy.integrate import odeint
import matplotlib.cm as cm
import os
import sys
import numpy as np
import scipy.stats
sys.path.append('../src/')
import data_config as dc
"""
By embedding a graph in 3+1 dimensions
we can find a continuous surface on which the
network lives
This is enabled by a theorem in network science
that the probability of edge collisions
for a graph embedded in three dimensions is
zero
"""
import numpy as np
import numpy.ma as ma
from scipy.integrate import odeint
import cv2
def get_dr(y):
    """Pairwise directional offsets between all points.

    ``y`` is an (n, d) array of positions. Returns an (n, n, d) float32
    array where entry [i, j] is r_i - r_j, i.e. the vector from point j
    toward point i (the direction of the force j exerts on i).
    """
    count = y.shape[0]
    # Stack n copies of the point list: row j along axis 1 holds r_j ...
    rows_j = np.tile(y, (count, 1, 1))
    # ... and its transpose puts r_i along axis 0.
    rows_i = np.transpose(rows_j, axes=(1, 0, 2))
    return (rows_i - rows_j).astype(np.float32)
def get_radii(y):
    """Symmetric (n, n) float32 matrix of pairwise Euclidean distances."""
    offsets = get_dr(y)
    squared = np.sum(np.power(offsets, 2.), axis=2)
    radii = np.power(squared, 1. / 2.)
    return np.array(radii).astype(np.float32)
def spring_layout(y, t, w, k, n, d, T):
    """ODE right-hand side for a damped spring system of graph nodes.

    y: flattened (n*2, d) state — the first n rows are node positions,
       the last n rows are node velocities.
    w: (n, n, 1) matrix of edge rest lengths (weights).
    k: spring constant; n: node count; d: spatial dimension.
    t, T: unused here (required by the odeint callback signature).

    Returns dy/dt, flattened to length n*2*d.
    """
    state = np.copy(y.reshape((n * 2, d)))
    positions = state[:n]
    velocities = state[n:]
    offsets = get_dr(positions)
    # Hooke-like restoring force: zero when separation equals the edge
    # weight w; the -velocities term below adds damping.
    # NOTE(review): np.linalg.norm here is the norm of the WHOLE offset
    # tensor, not per-pair distances — confirm that is intended.
    force = -k * (offsets - w * offsets / (np.linalg.norm(offsets)))
    net_force = np.sum(force, axis=1) - velocities
    acceleration = net_force  # nodes have unit mass
    state[:n] = np.copy(state[n:])   # d(position)/dt = velocity
    state[n:] = np.copy(acceleration)  # d(velocity)/dt = acceleration
    return np.array(state).reshape(n * 2 * d)
def sim_particles(t, r, v, w, k=1.):
    """Integrate the spring system over the time points ``t``.

    r: (n, d) initial positions; v: (n, d) initial velocities;
    w: (n, n) edge-weight (rest-length) matrix; k: spring constant.
    Returns a (len(t), n*2, d) trajectory of [positions; velocities].
    """
    n, d = r.shape[0], r.shape[-1]
    initial = np.zeros((n * 2, d))
    initial[:n] = r
    initial[n:] = v
    # Add a trailing axis so the (n, n) weights broadcast against the
    # (n, n, d) offset tensor inside spring_layout.
    w = np.array([w]).reshape((w.shape[0], w.shape[1], 1))
    trajectory = odeint(
        spring_layout,
        initial.reshape(n * 2 * d),
        t,
        args=(w, k, n, d, t.shape[0]))
    return trajectory.reshape(t.shape[0], n * 2, d)
def get_data():
    """Load the Kato calcium-imaging traces and the C. elegans connectome.

    Returns a dict with the raw traces, their Gram matrix, the connectome
    as a directed networkx graph, and its adjacency matrix.
    """
    kato = dc.kato.data()
    data = kato[0]["deltaFOverF_bc"].T
    mean = np.mean(data, axis=1, keepdims=True)
    standardized = data - mean  # computed but unused, kept from original
    correlation = data.T.dot(data)
    connectome = dc.connectome_networkx.data().to_directed()
    adjacency = nx.to_numpy_matrix(connectome)
    return {
        "data": data,
        # NOTE: key spelling ("correletion") kept for caller compatibility.
        "correletion": correlation,
        "adjacency": adjacency,
        "network": connectome,
    }
def simulate():
    """Run the spring-layout integration for a 5x5 grid graph in 3-D.

    Returns the (timesteps, 2n, 3) trajectory from sim_particles: node
    positions followed by node velocities at each time point.
    """
    data = get_data()  # loaded for parity with the original flow
    adjacency = data["adjacency"]  # currently unused by the simulation
    # 100 time samples over [0, 10].
    t = np.linspace(0, 10, num=100).astype(np.float32)
    side = 5
    G = nx.grid_2d_graph(side, side)
    N = side * side
    # Scale the 0/1 grid adjacency into rest lengths of 10.
    w = nx.to_numpy_matrix(G) * 10
    # Random initial positions in the unit cube, zero initial velocities.
    r = np.random.rand(N, 3)
    v = r * 0.
    return sim_particles(t, r, v, w)
if __name__ == "__main__":
    alreadysimulated = os.path.isfile("../data/spaceembedding.npy")
    # NOTE(review): the cache branch is deliberately disabled ("if False"),
    # so every run re-simulates; restore "if alreadysimulated:" to reuse
    # the saved trajectory.
    if False:  # alreadysimulated:
        rf = np.load("../data/spaceembedding.npy")
    else:
        rf = simulate()
        np.save("../data/spaceembedding.npy", rf)
    data = get_data()
    H = nx.grid_2d_graph(5, 5)
    # Final frame of the integration: positions of the 25 nodes in 3-D.
    # (The old "np.array(nx.spring_layout(H, dim=3).values())" line was
    # removed: it was immediately overwritten and is broken on Python 3,
    # where dict.values() wraps into a 0-d object array.)
    pos = rf[-1, :25]
    from mayavi import mlab
    # Relabel nodes 0..len(G)-1 so edge tuples index into the point array.
    G = nx.convert_node_labels_to_integers(H)
    scalars = np.array(G.nodes()) + 5
    mlab.figure(1, bgcolor=(0, 0, 0))
    mlab.clf()
    pts = mlab.points3d(pos[:, 0], pos[:, 1], pos[:, 2],
                        scalars,
                        scale_factor=0.01,
                        scale_mode='none',
                        colormap='Blues',
                        resolution=20)
    pts.mlab_source.dataset.lines = np.array(G.edges())
    tube = mlab.pipeline.tube(pts, tube_radius=0.01)
    mlab.pipeline.surface(tube, color=(0.8, 0.8, 0.8))
    mlab.savefig('mayavi2_spring.png')
    mlab.show()  # interactive window
| StarcoderdataPython |
3596254 | <filename>src/FlaUILibrary/keywords/keyboard.py
from robotlibcore import keyword
from FlaUILibrary.flaui.module import (Keyboard, Element)
class KeyboardKeywords:
    """
    Interface implementation from robotframework usage for keyboard keywords.
    """

    def __init__(self, module):
        """
        Constructor for element keywords.

        ``module`` UIA3 module to handle element interaction.
        """
        self._module = module

    def _focus_if_identifier(self, identifier, msg):
        """Focus the element at ``identifier`` (XPath) when one was supplied.

        Shared by press_key / press_keys so the focus behaviour stays in one
        place.
        """
        if identifier is not None:
            self._module.action(Element.Action.FOCUS_ELEMENT,
                                Element.Container(xpath=identifier, retries=None, name=None),
                                msg)

    @keyword
    def press_key(self, key_combination, identifier=None, msg=None):
        """
        Keyboard control to execute one user defined shortcut or text.

        If identifier is set, try to attach to the given element first; if the
        operation was successful the old element will be reattached automatically.

        Arguments:
        | Argument         | Type                                  | Description              |
        | key_combination  | String, which should                  | Text to be typed by keyboard |
        |                  | satisfy one of the following formats: |                          |
        |                  | - s'<shortcut>'                       |                          |
        |                  | - t'<text>'                           |                          |
        |                  | Examples:                             |                          |
        |                  | - s'CTRL+A'                           |                          |
        |                  | - t'JJJ'                              |                          |
        |                  | - s'JJJ' will be executed as text     |                          |
        | identifier       | String *Optional                      | Optional XPath identifier |
        | msg              | String *Optional                      | Custom error message     |

        XPath syntax is explained in `XPath locator`.

        The following keys are supported for usage as a part of key_combination:
        | LBUTTON   | Left mouse button |
        | RBUTTON   | Right mouse button |
        | CANCEL    | Control-break processing |
        | MBUTTON   | Middle mouse button (three-button mouse) |
        | XBUTTON1  | Windows 2000/XP: X1 mouse button |
        | XBUTTON2  | Windows 2000/XP: X2 mouse button |
        | BACK      | BACKSPACE key |
        | TAB       | TAB key |
        | CLEAR     | CLEAR key |
        | ENTER     | ENTER key |
        | SHIFT     | SHIFT key |
        | CTRL      | CTRL key |
        | ALT       | ALT key |
        | CAPITAL   | CAPITAL key |
        | PAUSE     | PAUSE key |
        | ESCAPE    | ESC key |
        | ESC       | ESC key |
        | SPACE     | Blank space key |
        | NEXT      | Next key |
        | END       | END key |
        | HOME      | HOME key |
        | LEFT      | LEFT ARROW key |
        | RIGHT     | RIGHT ARROW key |
        | UP        | UP ARROW key |
        | DOWN      | DOWN ARROW key |
        | SELECT    | SELECT key |
        | PRINT     | PRINT key |
        | EXECUTE   | EXEC key |
        | INSERT    | INS key |
        | DELETE    | DEL key |
        | HELP      | HELP key |
        | 0 - 9     | |
        | A - Z     | |
        | F1 - F12  | |
        | LWIN      | Left Windows key |
        | RWIN      | Right Windows key |
        | APPS      | |
        | SLEEP     | |
        | MULTIPLY  | '*' key |
        | ADD       | '+' key |
        | SEPARATOR | |
        | SUBTRACT  | |
        | DECIMAL   | |
        | DIVIDE    | |

        Example:
        | ***** Variables *****  |
        | ${KEYBOARD_INPUT_CUT}  s'CTRL+X' |
        |                        |
        | ***** Test Cases ***** |
        | ...Keyboard usage in Test Case... |
        | Press Key  s'CTRL'                 ${XPATH_COMBO_BOX_INPUT} |
        | Press Key  t'A'                    ${XPATH_COMBO_BOX_INPUT} |
        | Press Key  s'CTRL+A'               ${XPATH_COMBO_BOX_INPUT} |
        | Press Key  ${KEYBOARD_INPUT_CUT}   ${XPATH_COMBO_BOX_INPUT} |
        """
        self._focus_if_identifier(identifier, msg)
        self._module.action(Keyboard.Action.KEY_COMBINATION,
                            Keyboard.create_value_container(shortcut=key_combination),
                            msg)

    @keyword
    def press_keys(self, keys_combinations, identifier=None, msg=None):
        """
        Keyboard control to execute a user defined sequence of shortcuts and
        text values.

        If identifier is set, try to attach to the given element first; if the
        operation was successful the old element will be reattached automatically.

        Arguments:
        | Argument           | Type                                  | Description              |
        | keys_combinations  | List of Strings, each of which should | Text to be typed by keyboard |
        |                    | satisfy one of the following formats: |                          |
        |                    | - s'<shortcut>'                       |                          |
        |                    | - t'<text>'                           |                          |
        |                    | Examples:                             |                          |
        |                    | - s'CTRL+A'                           |                          |
        |                    | - t'JJJ'                              |                          |
        |                    | - s'JJJ' will be executed as text     |                          |
        | identifier         | String *Optional                      | Optional XPath identifier |
        | msg                | String *Optional                      | Custom error message     |

        XPath syntax is explained in `XPath locator`.

        The list of all supported key combinations can be seen under the
        `Press Key` keyword. The only difference between both keywords is:
        Press Keys presses a sequence of several combinations after each other;
        Press Key presses only one key combination at a time.

        Example:
        | ***** Variables *****                |
        | @{KEYBOARD_INPUT_SELECT_CUT_TEXT}    s'CTRL+A'  s'CTRL+X' |
        |                                      |
        | ***** Test Cases *****               |
        | ...Keyboard usage in Test Case...    |
        | Press Keys  ${KEYBOARD_INPUT_SELECT_CUT_TEXT}  ${XPATH_COMBO_BOX_INPUT} |
        """
        self._focus_if_identifier(identifier, msg)
        self._module.action(Keyboard.Action.KEYS_COMBINATIONS,
                            Keyboard.create_value_container(shortcuts=keys_combinations),
                            msg)
| StarcoderdataPython |
1631053 | from Gateway.SocketRequestHandler import RequestHandler
from Gateway.ThreadedServer import ThreadedServer
import threading, socketserver, socket, time
class SocketServerManager():
    """Bootstraps the gateway socket server and runs its listen loop in a
    daemon thread until the shared exit flag is set by the main process."""

    def __init__(self, dbConnection, requestQueue, exitFlag):
        print('Initialising connections...')
        # Shared multiprocessing-style flag: non-zero value means "shut down".
        self.exitFlag = exitFlag
        self.setupSocketServer(dbConnection, requestQueue)

    def setupSocketServer(self, dbConnection, requestQueue):
        """Create the ThreadedServer on localhost:30000, wire up its database
        connection and request queue, and serve until exitFlag is set."""
        print('Initialising socket server...')
        # Listen on localhost:30000
        address = ('localhost', 30000)
        server = ThreadedServer(
            address,
            RequestHandler
        )
        # Connect to database
        server.databaseManager = dbConnection
        if not server.databaseManager.checkStatus():
            print('Looks like an error occured in database connection. Stopping Authentication service.')
            # BUGFIX: release the already-bound listening socket before
            # bailing out, otherwise the port stays held until process exit.
            server.server_close()
            return
        # Shared queue to process input from threads
        server.requestQueue = requestQueue
        ip, port = server.server_address
        print('[ GATEWAY ] Running on ', ip, ':', port)
        try:
            serverThread = threading.Thread(
                target=server.serve_forever,
                args=(0.5,)
            )
            # Don't hang on exit, free all threads
            serverThread.setDaemon(True)
            serverThread.start()
            print('Network listen loop running in thread:', serverThread.getName())
            # Wait for system exit (polling the shared flag).
            while self.exitFlag.value == 0:
                time.sleep(3)
            # At this point, main server has initiated shutdown.
            server.shutdown()
            server.server_close()
            serverThread.join()
        except Exception as error:
            print('Gateway error:', error)
            server.shutdown()
            server.server_close()
        finally:
            # Reset exit flag. BUGFIX: the old "return" here was removed —
            # returning inside "finally" silently swallows any in-flight
            # exception (including KeyboardInterrupt).
            self.exitFlag.value = 0
6550682 | <filename>playground/fsdet/voc/split1/retentive_rcnn/10shot/dataset/dataset.py<gh_stars>10-100
import logging
import os.path as osp
import xml.etree.ElementTree as ET
import numpy as np
from cvpods.data.datasets.voc import VOCDataset
from cvpods.data.detection_utils import create_keypoint_hflip_indices
from cvpods.data.registry import DATASETS
from cvpods.structures import BoxMode
from cvpods.utils import PathManager
from .paths_route import _PREDEFINED_SPLITS_VOC_FEWSHOT
logger = logging.getLogger(__name__)
@DATASETS.register()
class VOCFewShotDataset(VOCDataset):
    """PASCAL VOC dataset restricted to a few-shot benchmark split.

    The dataset name encodes which classes to keep ("all"/"base"/"novel"),
    the split id (1-3) and, for K-shot training sets, the shot count and an
    optional seed, e.g. ``voc_2007_trainval_novel1_10shot_seed3``.
    """

    def __init__(self, cfg, dataset_name, transforms=[], is_train=True):
        # Deliberately skip VOCDataset.__init__ (call its *parent* instead):
        # this class re-implements metadata and annotation loading for the
        # few-shot splits.
        super(VOCDataset, self).__init__(
            cfg, dataset_name, transforms, is_train
        )

        voc_fewshot_info = _PREDEFINED_SPLITS_VOC_FEWSHOT
        image_root, split = voc_fewshot_info["voc"][self.name]
        self.image_root = osp.join(self.data_root, image_root) \
            if "://" not in image_root else image_root
        self.split = split

        # Register few-shot attributes: exactly one of all/base/novel must
        # occur in the dataset name, immediately followed by the split id.
        few_shot_keywords = ["all", "base", "novel"]
        self.keepclasses = [n for n in few_shot_keywords
                            if n in self.name]
        assert len(self.keepclasses) == 1, \
            "{} contains multiple or no keywords in {}".format(
                self.name, few_shot_keywords)
        self.keepclasses = self.keepclasses[0]
        self.sid = int(self.name.split(self.keepclasses)[1][0])

        self.meta = self._get_metadata()
        self.dataset_dicts = self._load_annotations()
        self._set_group_flag()

        self.eval_with_gt = cfg.TEST.get("WITH_GT", False)

        # fmt: off
        self.data_format = cfg.INPUT.FORMAT
        self.mask_on = cfg.MODEL.MASK_ON
        self.mask_format = cfg.INPUT.MASK_FORMAT
        self.keypoint_on = cfg.MODEL.KEYPOINT_ON
        self.load_proposals = cfg.MODEL.LOAD_PROPOSALS
        # fmt: on

        if self.keypoint_on:
            # Flip only makes sense in training
            self.keypoint_hflip_indices = \
                create_keypoint_hflip_indices(cfg.DATASETS.TRAIN)
        else:
            self.keypoint_hflip_indices = None

    def _get_metadata(self):
        """Build metadata (class list, evaluator type, paths) for this split."""
        # Explicit lookup instead of the previous eval() of a constructed
        # variable name — safer and survives renames.
        categories_by_keyword = {
            "all": PASCAL_VOC_ALL_CATEGORIES,
            "base": PASCAL_VOC_BASE_CATEGORIES,
            "novel": PASCAL_VOC_NOVEL_CATEGORIES,
        }
        thing_classes = categories_by_keyword[self.keepclasses][self.sid]
        meta = {
            "thing_classes": thing_classes,
            "evaluator_type":
                _PREDEFINED_SPLITS_VOC_FEWSHOT["evaluator_type"]["voc"],
            "dirname": self.image_root,
            "split": self.split,
            "year": int(self.name.split('_')[1]),
        }
        return meta

    def _load_annotations(self):
        """Parse VOC XML annotations into cvpods-style record dicts.

        For K-shot datasets, reads the per-class image lists from the
        few_shot_split files and caps each class at K annotated images;
        otherwise loads the full ImageSets split and drops images containing
        any class outside this split's class list.
        """
        is_shots = "shot" in self.name
        if is_shots:
            fileids = {}
            split_dir = osp.join(self.data_root, "voc/few_shot_split")
            if "seed" in self.name:
                shot = self.name.split('_')[-2].split("shot")[0]
                seed = int(self.name.split("_seed")[-1])
                split_dir = osp.join(split_dir, "seed{}".format(seed))
            else:
                shot = self.name.split('_')[-1].split("shot")[0]
            for cls in self.meta["thing_classes"]:
                with PathManager.open(
                        osp.join(split_dir,
                                 "box_{}shot_{}_train.txt".format(
                                     shot, cls))
                ) as f:
                    # dtype=str (np.str was removed in NumPy 1.24).
                    fileids_ = np.loadtxt(f, dtype=str).tolist()
                    if isinstance(fileids_, str):
                        # Single-line file: loadtxt returns a scalar string.
                        fileids_ = [fileids_]
                    fileids_ = [fid.split('/')[-1].split('.jpg')[0]
                                for fid in fileids_]
                    fileids[cls] = fileids_
        else:
            with PathManager.open(osp.join(
                    self.image_root, "ImageSets", "Main",
                    self.split + ".txt")) as f:
                fileids = np.loadtxt(f, dtype=str)

        dicts = []
        if is_shots:
            for cls, fileids_ in fileids.items():
                dicts_ = []
                for fid in fileids_:
                    # VOC2012 image ids contain an underscore; VOC2007 don't.
                    year = "2012" if "_" in fid else "2007"
                    dirname = osp.join(self.data_root, "voc",
                                       "VOC{}".format(year))
                    anno_file = osp.join(dirname, "Annotations",
                                         fid + ".xml")
                    jpeg_file = osp.join(dirname, "JPEGImages",
                                         fid + ".jpg")

                    tree = ET.parse(anno_file)

                    # One record per matching object instance (not per image),
                    # so the per-class cap below counts annotations.
                    for obj in tree.findall("object"):
                        r = {
                            "file_name": jpeg_file,
                            "image_id": fid,
                            "height":
                                int(tree.findall(
                                    "./size/height")[0].text),
                            "width":
                                int(tree.findall(
                                    "./size/width")[0].text),
                        }
                        cls_ = obj.find("name").text
                        if cls != cls_:
                            continue
                        bbox = obj.find("bndbox")
                        bbox = [float(bbox.find(x).text)
                                for x in ["xmin", "ymin", "xmax", "ymax"]]
                        # VOC boxes are 1-indexed; shift to 0-indexed.
                        bbox[0] -= 1.0
                        bbox[1] -= 1.0

                        instances = [{
                            "category_id":
                                self.meta["thing_classes"].index(cls),
                            "bbox": bbox,
                            "bbox_mode": BoxMode.XYXY_ABS
                        }]
                        r["annotations"] = instances
                        dicts_.append(r)
                if len(dicts_) > int(shot):
                    dicts_ = np.random.choice(dicts_,
                                              int(shot),
                                              replace=False)
                dicts.extend(dicts_)
        else:
            for fid in fileids:
                anno_file = osp.join(self.image_root,
                                     "Annotations", fid + ".xml")
                jpeg_file = osp.join(self.image_root,
                                     "JPEGImages", fid + ".jpg")

                tree = ET.parse(anno_file)

                r = {
                    "file_name": jpeg_file,
                    "image_id": fid,
                    "height":
                        int(tree.findall("./size/height")[0].text),
                    "width":
                        int(tree.findall("./size/width")[0].text),
                }

                ignore = False
                instances = []
                for obj in tree.findall("object"):
                    cls = obj.find("name").text
                    # Drop the whole image if it contains any class outside
                    # this split's class list.
                    if not (cls in self.meta["thing_classes"]):
                        ignore = True
                        break
                    bbox = obj.find("bndbox")
                    bbox = [float(bbox.find(x).text)
                            for x in ["xmin", "ymin", "xmax", "ymax"]]
                    bbox[0] -= 1.0
                    bbox[1] -= 1.0

                    instances.append({
                        "category_id":
                            self.meta["thing_classes"].index(cls),
                        "bbox": bbox,
                        "bbox_mode": BoxMode.XYXY_ABS,
                    })
                if ignore:
                    continue
                r["annotations"] = instances
                dicts.append(r)

        return dicts
# fmt: off
# The 20 PASCAL VOC object categories in their canonical order.
CLASS_NAMES = [
    "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat",
    "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person",
    "pottedplant", "sheep", "sofa", "train", "tvmonitor",
]
# PASCAL VOC few-shot benchmark splits
# Keyed by split id (1-3). ALL lists the full 20 classes with the 15 base
# classes first and the 5 novel classes last; NOVEL holds the 5 held-out
# classes; BASE holds the remaining 15 (ALL minus NOVEL).
PASCAL_VOC_ALL_CATEGORIES = {
    1: ['aeroplane', 'bicycle', 'boat', 'bottle', 'car', 'cat', 'chair',
        'diningtable', 'dog', 'horse', 'person', 'pottedplant', 'sheep',
        'train', 'tvmonitor', 'bird', 'bus', 'cow', 'motorbike', 'sofa'],
    2: ['bicycle', 'bird', 'boat', 'bus', 'car', 'cat', 'chair', 'diningtable',
        'dog', 'motorbike', 'person', 'pottedplant', 'sheep', 'train',
        'tvmonitor', 'aeroplane', 'bottle', 'cow', 'horse', 'sofa'],
    3: ['aeroplane', 'bicycle', 'bird', 'bottle', 'bus', 'car', 'chair', 'cow',
        'diningtable', 'dog', 'horse', 'person', 'pottedplant', 'train',
        'tvmonitor', 'boat', 'cat', 'motorbike', 'sheep', 'sofa'],
}
# The 5 novel (few-shot) categories for each split.
PASCAL_VOC_NOVEL_CATEGORIES = {
    1: ['bird', 'bus', 'cow', 'motorbike', 'sofa'],
    2: ['aeroplane', 'bottle', 'cow', 'horse', 'sofa'],
    3: ['boat', 'cat', 'motorbike', 'sheep', 'sofa'],
}
# The 15 base (fully annotated) categories for each split.
PASCAL_VOC_BASE_CATEGORIES = {
    1: ['aeroplane', 'bicycle', 'boat', 'bottle', 'car', 'cat', 'chair',
        'diningtable', 'dog', 'horse', 'person', 'pottedplant', 'sheep',
        'train', 'tvmonitor'],
    2: ['bicycle', 'bird', 'boat', 'bus', 'car', 'cat', 'chair', 'diningtable',
        'dog', 'motorbike', 'person', 'pottedplant', 'sheep', 'train',
        'tvmonitor'],
    3: ['aeroplane', 'bicycle', 'bird', 'bottle', 'bus', 'car', 'chair', 'cow',
        'diningtable', 'dog', 'horse', 'person', 'pottedplant', 'train',
        'tvmonitor'],
}
| StarcoderdataPython |
11231514 | <reponame>kentacho/efprob
"""Tests for HMM.
"""
from efprob import *
from hmm import *
def test_wheather_hmm():
    """Weather HMM: stationary state, observation validity, filtering.

    (Name typo "wheather" kept — renaming would change the collected test id.)
    """
    weather = Space("Weather", ['C', 'S', 'R'])
    activity = Space("Activities", ['I', 'O'])
    trans = chan_fromstates([State([0.5, 0.2, 0.3], weather),
                             State([0.15, 0.8, 0.05], weather),
                             State([0.2, 0.2, 0.6], weather)], weather)
    emit = chan_fromstates([flip(0.5, activity),
                            flip(0.2, activity),
                            flip(0.9, activity)], weather)
    init = point_state('C', weather)
    model = Hmm(init, trans, emit)
    #
    # Stationary state of the transition channel.
    #
    stationary = State([0.25, 0.5, 0.25], weather)
    assert (trans >> stationary) == stationary
    #
    # Validity of the observation sequence O, I, I — computed two ways.
    #
    assert ((((idn(activity @ activity) @ tuple_chan(emit, trans))
              * (idn(activity) @ tuple_chan(emit, trans))
              * tuple_chan(emit, trans)) >> init).MM(1, 1, 1, 0)
            >= (point_pred('O', activity) @ point_pred('I', activity)
                @ point_pred('I', activity))) == 0.1674
    assert (init >= ((emit << point_pred('O', activity))
                     & (trans << ((emit << point_pred('I', activity))
                                  & (trans << (emit << point_pred('I', activity))))))) == 0.1674
    assert model.forward_of_points(['O', 'I', 'I']) == 0.1674
    assert model.backward_of_points(['O', 'I', 'I']) == 0.1674
    assert np.isclose(model.validity_of_points(['O', 'I', 'I']), 0.1674)
    #
    # Filtering: posterior after each successive observation.
    #
    post1 = trans >> (init / (emit << point_pred('O', activity)))
    post2 = trans >> (post1 / (emit << point_pred('I', activity)))
    post3 = trans >> (post2 / (emit << point_pred('I', activity)))
    assert post3 == State([1867/6696, 347/1395, 15817/33480], weather)
    assert model.filter_of_points(['O', 'I', 'I']) \
        == State([1867/6696, 347/1395, 15817/33480], weather)
def test_hallway():
    """Hallway-localisation HMM: five cells, walls observable in cells 1 and 5."""
    cells = Space("Cells", [1, 2, 3, 4, 5])
    walls = Space("Walls", [2, 3])
    init = point_state(3, cells)
    trans = chan_fromstates([State([3/4, 1/4, 0, 0, 0], cells),
                             State([1/4, 1/2, 1/4, 0, 0], cells),
                             State([0, 1/4, 1/2, 1/4, 0], cells),
                             State([0, 0, 1/4, 1/2, 1/4], cells),
                             State([0, 0, 0, 1/4, 3/4], cells)], cells)
    emit = chan_fromstates([State([0, 1], walls),
                            State([1, 0], walls),
                            State([1, 0], walls),
                            State([1, 0], walls),
                            State([0, 1], walls)], cells)
    model = Hmm(init, trans, emit)
    obs = [2, 2, 3, 2, 3, 3]
    #
    # Validity of the observation sequence, directly and via predicates.
    #
    assert (model >= obs) == 3/512
    q5 = emit << point_pred(3, walls)
    q4 = (emit << point_pred(3, walls)) & (trans << q5)
    q3 = (emit << point_pred(2, walls)) & (trans << q4)
    q2 = (emit << point_pred(3, walls)) & (trans << q3)
    q1 = (emit << point_pred(2, walls)) & (trans << q2)
    q0 = (emit << point_pred(2, walls)) & (trans << q1)
    assert (init >= q0) == 3/512
    #
    # Filtering: posterior after each successive observation.
    #
    s2 = trans >> (init / (emit << point_pred(2, walls)))
    assert s2 == State([0, 1/4, 1/2, 1/4, 0], cells)
    s3 = trans >> (s2 / (emit << point_pred(2, walls)))
    assert s3 == State([1/16, 1/4, 3/8, 1/4, 1/16], cells)
    s4 = trans >> (s3 / (emit << point_pred(3, walls)))
    assert s4 == State([3/8, 1/8, 0, 1/8, 3/8], cells)
    s5 = trans >> (s4 / (emit << point_pred(2, walls)))
    assert s5 == State([1/8, 1/4, 1/4, 1/4, 1/8], cells)
    s6 = trans >> (s5 / (emit << point_pred(3, walls)))
    assert s6 == State([3/8, 1/8, 0, 1/8, 3/8], cells)
    s7 = trans >> (s6 / (emit << point_pred(3, walls)))
    assert s7 == State([3/8, 1/8, 0, 1/8, 3/8], cells)
    assert model.filter_of_points([2, 2, 3, 2, 3, 3]) \
        == State([3/8, 1/8, 0, 1/8, 3/8], cells)
    #
    # Most likely hidden-state sequence.
    #
    assert model.viterbi_of_points(obs) == [3, 2, 1, 2, 1, 1]
8040209 | """
Compare COVID-19 simulation outputs to data.
Used for spatial - covidregion - model
Allow comparison of multiple simulation experiments
"""
import argparse
import os
import pandas as pd
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import sys
sys.path.append('../')
from load_paths import load_box_paths
import matplotlib.dates as mdates
import seaborn as sns
from processing_helpers import *
mpl.rcParams['pdf.fonttype'] = 42
def parse_args(argv=None):
    """Build and parse the command-line arguments for the comparison script.

    :param argv: optional explicit argument list; defaults to sys.argv[1:].
        Exposed as a parameter (backward compatible) so the parser can be
        exercised in tests without touching sys.argv.
    :return: argparse.Namespace with ``exp_names`` (list of experiment
        names, or None if not given) and ``Location`` ("Local" or
        "NUCLUSTER", default "Local").
    """
    description = "Simulation run for modeling Covid-19"
    parser = argparse.ArgumentParser(description=description)

    parser.add_argument(
        "-e",
        "--exp_names",
        type=str,
        nargs='+',
        help="Experiment names to compare, example python data_comparison_spatial_2.py -e exp_name1 exp_name2"
    )
    parser.add_argument(
        "-l",
        "--Location",
        type=str,
        help="Local or NUCLUSTER",
        default="Local"
    )
    return parser.parse_args(argv)
def plot_sim_and_ref(exp_names, ems_nr, first_day, last_day, ymax=10000, logscale=False):
    """Plot simulated outcome channels against reference data for one region.

    Draws one subplot per channel, overlaying the median and CI bands of each
    experiment in `exp_names` on top of the observed data points, then saves
    the figure as PNG and PDF under `plot_path`.

    Args:
        exp_names: list of experiment names whose outputs are compared.
        ems_nr: COVID-19 region number; 0 means the statewide ("Illinois") aggregate.
        first_day, last_day: pandas Timestamps bounding the plotted date range.
        ymax: upper y-limit, used only when `logscale` is True.
        logscale: if True, use a log y-axis from 0.1 to `ymax`.

    NOTE(review): relies on the module-level global `plot_path` being set by the
    __main__ block before this is called — confirm call order.
    """
    if ems_nr == 0:
        # Region 0 is the statewide aggregate.
        region_suffix = "_All"
        region_label = 'Illinois'
    else:
        region_suffix = "_EMS-" + str(ems_nr)
        region_label = region_suffix.replace('_EMS-', 'COVID-19 Region ')
    outcome_channels, channels, data_channel_names, titles = get_datacomparison_channels()
    ref_df = load_ref_df(ems_nr)
    fig = plt.figure(figsize=(16, 8))
    fig.subplots_adjust(right=0.97, wspace=0.5, left=0.1, hspace=0.9, top=0.95, bottom=0.07)
    palette = sns.color_palette('tab10', len(exp_names))
    axes = [fig.add_subplot(2, 3, x + 1) for x in range(len(channels))]
    for c, channel in enumerate(channels):
        ax = axes[c]
        for d, exp_name in enumerate(exp_names):
            # Only load the columns needed for this region.
            column_list = ['time', 'startdate', 'scen_num', 'sample_num', 'run_num']
            for chn in outcome_channels:
                column_list.append(chn + "_EMS-" + str(ems_nr))
            df = load_sim_data(exp_name, region_suffix, column_list=column_list)
            df = df[df['date'].between(first_day, last_day)]
            # No separate suspected-case channel here, so reuse 'critical' as-is.
            df['critical_with_suspected'] = df['critical']
            # Legend label: last underscore-separated token of the experiment name.
            exp_name_label = str(exp_name.split('_')[-1])
            mdf = df.groupby('date')[channel].agg([CI_50, CI_2pt5, CI_97pt5, CI_25, CI_75]).reset_index()
            ax.plot(mdf['date'], mdf['CI_50'], color=palette[d], label=exp_name_label)
            # 95% CI band (light) and 50% CI band (darker).
            ax.fill_between(mdf['date'], mdf['CI_2pt5'], mdf['CI_97pt5'],
                            color=palette[d], linewidth=0, alpha=0.1)
            ax.fill_between(mdf['date'], mdf['CI_25'], mdf['CI_75'],
                            color=palette[d], linewidth=0, alpha=0.3)
        # NOTE(review): `b=` was deprecated/renamed to `visible=` in newer
        # matplotlib — confirm the pinned matplotlib version still accepts it.
        ax.grid(b=True, which='major', color='#999999', linestyle='-', alpha=0.3)
        ax.set_title(titles[c], y=0.8, fontsize=12)
        axes[-1].legend()
        ax.xaxis.set_major_formatter(mdates.DateFormatter('%b\n%y'))
        ax.set_xlim(first_day, last_day)
        if logscale:
            ax.set_ylim(0.1, ymax)
            ax.set_yscale('log')
        # Observed data: raw daily points plus a centered 7-day rolling mean.
        ax.plot(ref_df['date'], ref_df[data_channel_names[c]], 'o', color='#303030', linewidth=0, ms=1)
        ax.plot(ref_df['date'], ref_df[data_channel_names[c]].rolling(window=7, center=True).mean(), c='k', alpha=1.0)
    plt.suptitle(region_label, y=1, fontsize=14)
    plt.tight_layout()
    plt.subplots_adjust(top=0.88)
    plot_name = f'compare_to_data_{ems_nr}'
    if logscale == False:
        plot_name = plot_name + "_nolog"
    if not os.path.exists(plot_path):
        os.makedirs(plot_path)
    if not os.path.exists(os.path.join(plot_path, 'pdf')):
        os.makedirs(os.path.join(plot_path, 'pdf'))
    plt.savefig(os.path.join(plot_path, plot_name + '.png'))
    plt.savefig(os.path.join(plot_path, 'pdf', plot_name + '.pdf'))
if __name__ == '__main__':
    args = parse_args()
    Location = args.Location
    # NOTE(review): the -e/--exp_names CLI argument is ignored — the experiment
    # list is hard-coded here; confirm whether args.exp_names should be used.
    exp_names = ['20210120_IL_ae_base_v1_baseline','20210122_IL_quest_ki13']
    datapath, projectpath, wdir, exe_dir, git_dir = load_box_paths(Location=Location)
    first_plot_day = pd.Timestamp('2020-02-13')
    last_plot_day = pd.Timestamp.today() + pd.Timedelta(15,'days')
    # Plots are written under the LAST experiment's output directory.
    plot_path = os.path.join(wdir, 'simulation_output', exp_names[len(exp_names) - 1], '_plots')
    """Get group names"""
    # Region/group numbers are discovered from the FIRST experiment's outputs.
    grp_list, grp_suffix, grp_numbers = get_group_names(exp_path=os.path.join(wdir, 'simulation_output',exp_names[0]))
    for grp_nr in grp_numbers:
        print("Start processing region " + str(grp_nr))
        #plot_sim_and_ref(exp_names, ems_nr=grp_nr, first_day=first_plot_day,
        #                 last_day=last_plot_day, logscale=True)
        plot_sim_and_ref(exp_names, ems_nr=grp_nr, first_day=first_plot_day,
                     last_day=last_plot_day, logscale=False)
| StarcoderdataPython |
# Improve the game from CHALLENGE 28, where the computer "thinks" of a number
# between 0 and 10. Now the player keeps guessing until getting it right, and
# at the end the program shows how many wrong guesses were needed to win.
import random
# Number of wrong guesses so far.
defeats = 0
computer = random.randint(0, 10)
print('O computador está pensando em um número de 0 a 10.')
while True:
    # Inner loop: re-prompt until the player enters a number in range 0-10.
    while True:
        player = int(input('Tente adivinhar o número que o pc está pensando: '))
        if 0 <= player <= 10:
            break
        else:
            print('\033[31mResposta inválida!\033[m')
    if player == computer:
        if defeats == 0:
            print('\033[32mParabéns, Você venceu sem derrotas!\033[m')
            break
        else:
            print('\033[32mParabéns, você venceu!\033[m')
            print('\033[31mDerrotas:', defeats)
            break
    print('Você errou! tente denovo.')
    defeats += 1
| StarcoderdataPython |
225376 | # coding=utf-8
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensorflow-transform CsvCoder tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pickle
import numpy as np
import tensorflow as tf
from tensorflow_transform.coders import csv_coder
from tensorflow_transform.tf_metadata import dataset_schema
import unittest
class TestCSVCoder(unittest.TestCase):
  """Unit tests for tensorflow_transform's CsvCoder encode/decode behavior."""
  _COLUMNS = ['numeric1', 'text1', 'category1', 'idx', 'numeric2', 'value',
              'boolean1']
  # The following input schema has no default values, so any invocations to
  # decode with missing values should raise an error. CsvCoderDecodeTest adds
  # good coverage for missing value handling.
  _INPUT_SCHEMA = dataset_schema.from_feature_spec({
      'numeric1': tf.FixedLenFeature(shape=[], dtype=tf.int64),
      'numeric2': tf.VarLenFeature(dtype=tf.float32),
      'boolean1': tf.FixedLenFeature(shape=[1], dtype=tf.bool),
      'text1': tf.FixedLenFeature(shape=[], dtype=tf.string),
      'category1': tf.VarLenFeature(dtype=tf.string),
      'y': tf.SparseFeature('idx', 'value', tf.float32, 10),
  })
  # Each case is (csv_line, value, multivalent, feature_spec); these round-trip
  # through both decode and encode.
  _ENCODE_DECODE_CASES = [
      # FixedLenFeature scalar int.
      ('12', 12, False,
       tf.FixedLenFeature(shape=[], dtype=tf.int64)),
      # FixedLenFeature scalar float without decimal point.
      ('12', 12, False,
       tf.FixedLenFeature(shape=[], dtype=tf.float32)),
      # FixedLenFeature scalar boolean.
      ('True', True, False,
       tf.FixedLenFeature(shape=[], dtype=tf.bool)),
      # FixedLenFeature scalar boolean.
      ('False', False, False,
       tf.FixedLenFeature(shape=[], dtype=tf.bool)),
      # FixedLenFeature length 1 vector int.
      ('12', [12], False,
       tf.FixedLenFeature(shape=[1], dtype=tf.int64)),
      # FixedLenFeature size 1 matrix int.
      ('12', [[12]], False,
       tf.FixedLenFeature(shape=[1, 1], dtype=tf.int64)),
      # FixedLenFeature unquoted text.
      ('this is unquoted text', 'this is unquoted text', False,
       tf.FixedLenFeature(shape=[], dtype=tf.string)),
      # FixedLenFeature quoted text.
      ('"this is a ,text"', 'this is a ,text', False,
       tf.FixedLenFeature(shape=[], dtype=tf.string)),
      # FixedLenFeature scalar numeric with default value.
      ('4', 4, False,
       tf.FixedLenFeature(shape=[], dtype=tf.int64, default_value=-1)),
      # FixedLenFeature scalar text with default value set.
      ('a test', 'a test', False,
       tf.FixedLenFeature(shape=[], dtype=tf.string, default_value='d')),
      # VarLenFeature text.
      ('a test', ['a test'], False,
       tf.VarLenFeature(dtype=tf.string)),
      # SparseFeature float one value.
      ('5,2.0', ([5], [2.0]), False,
       tf.SparseFeature('idx', 'value', tf.float32, 10)),
      # SparseFeature float no values.
      (',', ([], []), False,
       tf.SparseFeature('idx', 'value', tf.float32, 10)),
      # FixedLenFeature scalar int, multivalent.
      ('12', 12, True,
       tf.FixedLenFeature(shape=[], dtype=tf.int64)),
      # FixedLenFeature length 1 vector int, multivalent.
      ('12', [12], True,
       tf.FixedLenFeature(shape=[1], dtype=tf.int64)),
      # FixedLenFeature length 2 vector int, multivalent.
      ('12|14', [12, 14], True,
       tf.FixedLenFeature(shape=[2], dtype=tf.int64)),
      # FixedLenFeature size 1 matrix int.
      ('12', [[12]], True,
       tf.FixedLenFeature(shape=[1, 1], dtype=tf.int64)),
      # FixedLenFeature size (2, 2) matrix int.
      ('12|13|14|15', [[12, 13], [14, 15]], True,
       tf.FixedLenFeature(shape=[2, 2], dtype=tf.int64)),
  ]
  # Each case is (csv_line, error_type, error_msg_regex, multivalent,
  # feature_spec); decoding must raise.
  _DECODE_ERROR_CASES = [
      # FixedLenFeature scalar numeric missing value.
      ('', ValueError, r'expected a value on column \'x\'', False,
       tf.FixedLenFeature(shape=[], dtype=tf.int64)),
      # FixedLenFeature length 1 vector numeric missing value.
      ('', ValueError, r'expected a value on column \'x\'', False,
       tf.FixedLenFeature(shape=[1], dtype=tf.int64)),
      # FixedLenFeature length >1 vector.
      ('1', ValueError,
       r'FixedLenFeature \'x\' was not multivalent', False,
       tf.FixedLenFeature(shape=[2], dtype=tf.int64)),
      # FixedLenFeature scalar text missing value.
      ('', ValueError, r'expected a value on column \'x\'', False,
       tf.FixedLenFeature(shape=[], dtype=tf.string)),
      # SparseFeature with missing value but present index.
      ('5,', ValueError,
       r'SparseFeature \'x\' has indices and values of different lengths',
       False,
       tf.SparseFeature('idx', 'value', tf.float32, 10)),
      # SparseFeature with missing index but present value.
      (',2.0', ValueError,
       r'SparseFeature \'x\' has indices and values of different lengths',
       False,
       tf.SparseFeature('idx', 'value', tf.float32, 10)),
      # SparseFeature with negative index.
      ('-1,2.0', ValueError, r'has index -1 out of range', False,
       tf.SparseFeature('idx', 'value', tf.float32, 10)),
      # SparseFeature with index equal to size.
      ('10,2.0', ValueError, r'has index 10 out of range', False,
       tf.SparseFeature('idx', 'value', tf.float32, 10)),
      # SparseFeature with index greater than size.
      ('11,2.0', ValueError, r'has index 11 out of range', False,
       tf.SparseFeature('idx', 'value', tf.float32, 10)),
      # FixedLenFeature with text missing value.
      ('test', ValueError, r'could not convert string to float: test', False,
       tf.FixedLenFeature(shape=[], dtype=tf.float32)),
      # FixedLenFeature scalar int, multivalent, too many values.
      ('1|2', ValueError,
       r'FixedLenFeature \'x\' got wrong number of values', True,
       tf.FixedLenFeature(shape=[], dtype=tf.float32)),
      # FixedLenFeature length 1 int, multivalent, too many values.
      ('1|2', ValueError,
       r'FixedLenFeature \'x\' got wrong number of values', True,
       tf.FixedLenFeature(shape=[1], dtype=tf.float32)),
      # FixedLenFeature length 2 int, multivalent, too few values.
      ('1', ValueError,
       r'FixedLenFeature \'x\' got wrong number of values', True,
       tf.FixedLenFeature(shape=[2], dtype=tf.float32)),
  ]
  # Each case is (value, error_type, error_msg_regex, multivalent,
  # feature_spec); encoding must raise.
  _ENCODE_ERROR_CASES = [
      # FixedLenFeature length 2 vector, multivalent with wrong number of
      # values.
      ([1, 2, 3], ValueError,
       r'FixedLenFeature \'x\' got wrong number of values', True,
       tf.FixedLenFeature(shape=[2], dtype=tf.string))
  ]
  # Cases that decode successfully but do not round-trip back to the same
  # CSV line (e.g. defaults filled in for missing values).
  _DECODE_ONLY_CASES = [
      # FixedLenFeature scalar float with decimal point.
      ('12.0', 12, False,
       tf.FixedLenFeature(shape=[], dtype=tf.float32)),
      # FixedLenFeature scalar float with quoted value.
      ('"12.0"', 12, False,
       tf.FixedLenFeature(shape=[], dtype=tf.float32)),
      # FixedLenFeature scalar numeric with missing value and default value set.
      ('', -1, False,
       tf.FixedLenFeature(shape=[], dtype=tf.int64, default_value=-1)),
      # FixedLenFeature scalar text with missing value and default value set.
      ('', 'd', False,
       tf.FixedLenFeature(shape=[], dtype=tf.string, default_value='d')),
      # FixedLenFeature scalar numeric with missing value and default value set,
      # where default value is falsy.
      ('', 0, False,
       tf.FixedLenFeature(shape=[], dtype=tf.int64, default_value=0)),
      # FixedLenFeature scalar text with missing value and default value set,
      # where default value is falsy.
      ('', '', False,
       tf.FixedLenFeature(shape=[], dtype=tf.string, default_value='')),
      # VarLenFeature text with missing value.
      ('', [], False,
       tf.VarLenFeature(dtype=tf.string)),
      # FixedLenFeature scalar text with default value set.
      ('', True, False,
       tf.FixedLenFeature(shape=[], dtype=tf.bool, default_value=True)),
  ]
  # unittest: append the custom msg= argument to the default failure message.
  longMessage = True
  def _msg_for_decode_case(self, csv_line, feature_spec):
    """Return a failure-message suffix identifying a decode test case."""
    return 'While decoding "{csv_line}" with FeatureSpec {feature_spec}'.format(
        csv_line=csv_line, feature_spec=feature_spec)
  def _msg_for_encode_case(self, value, feature_spec):
    """Return a failure-message suffix identifying an encode test case."""
    return 'While encoding {value} with FeatureSpec {feature_spec}'.format(
        value=value, feature_spec=feature_spec)
  def _assert_encode_decode(self, coder, data, expected_decoded):
    """Assert decode(data) == expected and that the result round-trips."""
    decoded = coder.decode(data)
    np.testing.assert_equal(decoded, expected_decoded)
    # encode returns bytes, hence the utf-8 encoding of the expected line.
    encoded = coder.encode(decoded)
    np.testing.assert_equal(encoded, data.encode('utf-8'))
    decoded_again = coder.decode(encoded)
    np.testing.assert_equal(decoded_again, expected_decoded)
  def test_csv_coder(self):
    """Round-trip a full multi-column row with Python and numpy values."""
    data = '12,"this is a ,text",categorical_value,1,89.0,12.0,False'
    coder = csv_coder.CsvCoder(self._COLUMNS, self._INPUT_SCHEMA)
    # Python types.
    expected_decoded = {'category1': ['categorical_value'],
                        'numeric1': 12,
                        'numeric2': [89.0],
                        'boolean1': [False],
                        'text1': 'this is a ,text',
                        'y': ([1], [12.0])}
    self._assert_encode_decode(coder, data, expected_decoded)
    # Numpy types.
    expected_decoded = {'category1': np.array(['categorical_value']),
                        'numeric1': np.array(12),
                        'numeric2': np.array([89.0]),
                        'boolean1': np.array([False]),
                        'text1': np.array(['this is a ,text']),
                        'y': (np.array(1), np.array([12.0]))}
    self._assert_encode_decode(coder, data, expected_decoded)
  def test_csv_coder_with_unicode(self):
    """Round-trip a row containing a non-ASCII (Hebrew) categorical value."""
    data = u'12,"this is a ,text",שקרכלשהו,1,89.0,12.0,False'
    coder = csv_coder.CsvCoder(self._COLUMNS, self._INPUT_SCHEMA)
    # Python types.
    expected_decoded = {
        'category1': [u'שקרכלשהו'.encode('utf-8')],
        'numeric1': 12,
        'numeric2': [89.0],
        'boolean1': [False],
        'text1': 'this is a ,text',
        'y': ([1], [12.0])
    }
    self._assert_encode_decode(coder, data, expected_decoded)
    # Numpy types.
    expected_decoded = {
        'category1': np.array([u'שקרכלשהו'.encode('utf-8')]),
        'numeric1': np.array(12),
        'numeric2': np.array([89.0]),
        'boolean1': np.array([False]),
        'text1': np.array(['this is a ,text']),
        'y': (np.array(1), np.array([12.0]))
    }
    self._assert_encode_decode(coder, data, expected_decoded)
  def test_tsv_coder(self):
    """Round-trip a tab-delimited row via the `delimiter` parameter."""
    data = '12\t"this is a \ttext"\tcategorical_value\t1\t89.0\t12.0\tTrue'
    coder = csv_coder.CsvCoder(self._COLUMNS, self._INPUT_SCHEMA,
                               delimiter='\t')
    expected_decoded = {'category1': ['categorical_value'],
                        'numeric1': 12,
                        'numeric2': [89.0],
                        'boolean1': [True],
                        'text1': 'this is a \ttext',
                        'y': ([1], [12.0])}
    self._assert_encode_decode(coder, data, expected_decoded)
  def test_valency(self):
    """Round-trip multivalent columns using a secondary '|' delimiter."""
    data = ('11|12,"this is a ,text",categorical_value|other_value,1|3,89.0|'
            '91.0,12.0|15.0,False')
    feature_spec = self._INPUT_SCHEMA.as_feature_spec().copy()
    feature_spec['numeric1'] = tf.FixedLenFeature(shape=[2], dtype=tf.int64)
    schema = dataset_schema.from_feature_spec(feature_spec)
    multivalent_columns = ['numeric1', 'numeric2', 'y']
    coder = csv_coder.CsvCoder(self._COLUMNS, schema,
                               delimiter=',', secondary_delimiter='|',
                               multivalent_columns=multivalent_columns)
    expected_decoded = {'category1': ['categorical_value|other_value'],
                        'numeric1': [11, 12],
                        'numeric2': [89.0, 91.0],
                        'boolean1': [False],
                        'text1': 'this is a ,text',
                        'y': ([1, 3], [12.0, 15.0])}
    self._assert_encode_decode(coder, data, expected_decoded)
  # Test successful decoding with a single column.
  def testDecode(self):
    for csv_line, value, multivalent, feature_spec in (
        self._ENCODE_DECODE_CASES + self._DECODE_ONLY_CASES):
      schema = dataset_schema.from_feature_spec({'x': feature_spec})
      if isinstance(feature_spec, tf.SparseFeature):
        columns = [feature_spec.index_key, feature_spec.value_key]
      else:
        columns = 'x'
      if multivalent:
        coder = csv_coder.CsvCoder(columns, schema, secondary_delimiter='|',
                                   multivalent_columns=columns)
      else:
        coder = csv_coder.CsvCoder(columns, schema)
      np.testing.assert_equal(coder.decode(csv_line), {'x': value},
                              self._msg_for_decode_case(csv_line, feature_spec))
  # Test decode errors with a single column.
  def testDecodeErrors(self):
    for csv_line, error_type, error_msg, multivalent, feature_spec in (
        self._DECODE_ERROR_CASES):
      schema = dataset_schema.from_feature_spec({'x': feature_spec})
      if isinstance(feature_spec, tf.SparseFeature):
        columns = [feature_spec.index_key, feature_spec.value_key]
      else:
        columns = 'x'
      with self.assertRaisesRegexp(
          error_type, error_msg,
          msg=self._msg_for_decode_case(csv_line, feature_spec)):
        # We don't distinguish between errors in the coder constructor and in
        # the decode method.
        if multivalent:
          coder = csv_coder.CsvCoder(columns, schema, secondary_delimiter='|',
                                     multivalent_columns=columns)
        else:
          coder = csv_coder.CsvCoder(columns, schema)
        coder.decode(csv_line)
  # Test successful encoding with a single column.
  def testEncode(self):
    for csv_line, value, multivalent, feature_spec in self._ENCODE_DECODE_CASES:
      schema = dataset_schema.from_feature_spec({'x': feature_spec})
      if isinstance(feature_spec, tf.SparseFeature):
        columns = [feature_spec.index_key, feature_spec.value_key]
      else:
        columns = 'x'
      if multivalent:
        coder = csv_coder.CsvCoder(columns, schema, secondary_delimiter='|',
                                   multivalent_columns=columns)
      else:
        coder = csv_coder.CsvCoder(columns, schema)
      self.assertEqual(coder.encode({'x': value}), csv_line,
                       msg=self._msg_for_encode_case(value, feature_spec))
  # Test encode errors with a single column.
  def testEncodeErrors(self):
    for value, error_type, error_msg, multivalent, feature_spec in (
        self._ENCODE_ERROR_CASES):
      schema = dataset_schema.from_feature_spec({'x': feature_spec})
      if isinstance(feature_spec, tf.SparseFeature):
        columns = [feature_spec.index_key, feature_spec.value_key]
      else:
        columns = 'x'
      with self.assertRaisesRegexp(
          error_type, error_msg,
          msg=self._msg_for_encode_case(value, feature_spec)):
        if multivalent:
          coder = csv_coder.CsvCoder(columns, schema, secondary_delimiter='|',
                                     multivalent_columns=columns)
        else:
          coder = csv_coder.CsvCoder(columns, schema)
        coder.encode({'x': value})
  def test_missing_data(self):
    """A row missing a required (no-default) column raises ValueError."""
    coder = csv_coder.CsvCoder(self._COLUMNS, self._INPUT_SCHEMA)
    data = '12,,categorical_value,1,89.0,12.0,True'
    with self.assertRaisesRegexp(ValueError,
                                 'expected a value on column \'text1\''):
      coder.decode(data)
  def test_bad_boolean_data(self):
    """A boolean column only accepts the literals "True" and "False"."""
    coder = csv_coder.CsvCoder(self._COLUMNS, self._INPUT_SCHEMA)
    data = '12,text value,categorical_value,1,89.0,12.0,0'
    with self.assertRaisesRegexp(ValueError,
                                 'expected "True" or "False" as inputs'):
      coder.decode(data)
  def test_bad_row(self):
    """Rows with too many or too few columns are rejected."""
    coder = csv_coder.CsvCoder(self._COLUMNS, self._INPUT_SCHEMA)
    # The data has more columns than expected.
    data = ('12,"this is a ,text",categorical_value,1,89.0,12.0,'
            '"oh no, I\'m an error",14')
    with self.assertRaisesRegexp(Exception,
                                 'Columns do not match specified csv headers'):
      coder.decode(data)
    # The data has fewer columns than expected.
    data = '12,"this is a ,text",categorical_value"'
    with self.assertRaisesRegexp(Exception,
                                 'Columns do not match specified csv headers'):
      coder.decode(data)
  def test_column_not_found(self):
    """A schema column absent from column_names raises at construction."""
    with self.assertRaisesRegexp(
        ValueError, 'Column not found: '):
      csv_coder.CsvCoder([], self._INPUT_SCHEMA)
  def test_picklable(self):
    """The coder keeps working after being pickled and unpickled."""
    encoded_data = '12,"this is a ,text",categorical_value,1,89.0,12.0,False'
    expected_decoded = {'category1': ['categorical_value'],
                        'numeric1': 12,
                        'numeric2': [89.0],
                        'boolean1': [False],
                        'text1': 'this is a ,text',
                        'y': ([1], [12.0])}
    coder = csv_coder.CsvCoder(self._COLUMNS, self._INPUT_SCHEMA)
    # Ensure we can pickle right away.
    coder = pickle.loads(pickle.dumps(coder))
    self._assert_encode_decode(coder, encoded_data, expected_decoded)
    # And after use.
    coder = pickle.loads(pickle.dumps(coder))
    self._assert_encode_decode(coder, encoded_data, expected_decoded)
  def test_decode_errors(self):
    """Malformed inputs surface as csv_coder.DecodeError."""
    input_schema = dataset_schema.from_feature_spec({
        'b': tf.FixedLenFeature(shape=[], dtype=tf.float32),
        'a': tf.FixedLenFeature(shape=[], dtype=tf.string),
    })
    coder = csv_coder.CsvCoder(column_names=['a', 'b'], schema=input_schema)
    # Test bad csv.
    with self.assertRaisesRegexp(
        csv_coder.DecodeError,
        '\'int\' object has no attribute \'encode\': 123'):
      coder.decode(123)
    # Test extra column.
    with self.assertRaisesRegexp(csv_coder.DecodeError,
                                 'Columns do not match specified csv headers'):
      coder.decode('1,2,')
    # Test missing column.
    with self.assertRaisesRegexp(csv_coder.DecodeError,
                                 'Columns do not match specified csv headers'):
      coder.decode('a_value')
    # Test empty row.
    with self.assertRaisesRegexp(csv_coder.DecodeError,
                                 'Columns do not match specified csv headers'):
      coder.decode('')
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
225070 | import sys
sys.path.extend(['..'])
import tensorflow as tf
import glob
from data_loader.data_generator import DataGenerator
from utils.config import process_config
from utils.utils import get_args
from importlib import import_module
from tqdm import tqdm
import numpy as np
from data.process_images import process_ims
import shutil
def predict():
    """Run the restored best-checkpoint model over images in ../samples/.

    Pre-processes the sample images, restores the model from its best
    checkpoint, decodes the sparse prediction tensor into character strings
    via the data loader's inverse character map, prints one prediction per
    input file, and removes the temporary processed-image directory.
    """
    try:
        args = get_args()
        config = process_config(args.config)
    except:
        # NOTE(review): bare `except` also swallows SystemExit and
        # KeyboardInterrupt — consider `except Exception` instead.
        print("missing or invalid arguments")
        exit(0)
    # Pre-process images
    print('Pre-process images:')
    process_ims(input_dir='../samples/*', output_dir='../samples/processed/', out_height=config.im_height)
    # Get Model: dynamically import models.<architecture>_model and grab its Model class.
    model_types = import_module('models.' + config.architecture + '_model')
    Model = getattr(model_types, 'Model')
    # create tensorflow session
    sess = tf.Session()
    # create your data generator
    data_loader = DataGenerator(config, eval_phase=True, eval_on_test_data=False)
    # create instance of the model you want
    model = Model(data_loader, config)
    # load the model from the best checkpoint
    model.load(sess, config.best_model_dir)
    x, length, lab_length, y, is_training = tf.get_collection('inputs')
    pred = model.prediction
    # initialize dataset
    data_loader.initialize(sess, is_train=False)
    # Progress bar
    tt = range(data_loader.num_iterations_val)
    # Iterate over batches
    predictions, filenames = [], sorted(glob.glob('../samples/*'))
    for _ in tqdm(tt):
        preds_sparse = sess.run([pred], feed_dict={is_training: False})
        # Map numeric predictions with corresponding character labels.
        # preds_sparse[0][0][0] is presumably a SparseTensorValue
        # (indices/values/dense_shape) — confirm against model.prediction.
        preds_out = np.zeros(preds_sparse[0][0][0].dense_shape)
        for idx, val in enumerate(preds_sparse[0][0][0].indices):
            preds_out[val[0]][val[1]] = preds_sparse[0][0][0].values[idx]
        predictions += [''.join([data_loader.char_map_inv[j] for j in preds_out[i]]) for i in range(preds_out.shape[0])]
    print('\nPredictions:')
    [print('{}: {}'.format(name[name.rfind('/')+1:], model_pred)) for name, model_pred in zip(filenames, predictions)]
    shutil.rmtree('../samples/processed/')
if __name__ == '__main__':
    # Script entry point: run inference on the bundled sample images.
    predict()
| StarcoderdataPython |
3292753 | <gh_stars>1-10
"""A pigasus demonstration vehicle.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from . import poisson
| StarcoderdataPython |
140805 | <reponame>chiehandlu/pythonlearn<filename>s13_debug_unittest/debug_pdb.py<gh_stars>10-100
import pdb
numbers = [1, 2, 3, 4, 10, -4, -7, 0]
pdb.set_trace() # add this line where you want to start debug mode
def all_even(num_list):
    """Return a list of the even numbers in *num_list*.

    Args:
        num_list: an iterable of integers.

    Returns:
        A new list containing, in order, the elements divisible by 2.
    """
    # Bug fix: the original called even_numbers.extend(number), which raises
    # TypeError because an int is not iterable; each even number must be
    # appended individually (expressed here as a comprehension).
    return [number for number in num_list if number % 2 == 0]
all_even(numbers) | StarcoderdataPython |
1896526 | from pyramid.request import Request
from pypi.services import user_service
from pypi.viewmodels.shared.viewmodel_base import ViewModelBase
class AccountHomeViewModel(ViewModelBase):
    """View model backing the account home page: resolves the current user."""
    def __init__(self, request: Request):
        super().__init__(request)
        # self.user_id is presumably populated by ViewModelBase from the
        # request's session/auth data — confirm against ViewModelBase.
        self.user = user_service.find_user_by_id(self.user_id)
1884005 | <gh_stars>0
__author__ = 'raymond'
# Public names exported when clients use `from <this package> import *`.
__all__ = ['BloombergDataReader', 'PriceTokensValidator', 'StockHistoryRecordBuilder',
           'TimeStampPriceBuilder', 'TradingDataRowParser']
6490975 | def sum_mul(n, m):
| StarcoderdataPython |
227663 | <reponame>CarlosDomingues/b2sw
"""
Tests related to the planets module.
"""
import unittest
import unittest.mock
from b2sw import planets
class TestPlanets(unittest.TestCase):
    """
    Tests related to the Planets class.

    Each test patches b2sw.planets.Planets, so no real PynamoDB/DynamoDB
    calls are made; the assertions only verify how the table model is used.
    """
    @unittest.mock.patch('b2sw.planets.Planets')
    def test_get_by_id(self, fake_table):
        """
        get_by_id should call PynamoDB API correctly.
        """
        planets.get_by_id(1)
        fake_table.get.assert_called_with(1)
    @unittest.mock.patch('b2sw.planets.Planets')
    def test_get_by_name(self, fake_table):
        """
        get_by_name should call PynamoDB API correctly.
        """
        planets.get_by_name('Pluto')
        # `fake_table.name == 'Pluto'` builds the (mocked) filter condition
        # that scan is expected to receive.
        fake_table.scan.assert_called_with(fake_table.name == 'Pluto')
    @unittest.mock.patch('b2sw.planets.Planets')
    def test_put(self, fake_table):
        """
        put should call PynamoDB API correctly.
        """
        planets.put(
            1,
            'Pluto',
            ['Cold', 'Very Cold'],
            ['Ice']
        )
        fake_table().save.assert_called_with()
    @unittest.mock.patch('b2sw.planets.Planets')
    def test_delete(self, fake_table):
        """
        delete should call PynamoDB API correctly.
        """
        planets.delete(1)
        fake_table.get.assert_called_with(1)
        fake_table.get().delete.assert_called_with()
    @unittest.mock.patch('b2sw.planets.Planets')
    def test_update(self, fake_table):
        """
        update should call PynamoDB API correctly.
        """
        planets.update(1, name='New Pluto')
        fake_table.get.assert_called_with(1)
        fake_table.get().refresh.assert_called_with()
6408011 | import typing
from pydantic import BaseModel
class AuthModel(BaseModel):
    """Pydantic model carrying an authentication credential hash."""
    # Password hash string (per the field name, not a plaintext password).
    passhash: str
331703 | <filename>manubot/tests/test_command.py<gh_stars>1-10
import subprocess
import manubot
def test_version():
    """`manubot --version` prints the installed package version prefixed with 'v'."""
    output = subprocess.check_output(["manubot", "--version"], universal_newlines=True)
    assert output.rstrip() == f"v{manubot.__version__}"
def test_missing_subcommand():
    """Running bare `manubot` exits with code 2 and an argparse error message."""
    completed = subprocess.run(
        ["manubot"],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        universal_newlines=True,
    )
    # Surface stderr in the pytest output for easier debugging on failure.
    print(completed.stderr)
    assert completed.returncode == 2
    expected = "error: the following arguments are required: subcommand"
    assert expected in completed.stderr
| StarcoderdataPython |
3247185 | import FWCore.ParameterSet.Config as cms
# CMSSW configuration: combined ZeroBias skim producing two output streams
# (good-vertex events and log-error events) from Run2010A ZeroBias data.
process = cms.Process("SKIM")
process.configurationMetadata = cms.untracked.PSet(
    version = cms.untracked.string('$Revision: 1.4 $'),
    name = cms.untracked.string('$Source: /cvs/CMSSW/CMSSW/DPGAnalysis/Skims/python/ZeroBiasPDSkim_cfg.py,v $'),
    annotation = cms.untracked.string('Combined ZeroBias skim')
)
# selection eff on 1000 events
# file:/tmp/malgeri/ZB_vertex.root
# /tmp/malgeri/ZB_vertex.root ( 45 events, 15799040 bytes )
#
#
# This is for testing purposes.
#
#
process.source = cms.Source("PoolSource",
   fileNames = cms.untracked.vstring(
# run 136066 lumi~500
'/store/data/Run2010A/ZeroBias/RECO/v1/000/136/066/DE81B1E0-4866-DF11-A76D-0030487CD906.root'),
   secondaryFileNames = cms.untracked.vstring(
'/store/data/Run2010A/ZeroBias/RAW/v1/000/136/066/FE1DCAEF-3C66-DF11-A903-000423D98E30.root')
)
process.source.inputCommands = cms.untracked.vstring("keep *", "drop *_MEtoEDMConverter_*_*")
# -1 means: process all events in the input files.
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(-1)
)
#------------------------------------------
# Load standard sequences.
#------------------------------------------
process.load('Configuration/StandardSequences/MagneticField_AutoFromDBCurrent_cff')
process.load('Configuration/StandardSequences/GeometryIdeal_cff')
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.GlobalTag.globaltag = 'GR10_P_V6::All'
process.load("Configuration/StandardSequences/RawToDigi_Data_cff")
process.load("Configuration/StandardSequences/Reconstruction_cff")
process.load('Configuration/EventContent/EventContent_cff')
process.FEVTEventContent.outputCommands.append('drop *_MEtoEDMConverter_*_*')
#
# Load common sequences
#
#process.load('L1TriggerConfig.L1GtConfigProducers.L1GtTriggerMaskAlgoTrigConfig_cff')
#process.load('L1TriggerConfig.L1GtConfigProducers.L1GtTriggerMaskTechTrigConfig_cff')
#process.load('HLTrigger/HLTfilters/hltLevel1GTSeed_cfi')
##################################good vertex############################################
# Keep events with at least one good primary vertex: not fake, >4 dof,
# |z| <= 15 and rho <= 2 (cut string below).
process.primaryVertexFilter = cms.EDFilter("VertexSelector",
   src = cms.InputTag("offlinePrimaryVertices"),
   cut = cms.string("!isFake && ndof > 4 && abs(z) <= 15 && position.Rho <= 2"), # tracksSize() > 3 for the older cut
   filter = cms.bool(True),   # otherwise it won't filter the events, just produce an empty vertex collection.
)
# Beam-scraping rejection (presumably a high-purity-track-fraction cut with
# threshold 0.25 for events with >= 10 tracks — confirm against
# FilterOutScraping documentation).
process.noscraping = cms.EDFilter("FilterOutScraping",
applyfilter = cms.untracked.bool(True),
debugOn = cms.untracked.bool(False),
numtrack = cms.untracked.uint32(10),
thresh = cms.untracked.double(0.25)
)
process.goodvertex=cms.Path(process.primaryVertexFilter+process.noscraping)
# Output stream for events passing the good-vertex path.
process.gvout = cms.OutputModule("PoolOutputModule",
    fileName = cms.untracked.string('/tmp/azzi/ZB_vertex.root'),
    outputCommands = process.FEVTEventContent.outputCommands,
    dataset = cms.untracked.PSet(
      dataTier = cms.untracked.string('RAW-RECO'),
      filterName = cms.untracked.string('GOODVERTEX')),
    SelectEvents = cms.untracked.PSet(
      SelectEvents = cms.vstring('goodvertex')
    )
)
#################################logerrorharvester############################################
process.load("FWCore.Modules.logErrorFilter_cfi")
from Configuration.StandardSequences.RawToDigi_Data_cff import gtEvmDigis
process.gtEvmDigis = gtEvmDigis.clone()
# NOTE(review): beam mode 11 presumably corresponds to "stable beams" —
# confirm against HLTBeamModeFilter documentation.
process.stableBeam = cms.EDFilter("HLTBeamModeFilter",
                                  L1GtEvmReadoutRecordTag = cms.InputTag("gtEvmDigis"),
                                  AllowedBeamMode = cms.vuint32(11),
                                  saveTags = cms.bool(False)
                                  )
process.logerrorpath=cms.Path(process.gtEvmDigis+process.stableBeam+process.logErrorFilter)
# Output stream for stable-beam events that produced log errors.
process.outlogerr = cms.OutputModule("PoolOutputModule",
                                     outputCommands = process.FEVTEventContent.outputCommands,
                                     fileName = cms.untracked.string('/tmp/azzi/logerror_filter.root'),
                                     dataset = cms.untracked.PSet(
                                         dataTier = cms.untracked.string('RAW-RECO'),
                                         filterName = cms.untracked.string('Skim_logerror')),
                                     SelectEvents = cms.untracked.PSet(
                                         SelectEvents = cms.vstring("logerrorpath")
                                     ))
#===========================================================
###########################################################################
process.options = cms.untracked.PSet(
    wantSummary = cms.untracked.bool(True)
)
#Killed gvskim
#process.outpath = cms.EndPath(process.gvout+process.outlogerr)
process.outpath = cms.EndPath(process.outlogerr)
| StarcoderdataPython |
11360018 | import os
import pickle
from net import Net
from globals import *
import matplotlib.pyplot as plt
import matplotlib
import hashlib
from multiprocessing import Process, Manager
from helpers import HiddenPrints
# font = {#'family' : 'normal',
# # 'weight' : 'bold',
# 'size' : 22}
#
# matplotlib.rc('font', **font)
class MCProcess(Process):
    """Worker process that runs one batch of Monte Carlo epidemic simulations.

    Each worker constructs its own `Net` and, once `run` completes, stores the
    aggregated result list in the shared mapping `queue` under the worker's
    process name.
    """

    def __init__(self, name, queue, n, p, p_i, max_t, seed, clustering_target,
                 dispersion_target, mc_iterations, mode):
        super().__init__(name=name)
        self.name = name
        self.queue = queue
        self.n = n
        self.p = p
        self.p_i = p_i
        self.max_t = max_t
        self.seed = seed
        self.clustering_target = clustering_target
        self.dispersion_target = dispersion_target
        self.mc_iterations = mc_iterations
        self.mode = mode

    def run(self):
        # Suppress the simulation's verbose per-run printing while it executes.
        with HiddenPrints():
            (mean_counts, meansq_counts, mean_peak, meansq_peak,
             mean_prevalence, meansq_prevalence, clustering, dispersion) = Net(
                n=self.n, p=self.p, p_i=self.p_i, max_t=self.max_t,
                seed=self.seed, clustering_target=self.clustering_target,
                dispersion_target=self.dispersion_target).monte_carlo(
                self.mc_iterations, mode=self.mode)
        # Publish the aggregated statistics under this worker's name.
        self.queue[self.name] = [mean_counts, meansq_counts, mean_peak,
                                 meansq_peak, mean_prevalence,
                                 meansq_prevalence, clustering, dispersion]
        return
# pickling disabled for now, uncomment plot lines for that
def simple_experiment(n, p, p_i, mc_iterations, max_t, seed=123, mode=None, force_recompute=False, path=None,
                      clustering: float = None, dispersion=None):
    """Run (or reload from cache) a Monte-Carlo epidemic experiment.

    Spawns several MCProcess workers, averages their per-worker moments,
    derives standard deviations, and pickles the aggregate under a hash tag
    so identical parameter sets are loaded instead of recomputed.

    Returns (None, mean_counts, sd_counts, t_peak, mean_peak, sd_peak,
    mean_prevalence, sd_prevalence, equilib_flag, clustering, dispersion).
    Relies on star-imported module globals (epsilon_disp, t_i, t_c, ...,
    EXP_STATE, np, ...) from `globals`.
    """
    # this creates the net, runs monte carlo on it and saves the resulting timeseries plot, as well as pickles for net and counts
    assert not (dispersion and clustering), "Cannot set a dispersion target and " \
                                            "a clustering target at the same time"
    # The epsilon used depends on which structural target is being fitted.
    if dispersion:
        chosen_epsilon = epsilon_disp
    else:
        chosen_epsilon = epsilon_clustering
    if path:
        dirname = path
    else:
        dirname_parent = os.path.dirname(__file__)
        dirname = os.path.join(dirname_parent, 'Experiments')
    # the cache is now tagged with a hash from all important parameters
    # Any change to the model parameters will certainly trigger a recompute now
    id_params = (n, p, p_i, mc_iterations, max_t, seed, mode, clustering, dispersion, t_i, t_c, t_r, t_d, t_t, p_q, p_t,
                 quarantine_time, resolution, chosen_epsilon)
    # normal hashes are salted between runs -> use something that is persistent
    tag = str(hashlib.md5(str(id_params).encode('utf8')).hexdigest())
    recompute = False
    # disables loading pickled results
    if force_recompute:
        # if false, it looks at saved experiments and reuses those
        recompute = True
    else:
        try:
            with open(os.path.join(dirname, tag + "_counts.p"), 'rb') as f:
                mean_counts, sd_counts, mean_peak, sd_peak, mean_prevalence, sd_prevalence,clustering,dispersion = pickle.load(f)
            # with open(os.path.join(dirname, tag + "_net.p"), 'rb') as f:
            #     net = pickle.load(f)
            print('Experiment results have been loaded from history.')
        except FileNotFoundError:
            recompute = True
    if recompute:
        kernels = 7
        # NOTE(review): divisor is 5 while 7 workers are spawned, so the
        # total number of runs is 7 * mc_iterations/5 ≈ 1.4x the requested
        # count — possibly a leftover from kernels=5; confirm intent.
        runsPerKernel = int(mc_iterations / 5)
        threads = list()
        # Manager dict collects each worker's result under its process name.
        q = Manager().dict()
        for i in range(kernels):
            # seed + i gives each worker an independent RNG stream.
            threads.append(
                MCProcess('MCProcess_' + str(i), q, n, p, p_i, max_t, seed + i, clustering, dispersion, runsPerKernel,
                          mode))
            threads[-1].start()
        mean_counts = list()
        meansq_counts = list()
        mean_peak = list()
        meansq_peak = list()
        mean_prevalence = list()
        meansq_prevalence = list()
        clusterings = list()
        disps = list()
        for t in threads:
            t.join()  # wait for all threads to finish
        # Unpack the 8-element result list published by each worker.
        for v in q.values():
            mean_counts.append(v[0])
            meansq_counts.append(v[1])
            mean_peak.append(v[2])
            meansq_peak.append(v[3])
            mean_prevalence.append(v[4])
            meansq_prevalence.append(v[5])
            clusterings.append(v[6])
            disps.append(v[7])
        for t in threads:
            t.kill()
        # Average the first and second moments across workers, then derive
        # standard deviations via sd = sqrt(E[x^2] - E[x]^2).
        mean_counts = sum(mean_counts) / len(mean_counts)
        meansq_counts = sum(meansq_counts) / len(meansq_counts)
        mean_peak = np.mean(mean_peak)
        meansq_peak = np.mean(meansq_peak)
        mean_prevalence = np.mean(mean_prevalence)
        meansq_prevalence = np.mean(meansq_prevalence)
        sd_counts = np.sqrt(meansq_counts-np.square(mean_counts))
        sd_peak = np.sqrt(meansq_peak-np.square(mean_peak))
        sd_prevalence = np.sqrt(meansq_prevalence-np.square(mean_prevalence))
        clustering = np.mean(clusterings)
        dispersion = np.mean(disps)
        # with open(os.path.join(dirname, tag + '_net.p'), 'wb') as f:
        #     pickle.dump(net, f)
        # Cache the aggregate so the next identical call loads instantly.
        with open(os.path.join(dirname, tag + '_counts.p'), 'wb') as f:
            pickle.dump((mean_counts, sd_counts, mean_peak, sd_peak, mean_prevalence, sd_prevalence,clustering,dispersion), f)
    # net.plot_timeseries(counts, save= os.path.join(dirname, tag+'_vis.png'))
    exposed = mean_counts[EXP_STATE, :]
    infected = mean_counts[INF_STATE, :]
    ep_curve = exposed + infected
    t_peak = np.argmax(ep_curve, axis=0)  # simply take time for peak from mean counts (sloppy)
    recovered = mean_counts[REC_STATE, :]
    virus_contacts = ep_curve + recovered
    sensitivity = max(1, n / 100)  # increasing divisor makes this more sensitive
    equilib_flag = abs(
        virus_contacts[-1] - virus_contacts[-2]) < sensitivity  # just a heuristic, see whether roc is low
    assert dispersion, 'These should not be None'
    return None, mean_counts, sd_counts, t_peak, mean_peak, sd_peak, mean_prevalence, sd_prevalence, equilib_flag , clustering, dispersion
| StarcoderdataPython |
1628021 | <gh_stars>0
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = [
'GetUsersResult',
'AwaitableGetUsersResult',
'get_users',
]
@pulumi.output_type
class GetUsersResult:
    """
    A collection of values returned by getUsers.

    tfgen-generated output type: each constructor argument is type-checked
    and stored via pulumi.set; the @property getters read it back with
    pulumi.get. NOTE: the `if value and not isinstance(...)` guards skip
    type validation for falsy values (None, 0, "").
    """
    def __init__(__self__, directory_id=None, external_id=None, firstname=None, id=None, ids=None, lastname=None, samaccountname=None, user_id=None, username=None):
        if directory_id and not isinstance(directory_id, int):
            raise TypeError("Expected argument 'directory_id' to be a int")
        pulumi.set(__self__, "directory_id", directory_id)
        if external_id and not isinstance(external_id, int):
            raise TypeError("Expected argument 'external_id' to be a int")
        pulumi.set(__self__, "external_id", external_id)
        if firstname and not isinstance(firstname, str):
            raise TypeError("Expected argument 'firstname' to be a str")
        pulumi.set(__self__, "firstname", firstname)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if ids and not isinstance(ids, list):
            raise TypeError("Expected argument 'ids' to be a list")
        pulumi.set(__self__, "ids", ids)
        if lastname and not isinstance(lastname, str):
            raise TypeError("Expected argument 'lastname' to be a str")
        pulumi.set(__self__, "lastname", lastname)
        if samaccountname and not isinstance(samaccountname, str):
            raise TypeError("Expected argument 'samaccountname' to be a str")
        pulumi.set(__self__, "samaccountname", samaccountname)
        if user_id and not isinstance(user_id, str):
            raise TypeError("Expected argument 'user_id' to be a str")
        pulumi.set(__self__, "user_id", user_id)
        if username and not isinstance(username, str):
            raise TypeError("Expected argument 'username' to be a str")
        pulumi.set(__self__, "username", username)

    @property
    @pulumi.getter(name="directoryId")
    def directory_id(self) -> Optional[int]:
        """The ``directory_id`` value returned by the invoke."""
        return pulumi.get(self, "directory_id")

    @property
    @pulumi.getter(name="externalId")
    def external_id(self) -> Optional[int]:
        """The ``external_id`` value returned by the invoke."""
        return pulumi.get(self, "external_id")

    @property
    @pulumi.getter
    def firstname(self) -> Optional[str]:
        """The ``firstname`` value returned by the invoke."""
        return pulumi.get(self, "firstname")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The provider-assigned unique ID for this managed resource.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def ids(self) -> Sequence[str]:
        """IDs of all users matched by the lookup."""
        return pulumi.get(self, "ids")

    @property
    @pulumi.getter
    def lastname(self) -> Optional[str]:
        """The ``lastname`` value returned by the invoke."""
        return pulumi.get(self, "lastname")

    @property
    @pulumi.getter
    def samaccountname(self) -> Optional[str]:
        """The ``samaccountname`` value returned by the invoke."""
        return pulumi.get(self, "samaccountname")

    @property
    @pulumi.getter(name="userId")
    def user_id(self) -> Optional[str]:
        """The ``user_id`` value returned by the invoke."""
        return pulumi.get(self, "user_id")

    @property
    @pulumi.getter
    def username(self) -> Optional[str]:
        """The ``username`` value returned by the invoke."""
        return pulumi.get(self, "username")
class AwaitableGetUsersResult(GetUsersResult):
    """Awaitable wrapper so the result works in Pulumi's async codepaths."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # `if False: yield` makes this a generator function that never
        # yields, so `await result` completes immediately with a plain
        # (non-awaitable) GetUsersResult carrying the same field values.
        if False:
            yield self
        return GetUsersResult(
            directory_id=self.directory_id,
            external_id=self.external_id,
            firstname=self.firstname,
            id=self.id,
            ids=self.ids,
            lastname=self.lastname,
            samaccountname=self.samaccountname,
            user_id=self.user_id,
            username=self.username)
def get_users(directory_id: Optional[int] = None,
              external_id: Optional[int] = None,
              firstname: Optional[str] = None,
              lastname: Optional[str] = None,
              samaccountname: Optional[str] = None,
              user_id: Optional[str] = None,
              username: Optional[str] = None,
              opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetUsersResult:
    """
    Use this data source to access information about an existing resource.

    All keyword arguments are optional filter values forwarded (camelCased)
    to the provider invoke ``onelogin:index/getUsers:getUsers``.
    """
    # Translate snake_case keyword args to the camelCase keys the provider expects.
    __args__ = dict()
    __args__['directoryId'] = directory_id
    __args__['externalId'] = external_id
    __args__['firstname'] = firstname
    __args__['lastname'] = lastname
    __args__['samaccountname'] = samaccountname
    __args__['userId'] = user_id
    __args__['username'] = username
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Pin the invoke to this SDK's plugin version when the caller didn't.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('onelogin:index/getUsers:getUsers', __args__, opts=opts, typ=GetUsersResult).value
    return AwaitableGetUsersResult(
        directory_id=__ret__.directory_id,
        external_id=__ret__.external_id,
        firstname=__ret__.firstname,
        id=__ret__.id,
        ids=__ret__.ids,
        lastname=__ret__.lastname,
        samaccountname=__ret__.samaccountname,
        user_id=__ret__.user_id,
        username=__ret__.username)
| StarcoderdataPython |
9717459 | <reponame>zaurelzo/Projet-Tutore-Cubsat-Insa-Toulouse
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from client_server import Client
##
# Class which saves server configuration information
# basically this class saves : ip, port,client id , number of packet to send , type of protocol (ie : spi , i2c ,uart) , time betwen packets
class ConfigurationServer:
    """Holds server/client configuration for the test bench.

    Tracks: ip, port, the connected client, the bus protocol code
    (UART=0, SPI=1, I2C=2, -1 = unset), the number of packets to send,
    and the delay between packets.
    """

    # Mapping from bus-protocol name to its numeric wire code.
    _PROTOCOL_CODES = {"UART": 0, "SPI": 1, "I2C": 2}

    def __init__(self):
        self.ip = 'localhost'
        self.port = -1              # -1 = not configured yet
        self.client = None          # set by connect()
        self.protocol = -1          # -1 = no protocol chosen yet
        self.time = -1              # delay between packets; -1 = unset
        self.packet_number = 0

    def get_ip(self):
        """Return the ip to connect to."""
        return self.ip

    def get_port(self):
        """Return the port to connect to."""
        return self.port

    def set_ip(self, ip):
        """Set the ip value."""
        self.ip = ip

    def set_port(self, port):
        """Set the port value."""
        self.port = port

    def set_protocole(self, bus_protocol):
        """Set the protocol code from its name ("UART", "SPI" or "I2C").

        Unknown names leave the current value unchanged (same behavior as
        the original if/elif chain).
        """
        if bus_protocol in self._PROTOCOL_CODES:
            self.protocol = self._PROTOCOL_CODES[bus_protocol]

    def get_protocol(self):
        """Return the numeric protocol choice."""
        return self.protocol

    def set_packet_number(self, number):
        """Set the number of packets to send."""
        self.packet_number = number

    def get_packet_number(self):
        """Return the number of packets to send."""
        return self.packet_number

    def set_packet_time(self, time):
        """Set the time between packets."""
        self.time = time

    def connect(self):
        """Open a client connection.

        :return: True if the connection succeeded, False otherwise.
        """
        print(self.ip)
        print(self.port)
        self.client = Client.Client(self.ip, self.port)
        if self.client.connect():
            print("Connected")
            return True
        return False  # there was an error while connecting

    def disconnect(self):
        """Close the client socket if a connection was ever established.

        Bug fix: previously raised AttributeError when called before
        connect(); now it is a safe no-op in that case.
        """
        if self.client is not None:
            self.client.close_socket()
            print("Connection closed")

    def send_mes(self):
        """Send the run configuration (packet count, protocol, delay) to the board."""
        if self.client is not None:
            self.client.send_message(self.packet_number, self.protocol, self.time)
            # print("data send")
| StarcoderdataPython |
265059 | from django.views import View
from django.http import HttpRequest, JsonResponse
from app.models import Company
from app.core.mixin.base import UserLoggedInRequiredMixinJSON
class UserCompany(View, UserLoggedInRequiredMixinJSON):
    """
    URL: /api/v1/user/company/
    SUPPORT_METHOD: ['GET']
    HELP: Retrieve all company belongs to current user
    PERMISSION: User logged in
    RETURN: JSON response
    """
    def get(self, request: HttpRequest):
        """Return all companies owned by the authenticated user as a JSON array."""
        # Match on owner_name == the logged-in user's username.
        company = list(Company.objects.filter(owner_name=request.user.username).values())
        # safe=False because the top-level JSON value is a list, not a dict.
        return JsonResponse(company, safe=False)
254462 | import os
import glob
import argparse
import zipfile
import platform
import multiprocessing
from multiprocessing import Pool
import tqdm
import numpy as np
import tracker
import cv2
from metrics import evaluate
def track_video(model: str, anno_file: str, folder: str, outdir: str) -> tuple:
    """Track one annotated video and write per-frame bounding boxes.

    Builds a tracker via ``tracker.factory``, seeds it with the first
    ground-truth bbox from *anno_file*, runs it over the frames stored in
    the matching ``zips/<name>.zip`` archive, and writes one bbox line per
    frame to ``<outdir>/<name>.txt`` ("0,0,0,0" marks a tracking failure).

    :param model: tracker type name understood by ``tracker.factory``
    :param anno_file: path to the ground-truth annotation .txt file
    :param folder: dataset root containing the ``zips/`` directory
    :param outdir: output directory for the predicted-bbox file
    :return: (total_time, total_cpu_time, frame_count) from the tracker
             (bug fix: the old ``-> tracker.Tracker`` annotation was wrong)
    """
    def write_bbox(bbox, f):
        # One CSV line per frame: x,y,w,h
        f.write(','.join(map(str, bbox)) + '\n')

    tracker_model = tracker.factory(model, timed=True)
    image_folder = os.path.join(folder, "zips/")
    base_name = os.path.basename(anno_file)[:-4]  # strip the ".txt" suffix
    # First annotation line is the ground-truth bbox used to seed the tracker.
    with open(anno_file, "r") as f:
        first_bbox = tuple(map(float, f.readline().split(",")))
    with open(os.path.join(outdir, base_name + ".txt"), 'w') as f:
        # Frame 0 is the (given) ground truth, echoed verbatim.
        write_bbox(first_bbox, f)
        # Bug fix: close the zip deterministically instead of leaking the handle.
        with zipfile.ZipFile(os.path.join(image_folder, base_name + ".zip")) as z:
            names = [name for name in z.namelist() if name.endswith('.jpg')]
            names.sort(key=lambda s: int(s[:-4]))  # numeric frame order, e.g. "2.jpg" < "10.jpg"
            # Decode frames lazily to keep memory flat.
            image_gen = (cv2.imdecode(np.frombuffer(z.read(name), np.uint8), 1) for name in names)
            for image, (ok, bbox) in tracker_model.predict_frames(image_gen, bbox=first_bbox):
                if not ok:
                    write_bbox((0, 0, 0, 0), f)  # failure case
                else:
                    write_bbox(bbox, f)
    return tracker_model.time, tracker_model.ptime, tracker_model.frames
def unpack_track(args):
    """Adapter for Pool.imap_unordered: unpack an argument tuple into track_video."""
    return track_video(*args)
def track(model: str, folder: str, outdir: str, processes: int = 1):
    """Run `model` over every annotated sequence in `folder` using a process pool.

    :param model: tracker type name forwarded to each track_video call
    :param folder: dataset root containing anno/ and zips/
    :param outdir: where per-sequence prediction files are written
    :param processes: pool size
    :return: (avg wall time per frame, avg CPU time per frame, total frames)
    """
    anno_folder = os.path.join(folder, "anno/")
    # NOTE(review): image_folder is unused here — track_video derives its own paths.
    image_folder = os.path.join(folder, "zips/")
    print("Folder: %s" % folder)
    anno_files = glob.glob(os.path.join(anno_folder, "*.txt"))
    time = 0.
    ptime = 0.
    frames = 0
    if platform.system() == "Darwin":
        # Presumably to avoid fork-related issues on macOS — confirm.
        # NOTE(review): set_start_method raises RuntimeError if already set;
        # this assumes track() is called at most once per process.
        multiprocessing.set_start_method('spawn')
    with Pool(processes=processes) as p:
        with tqdm.tqdm(total=len(anno_files)) as pbar:
            args = [(model, anno_files[i], folder, outdir) for i in range(len(anno_files))]
            # NOTE(review): loop names say "avg" but track_video returns
            # per-video totals; the division below converts to per-frame.
            for avg_time, avg_cpu, _frames in p.imap_unordered(unpack_track, args):
                time += avg_time
                ptime += avg_cpu
                frames += _frames
                pbar.update()
    print("\n=======\nAvg tracking time per frame: %s\n=======\n" % (time / frames))
    return time / frames, ptime / frames, frames
if __name__ == "__main__":
    # CLI entry point: track every sequence, evaluate against ground truth,
    # and append one CSV row of metrics to the summary file.
    parser = argparse.ArgumentParser()
    parser.add_argument("-f", "--folder", help="path to training folder with anno/ and zips/", type=str, required=True)
    parser.add_argument("-o", "--outdir", help="folder to write your subm.txt files", type=str, required=True)
    parser.add_argument("-s", "--summary", help="summary filepath", type=str, default="summary.csv")
    parser.add_argument("-m", "--model", help="model type ('CSRT', 'KCF', 'GOTURN')", type=str, choices=['CSRT', 'KCF', 'GOTURN'], required=True)
    parser.add_argument("-p", "--processors", help=f"number of processors to be used. You have: {os.cpu_count()} logical processors", type=int, default=1)
    args = parser.parse_args()
    os.makedirs(args.outdir, exist_ok=True)
    print("Starting track...")
    avg_time, avg_cpu_time, frames = track(args.model, args.folder, args.outdir, processes=args.processors)
    print("Evaluating...")
    # Both file lists are sorted so predictions pair with their annotations.
    Success_Average, Precision_Average, NPrecision_Average = evaluate(sorted(glob.glob(os.path.join(args.outdir, "*.txt"))), sorted(glob.glob(os.path.join(args.folder, "anno", "*.txt"))))
    # Write the CSV header once, on first use of the summary file.
    if not os.path.exists(args.summary):
        with open(args.summary, 'w') as f: f.write('"Model","Folder name","Success Average","Precision Average","NPrecision_average","Frames per second","CPU Usage per frame", "Frames computed"\n')
    with open(args.summary, 'a') as f:
        # NOTE(review): fields are quoted naively (no escaping of embedded quotes).
        f.write(",".join(map(lambda s: '"%s"' % s, (args.model,
                                                    args.folder,
                                                    Success_Average,
                                                    Precision_Average,
                                                    NPrecision_Average,
                                                    1/avg_time,
                                                    avg_cpu_time,
                                                    frames))) + "\n")
| StarcoderdataPython |
4871172 |
from matplotlib import pyplot as plt
import pandas as pd
from src.models.schemas import *
from src.models.defaults import *
from src.models.states_and_functions import *
class Visualize:
def __init__(self, params, df_individuals, expected_case_severity, logger):
    """Capture simulation inputs and initialise per-run plotting state.

    :param params: dict-like simulation parameter store (schema keys)
    :param df_individuals: dataframe of simulated individuals (has .age)
    :param expected_case_severity: per-individual severity assignment
    :param logger: logger used by the simulation
    """
    self._params = params
    # The fields below are filled in later by visualize_simulation():
    self.df_progression_times = None
    self.df_infections = None
    self.df_individuals = df_individuals
    self.serial_interval_median = None
    self.fear = None
    self.active_people = None
    # Time offset used to align t=0 with detections; np.inf = "not yet known".
    self._max_time_offset = 0
    if self._params[MOVE_ZERO_TIME_ACCORDING_TO_DETECTED]:
        self._max_time_offset = np.inf
    self.detected_cases = None
    self._expected_case_severity = expected_case_severity
    self.logger = logger
    # Accumulators of one time series per simulation run.
    self._all_runs_detected = []
    self._all_runs_prevalence = []
    self._all_runs_severe = []
@property
def xlim(self):
    """(left, right) x-axis limits from params, with module defaults as fallback."""
    left = self._params.get(PLOT_XLIM_LEFT, default_plot_xlim_left)
    right = self._params.get(PLOT_XLIM_RIGHT, default_plot_xlim_right)
    return left, right
@property
def ylim(self):
    """(bottom, top) y-axis limits from params, with module defaults as fallback."""
    bottom = self._params.get(PLOT_YLIM_BOTTOM, default_plot_ylim_bottom)
    top = self._params.get(PLOT_YLIM_TOP, default_plot_ylim_top)
    return bottom, top
@property
def xlim_cut(self):
    """x-limits for the 'cut' plots; unset sides fall back to self.xlim."""
    cut_left = self._params.get(PLOT_XLIM_CUT_LEFT, None)
    cut_right = self._params.get(PLOT_XLIM_CUT_RIGHT, None)
    return (self.xlim[0] if cut_left is None else cut_left,
            self.xlim[1] if cut_right is None else cut_right)
@property
def ylim_cut(self):
    """y-limits for the 'cut' plots; unset sides fall back to self.ylim."""
    cut_bottom = self._params.get(PLOT_YLIM_CUT_BOTTOM, None)
    cut_top = self._params.get(PLOT_YLIM_CUT_TOP, None)
    return (self.ylim[0] if cut_bottom is None else cut_bottom,
            self.ylim[1] if cut_top is None else cut_top)
def visualize_scenario(self, simulation_output_dir):
    """Write scenario-level fit diagnostics (CSV line + lognormal fits) to disk."""
    # test_* helper methods are not visible in this chunk — presumably
    # defined elsewhere in the class.
    fitting_successes = self.test_detected_cases(simulation_output_dir)
    q_ = self._params[DETECTION_MILD_PROBA]
    # NOTE(review): 2.34 appears to scale the constant-kernel transmission
    # probability into R* — confirm against the model definition.
    rstar_out = 2.34 * self._params[TRANSMISSION_PROBABILITIES][CONSTANT]
    c = self._params[FEAR_FACTORS].get(CONSTANT, self._params[FEAR_FACTORS][DEFAULT])[LIMIT_VALUE]
    # One CSV header + one data row per call.
    fitting_successes_str = f'q,rstar,c,successes\n{q_},{rstar_out},{c},{fitting_successes}\n'
    fitting_successes_log_file = os.path.join(simulation_output_dir, 'fitting_successes.txt')
    with open(fitting_successes_log_file, "w") as out_fitting:
        out_fitting.write(fitting_successes_str)
    self.test_lognormal_prevalence(simulation_output_dir)
    self.test_lognormal_detected(simulation_output_dir)
    self.test_lognormal_severe(simulation_output_dir)
def visualize_simulation(self, simulation_output_dir, serial_interval, fear, active_people, max_time_offset,
                         detected_cases, df_progression_times, df_infections):
    """Store all per-run plots for one simulation.

    Temporarily appends run statistics (serial interval, R*, detection %,
    fear reduction) to params[EXPERIMENT_ID] so they show up in figure
    titles, then restores the original value at the end.
    """
    self.df_progression_times = df_progression_times
    self.df_infections = df_infections
    self.serial_interval_median = serial_interval
    self.fear = fear
    self.active_people = active_people
    self._max_time_offset = max_time_offset
    self.detected_cases = detected_cases
    serial_interval_median = self.serial_interval_median
    # Save the original experiment id; it is mutated below for plot titles.
    hack = self._params[EXPERIMENT_ID]
    c = self._params[TRANSMISSION_PROBABILITIES][CONSTANT]
    c_norm = c * self._params[AVERAGE_INFECTIVITY_TIME_CONSTANT_KERNEL]  # R* estimate
    det = self._params[DETECTION_MILD_PROBA] * 100  # detection probability, in percent
    reduced_r = c_norm * self.fear(CONSTANT)  # R* after fear-driven contact reduction
    self._params[EXPERIMENT_ID] = f'{self._params[EXPERIMENT_ID]}\n(median serial interval: {serial_interval_median:.2f} days, R*: {c_norm:.3f}'
    if self._params[TURN_ON_DETECTION]:
        self._params[EXPERIMENT_ID] = f'{self._params[EXPERIMENT_ID]}, Det: {det:.1f}%)'
    else:
        self._params[EXPERIMENT_ID] = f'{self._params[EXPERIMENT_ID]})'
    if self._params[FEAR_FACTORS].get(CONSTANT, self._params[FEAR_FACTORS][DEFAULT])[FEAR_FUNCTION] != FearFunctions.FearDisabled:
        self._params[EXPERIMENT_ID] = f'{self._params[EXPERIMENT_ID]}\n reduction factor: {(1 - self.fear(CONSTANT)):.3f}, reduced R*: {reduced_r:.3f}'
    # Emit every plot variant for this run.
    self.lancet_store_graphs(simulation_output_dir)
    self.lancet_store_bins(simulation_output_dir)
    self.store_bins(simulation_output_dir)
    #self.store_bins_pl(simulation_output_dir)
    self.store_graphs(simulation_output_dir)
    self.store_detections(simulation_output_dir)
    self.store_semilogy(simulation_output_dir)
    self.doubling_time(simulation_output_dir)
    self.lancet_icu_beds(simulation_output_dir)
    self.icu_beds(simulation_output_dir)
    self.lancet_draw_death_age_cohorts(simulation_output_dir)
    # Restore the untouched experiment id.
    self._params[EXPERIMENT_ID] = hack
def doubling_time(self, simulation_output_dir):
    """Plot rolling doubling-time trends for several cumulative series."""
    def doubling(x, y, window=100):
        # Doubling time over a sliding window of `window` events:
        # (t2 - t1) * ln(2) / ln(count2 / count1).
        x1 = x[:-window]
        x2 = x[window:]
        y1 = y[:-window]
        y2 = y[window:]
        a = (x2 - x1) * np.log(2)
        b = np.log(y2 / y1)
        c = a / b
        return c  # (x2 - x1) * np.log(2) / np.log(y2 / y1)
    def plot_doubling(x, ax, label, window=100):
        # Only series with more than `window` events can produce a trend.
        # Doubling times >= 28 days are clipped out of the plot.
        if len(x) > window:
            xval = x[:-window]
            yval = doubling(x.values, np.arange(1, 1 + len(x)))
            ax.plot(xval[yval < 28], yval[yval < 28], label=label)
            return True
        return False
    fig, ax = plt.subplots(nrows=1, ncols=1)
    df_r1 = self.df_progression_times
    df_r2 = self.df_infections
    vals = df_r2.contraction_time.sort_values()
    plot_doubling(vals, ax, label='Trend line for prevalence')
    # Per-kernel breakdowns of infection times.
    cond1 = df_r2.contraction_time[df_r2.kernel == 'import_intensity'].sort_values()
    cond2 = df_r2.contraction_time[df_r2.kernel == 'constant'].sort_values()
    cond3 = df_r2.contraction_time[df_r2.kernel == 'household'].sort_values()
    plot_doubling(cond1, ax, label='Trend line for # imported cases')
    plot_doubling(cond2, ax, label='Trend line for Infected through constant kernel')
    plot_doubling(cond3, ax, label='Trend line for Infected through household kernel')
    hospitalized_cases = df_r1[~df_r1.t2.isna()].sort_values(by='t2').t2
    ho_cases = hospitalized_cases[hospitalized_cases <= df_r2.contraction_time.max(axis=0)].sort_values()
    death_cases = df_r1[~df_r1.tdeath.isna()].sort_values(by='tdeath').tdeath
    d_cases = death_cases[death_cases <= df_r2.contraction_time.max(axis=0)].sort_values()
    plot_doubling(ho_cases, ax, label='Trend line for # hospitalized cases')
    plot_doubling(d_cases, ax, label='Trend line for # deceased cases')
    ax.legend(loc='lower right')  # legend, loc='upper left')
    ax.set_title(f'Doubling times for simulation of covid19 dynamics\n {self._params[EXPERIMENT_ID]}')
    fig.tight_layout()
    plt.savefig(os.path.join(simulation_output_dir, 'doubling_times.png'))
    plt.close(fig)
def lancet_draw_death_age_cohorts(self, simulation_output_dir):
    """Cumulative deceased-cases curve per age cohort (publication variant).

    Near-duplicate of draw_death_age_cohorts() without the plot title.
    """
    df_r1 = self.df_progression_times
    df_r2 = self.df_infections
    df_in = self.df_individuals
    lims = default_age_cohorts_with_descriptions
    fig, ax = plt.subplots(nrows=1, ncols=1)
    for limm, limM, descr in lims:
        # Select individuals whose age is in [limm, limM).
        cond1 = df_in.age >= limm
        cond2 = df_in.age < limM
        cond = np.logical_and(cond1, cond2)
        filtered = df_r1.loc[df_r1.index.isin(df_in[cond].index)]
        death_cases = filtered[~filtered.tdeath.isna()].sort_values(by='tdeath').tdeath
        d_cases = death_cases[death_cases <= df_r2.contraction_time.max(axis=0)].sort_values()
        d_times = np.arange(1, 1 + len(d_cases))
        # Extend each curve flat to the last infection time.
        ax.plot(np.append(d_cases, df_r2.contraction_time.max(axis=0)),
                np.append(d_times, len(d_cases)), label=descr)
    ax.legend()
    fig.tight_layout()
    plt.savefig(os.path.join(simulation_output_dir, 'lancet_supplementary_deceased_cases_age_analysis.png'))
    plt.close(fig)
def draw_death_age_cohorts(self, simulation_output_dir):
    """Cumulative deceased-cases curve per age cohort, with a titled figure.

    Near-duplicate of lancet_draw_death_age_cohorts() plus a title.
    """
    df_r1 = self.df_progression_times
    df_r2 = self.df_infections
    df_in = self.df_individuals
    lims = default_age_cohorts_with_descriptions
    fig, ax = plt.subplots(nrows=1, ncols=1)
    for limm, limM, descr in lims:
        # Select individuals whose age is in [limm, limM).
        cond1 = df_in.age >= limm
        cond2 = df_in.age < limM
        cond = np.logical_and(cond1, cond2)
        filtered = df_r1.loc[df_r1.index.isin(df_in[cond].index)]
        death_cases = filtered[~filtered.tdeath.isna()].sort_values(by='tdeath').tdeath
        d_cases = death_cases[death_cases <= df_r2.contraction_time.max(axis=0)].sort_values()
        d_times = np.arange(1, 1 + len(d_cases))
        # Extend each curve flat to the last infection time.
        ax.plot(np.append(d_cases, df_r2.contraction_time.max(axis=0)),
                np.append(d_times, len(d_cases)), label=descr)
    ax.legend()
    experiment_id = self._params[EXPERIMENT_ID]
    ax.set_title(f'cumulative deceased cases per age group \n {experiment_id}')
    fig.tight_layout()
    plt.savefig(os.path.join(simulation_output_dir, 'deceased_cases_age_analysis.png'))
    plt.close(fig)
def lancet_store_bins(self, simulation_output_dir):
    """Daily histogram of infections and hospitalizations (publication variant)."""
    df_r1 = self.df_progression_times
    df_r2 = self.df_infections
    fig, ax0 = plt.subplots()
    r2_max_time = df_r2.contraction_time.max()
    # When the epidemic died out (<10 active), mark the last infection time.
    if self.active_people < 10:
        if self._params[MOVE_ZERO_TIME_ACCORDING_TO_DETECTED]:
            if self._max_time_offset != np.inf:
                r2_max_time -= self._max_time_offset
        ax0.plot([r2_max_time], [0], 'ro', markersize=5, label='Last reported infection time')
    bins = np.arange(np.minimum(730, int(1 + r2_max_time)))  # 1-day bins, capped at 2 years
    cond3 = df_r2.contraction_time.sort_values()
    if self._params[MOVE_ZERO_TIME_ACCORDING_TO_DETECTED]:
        if self._max_time_offset != np.inf:
            cond3 -= self._max_time_offset
    legend = []
    arr = []
    if len(cond3) > 0:
        arr.append(cond3)
        legend.append('Infections')
    ax0.hist(arr, bins, histtype='bar', stacked=False, label=legend)
    arr = []
    legend = []
    hospitalized_cases = df_r1[~df_r1.t2.isna()].sort_values(by='t2').t2
    ho_cases = hospitalized_cases[hospitalized_cases <= r2_max_time].sort_values()
    if len(ho_cases) > 0:
        arr.append(ho_cases)
        legend.append('Hospitalized')
    # NOTE(review): the offset is subtracted AFTER ho_cases was appended to
    # arr; this still affects the histogram because `-=` mutates the Series
    # in place (arr holds a reference) — confirm this ordering is intended.
    if self._params[MOVE_ZERO_TIME_ACCORDING_TO_DETECTED]:
        if self._max_time_offset != np.inf:
            ho_cases -= self._max_time_offset
    ax0.hist(arr, bins, histtype='bar', stacked=False, label=legend)
    ax0.legend()
    ax0.set_ylabel('Incidents')
    ax0.set_xlabel('Time in days')
    fig.tight_layout()
    plt.savefig(os.path.join(simulation_output_dir, 'lancet_bins.png'))
    plt.close(fig)
def store_bins_pl(self, simulation_output_dir):
    """Three-panel daily histogram report with Polish labels.

    Panels (top to bottom): diagnosed cases, severe cases
    (deaths + hospitalizations), and infections split by kernel.
    The "- 3" applied to every offset shifts t=0 three days earlier so the
    x-ticks line up with the hard-coded calendar dates below.
    """
    # font = {#'family' : 'normal',
    # # 'weight' : 'bold',
    # 'size' : 22}
    #
    # matplotlib.rc('font', **font)
    df_r1 = self.df_progression_times
    df_r2 = self.df_infections
    fig, (ax2, ax1, ax0) = plt.subplots(nrows=3, ncols=1, figsize=(10, 10))
    r2_max_time = df_r2.contraction_time.max()
    detected = df_r1.dropna(subset=['tdetection']).sort_values(by='tdetection').tdetection
    # Calendar tick labels every 5 days, anchored at day 3.
    # NOTE(review): '2/03/20' breaks the 5-day sequence — probably meant
    # '02/04/20'; runtime string left untouched here.
    xloc = [3, 8, 13, 18, 23, 28]
    dates = ['13/03/20', '18/03/20', '23/03/20', '28/03/20', '2/03/20', '7/04/20']
    bins = np.arange(np.minimum(730, int(1 + r2_max_time)))
    cond2 = df_r2.contraction_time[df_r2.kernel == 'constant'].sort_values()
    cond3 = df_r2.contraction_time[df_r2.kernel == 'household'].sort_values()
    if self._params[MOVE_ZERO_TIME_ACCORDING_TO_DETECTED]:
        if self._max_time_offset != np.inf:
            cond2 -= self._max_time_offset - 3
            cond3 -= self._max_time_offset - 3
    legend = []
    arr = []
    if len(cond2) > 0:
        arr.append(cond2)
        legend.append('Zarażenia poza domem')
    if len(cond3) > 0:
        arr.append(cond3)
        legend.append('Zarażenia w gosp. domowym')
    values, _, _ = ax0.hist(arr, bins, histtype='bar', stacked=True, label=legend, color=['blue', 'grey'])
    # ax0.plot([3]*2, [0, np.amax(values)], 'k-', label='Ogłoszenie zamknięcia <NAME> 13/03/2020')
    ax0.legend()
    ax0.set_ylabel('Zainfekowani')
    ax0.set_xlabel('Data')
    ax0.set_xticks(xloc)
    ax0.set_xticklabels(dates)
    ax0.set_xlim([0, 30])
    arr = []
    legend = []
    hospitalized_cases = df_r1[~df_r1.t2.isna()].sort_values(by='t2').t2
    ho_cases = hospitalized_cases[hospitalized_cases <= r2_max_time].sort_values()
    death_cases = df_r1[~df_r1.tdeath.isna()].sort_values(by='tdeath').tdeath
    d_cases = death_cases[death_cases <= r2_max_time].sort_values()
    if self._params[MOVE_ZERO_TIME_ACCORDING_TO_DETECTED]:
        if self._max_time_offset != np.inf:
            ho_cases -= self._max_time_offset - 3
            d_cases -= self._max_time_offset - 3
    if len(d_cases) > 0:
        arr.append(d_cases)
        legend.append('Przypadki śmiertelne')
    if len(ho_cases) > 0:
        arr.append(ho_cases)
        legend.append('Hospitalizowani')
    values, _, _ = ax1.hist(arr, bins, histtype='bar', stacked=True, label=legend, color=['red', 'orange'])
    # ax1.plot([3]*2, [0, np.amax(values)], 'k-', label='Ogłoszenie zamknięcia <NAME> 13/03/2020')
    ax1.set_xlim([0, 30])
    ax1.set_ylabel('Przypadki poważne')
    ax1.set_xlabel('Data')
    ax1.legend()
    ax1.set_xticks(xloc)
    ax1.set_xticklabels(dates)
    detected_cases = self.detected_cases(df_r1)
    det_cases = detected_cases[detected_cases <= df_r2.contraction_time.max(axis=0)].sort_values()
    if self._params[MOVE_ZERO_TIME_ACCORDING_TO_DETECTED]:
        if self._max_time_offset != np.inf:
            det_cases -= self._max_time_offset - 3
    values, _, _ = ax2.hist(det_cases, bins, histtype='bar', stacked=True, label='Zdiagnozowani', color='green')
    # ax2.plot([3]*2, [0, np.amax(values)], 'k-', label='Ogłoszenie zamknięcia <NAME> 13/03/2020')
    ax2.set_xlim([0, 30])
    ax2.set_ylabel('Zdiagnozowani')
    ax2.set_xlabel('Data')
    ax2.set_xticks(xloc)
    ax2.set_xticklabels(dates)
    # ax2.legend()
    fig.tight_layout()
    # plt.show()
    plt.savefig(os.path.join(simulation_output_dir, 'bins_report_pl.png'), dpi=300)
    plt.close(fig)
def store_bins(self, simulation_output_dir):
    """Daily stacked histograms: infections by kernel, outcomes, detections.

    The third (detections) panel only exists when TURN_ON_DETECTION is set.
    """
    df_r1 = self.df_progression_times
    df_r2 = self.df_infections
    if self._params[TURN_ON_DETECTION]:
        fig, (ax0, ax1, ax2) = plt.subplots(nrows=3, ncols=1)
    else:
        fig, (ax0, ax1) = plt.subplots(nrows=2, ncols=1)
    r2_max_time = df_r2.contraction_time.max()
    # When the epidemic died out (<10 active), mark the last infection time.
    if self.active_people < 10:
        if self._params[MOVE_ZERO_TIME_ACCORDING_TO_DETECTED]:
            if self._max_time_offset != np.inf:
                r2_max_time -= self._max_time_offset
        ax0.plot([r2_max_time], [0], 'ro', markersize=5, label='Last reported infection time')
    bins = np.arange(np.minimum(730, int(1 + r2_max_time)))  # 1-day bins, capped at 2 years
    cond1 = df_r2.contraction_time[df_r2.kernel == 'import_intensity'].sort_values()
    cond2 = df_r2.contraction_time[df_r2.kernel == 'constant'].sort_values()
    cond3 = df_r2.contraction_time[df_r2.kernel == 'household'].sort_values()
    if self._params[MOVE_ZERO_TIME_ACCORDING_TO_DETECTED]:
        if self._max_time_offset != np.inf:
            cond1 -= self._max_time_offset
            cond2 -= self._max_time_offset
            cond3 -= self._max_time_offset
    legend = []
    arr = []
    if len(cond1) > 0:
        arr.append(cond1)
        legend.append('Imported')
    if len(cond2) > 0:
        arr.append(cond2)
        legend.append('Constant kernel')
    if len(cond3) > 0:
        arr.append(cond3)
        legend.append('Household')
    ax0.hist(arr, bins, histtype='bar', stacked=True, label=legend)
    ax0.legend()
    arr = []
    legend = []
    hospitalized_cases = df_r1[~df_r1.t2.isna()].sort_values(by='t2').t2
    ho_cases = hospitalized_cases[hospitalized_cases <= r2_max_time].sort_values()
    death_cases = df_r1[~df_r1.tdeath.isna()].sort_values(by='tdeath').tdeath
    d_cases = death_cases[death_cases <= r2_max_time].sort_values()
    if self._params[MOVE_ZERO_TIME_ACCORDING_TO_DETECTED]:
        if self._max_time_offset != np.inf:
            ho_cases -= self._max_time_offset
            d_cases -= self._max_time_offset
    if len(d_cases) > 0:
        arr.append(d_cases)
        legend.append('Deceased')
    if len(ho_cases) > 0:
        arr.append(ho_cases)
        legend.append('Hospitalized')
    ax1.hist(arr, bins, histtype='bar', stacked=True, label=legend)
    ax1.legend()
    ax0.set_title(f'Daily stacked summaries of simulated covid19\n {self._params[EXPERIMENT_ID]}')
    ax0.set_ylabel('Infections')
    ax0.set_xlabel('Time in days')
    ax1.set_ylabel('Outcome')
    ax1.set_xlabel('Time in days')
    if self._params[TURN_ON_DETECTION]:
        detected_cases = self.detected_cases(df_r1)
        det_cases = detected_cases[detected_cases <= df_r2.contraction_time.max(axis=0)].sort_values()
        if self._params[MOVE_ZERO_TIME_ACCORDING_TO_DETECTED]:
            if self._max_time_offset != np.inf:
                det_cases -= self._max_time_offset
        ax2.hist(det_cases, bins, histtype='bar', stacked=True, label='Daily officially detected cases')
        ax2.set_ylabel('Detections')
        ax2.set_xlabel('Time in days')
        ax2.legend()
    fig.tight_layout()
    plt.savefig(os.path.join(simulation_output_dir, 'bins.png'))
    plt.close(fig)
def plot_values(self, values, label, ax, yvalues=None, type='plot', reduce_offset=True, dots=False):
    """Plot a cumulative event curve (count vs. event time) on `ax`.

    :param values: sorted event times (pandas Series or array)
    :param yvalues: explicit y data; default is the cumulative count 1..N
    :param type: 'plot' for linear axes, 'semilogy' for log-y
    :param reduce_offset: shift times by -_max_time_offset when enabled
    :param dots: draw black dot markers instead of a line
    :return: the plotted (x, y), or two empty arrays when nothing was drawn

    NOTE(review): with reduce_offset the shift is done in place (`x -=`),
    so a Series passed by the caller is mutated — callers that reuse
    `values` afterwards (e.g. to store it) see the shifted times.
    """
    if len(values) > 0:
        x = values
        if reduce_offset:
            if self._params[MOVE_ZERO_TIME_ACCORDING_TO_DETECTED]:
                if self._max_time_offset != np.inf:
                    x -= self._max_time_offset
                else:
                    # Offset never resolved -> cannot align; draw nothing.
                    return np.arange(0), np.arange(0)
        if yvalues is None:
            y = np.arange(1, 1 + len(x))  # cumulative count
        else:
            y = yvalues
        if type == 'plot':
            if dots:
                ax.plot(x, y, 'ok', label=label)
            else:
                ax.plot(x, y, label=label)
        elif type == 'semilogy':
            ax.semilogy(x, y, label=label)
        # Optional vertical "today" marker annotated with the count so far.
        if self._params[USE_TODAY_MARK]:
            today = float(self._params[TODAY_OFFSET])
            counter = sum(np.array(x) <= today)
            label_at_today = f'{label} at T={today}: {counter}'
            ax.plot([self._params[TODAY_OFFSET]] * 2, [0, len(values)], 'k-', label=label_at_today)
        return x, y
    return np.arange(0), np.arange(0)
def store_graphs(self, simulation_output_dir):
    """Cumulative curves (prevalence, deceased, hospitalized, detected,
    quarantined) in one titled figure saved as summary.png."""
    df_r1 = self.df_progression_times
    df_r2 = self.df_infections
    fig, ax = plt.subplots(nrows=1, ncols=1)
    vals = df_r2.contraction_time.sort_values()
    self.plot_values(vals, 'Prevalence', ax)
    #cond1 = df_r2.contraction_time[df_r2.kernel == 'import_intensity'].sort_values()
    #cond2 = df_r2.contraction_time[df_r2.kernel == 'constant'].sort_values()
    #cond3 = df_r2.contraction_time[df_r2.kernel == 'household'].sort_values()
    #self.plot_values(cond1, 'Imported', ax)
    #self.plot_values(cond2, 'Inf. through constant kernel', ax)
    #self.plot_values(cond3, 'Inf. through household', ax)
    hospitalized_cases = df_r1[~df_r1.t2.isna()].sort_values(by='t2').t2
    ho_cases = hospitalized_cases[hospitalized_cases <= df_r2.contraction_time.max(axis=0)].sort_values()
    death_cases = df_r1[~df_r1.tdeath.isna()].sort_values(by='tdeath').tdeath
    d_cases = death_cases[death_cases <= df_r2.contraction_time.max(axis=0)].sort_values()
    detected_cases = self.detected_cases(df_r1)
    det_cases = detected_cases[detected_cases <= df_r2.contraction_time.max(axis=0)].sort_values()
    self.plot_values(d_cases, 'Deceased', ax)
    self.plot_values(ho_cases, 'Hospitalized', ax)
    self.plot_values(det_cases, 'Detected', ax)
    # Overlay of real observed data (method not visible in this chunk).
    self.add_observed_curve(ax)
    # Quarantine column only exists when quarantine was simulated.
    if QUARANTINE in df_r1.columns:
        quarantined_cases = df_r1[~df_r1.quarantine.isna()].sort_values(by='quarantine').quarantine
        q_cases = quarantined_cases[quarantined_cases <= df_r2.contraction_time.max(axis=0)].sort_values()
        self.plot_values(q_cases, 'Quarantined', ax)
    ax.legend()
    ax.set_title(f'simulation of covid19 dynamics\n {self._params[EXPERIMENT_ID]}')
    fig.tight_layout()
    plt.savefig(os.path.join(simulation_output_dir, 'summary.png'))
    plt.close(fig)
    def store_detections(self, simulation_output_dir):
        """Plot the cumulative detected-cases curve, record it in
        ``self._all_runs_detected`` for cross-run analysis, and save the
        figure as ``summary_detections.png``.

        :param simulation_output_dir: directory where the figure is written
        """
        df_r1 = self.df_progression_times
        df_r2 = self.df_infections
        detected_cases = self.detected_cases(df_r1)
        # Clip detections to the simulation horizon (last contraction time).
        det_cases = detected_cases[detected_cases <= df_r2.contraction_time.max(axis=0)].sort_values()
        self._all_runs_detected.append(det_cases)
        fig, ax = plt.subplots(nrows=1, ncols=1)
        ax.set_title(f'detected cases in time\n {self._params[EXPERIMENT_ID]}')
        self.plot_values(det_cases, 'Detected', ax)
        self.add_observed_curve(ax)
        ax.legend()
        fig.tight_layout()
        plt.savefig(os.path.join(simulation_output_dir, 'summary_detections.png'))
        plt.close(fig)
    def lancet_store_graphs(self, simulation_output_dir):
        """Like ``store_graphs`` but also accumulates per-run prevalence and
        severe-case curves (``_all_runs_prevalence`` / ``_all_runs_severe``)
        and saves the figure as ``lancet_summary.png`` without a title or
        observed-curve overlay.

        :param simulation_output_dir: directory where the figure is written
        """
        df_r1 = self.df_progression_times
        df_r2 = self.df_infections
        fig, ax = plt.subplots(nrows=1, ncols=1)
        vals = df_r2.contraction_time.sort_values()
        self.plot_values(vals, 'Prevalence', ax)
        # Keep the raw curves for the cross-run "test_lognormal_*" plots.
        self._all_runs_prevalence.append(vals)
        hospitalized_cases = df_r1[~df_r1.t2.isna()].sort_values(by='t2').t2
        ho_cases = hospitalized_cases[hospitalized_cases <= df_r2.contraction_time.max(axis=0)].sort_values()
        self._all_runs_severe.append(ho_cases)
        death_cases = df_r1[~df_r1.tdeath.isna()].sort_values(by='tdeath').tdeath
        d_cases = death_cases[death_cases <= df_r2.contraction_time.max(axis=0)].sort_values()
        detected_cases = self.detected_cases(df_r1)
        det_cases = detected_cases[detected_cases <= df_r2.contraction_time.max(axis=0)].sort_values()
        self.plot_values(d_cases, 'Deceased', ax)
        self.plot_values(ho_cases, 'Hospitalized', ax)
        self.plot_values(det_cases, 'Detected', ax)
        # The quarantine column is optional in the progression-times frame.
        if QUARANTINE in df_r1.columns:
            quarantined_cases = df_r1[~df_r1.quarantine.isna()].sort_values(by='quarantine').quarantine
            q_cases = quarantined_cases[quarantined_cases <= df_r2.contraction_time.max(axis=0)].sort_values()
            self.plot_values(q_cases, 'Quarantined', ax)
        ax.legend()
        fig.tight_layout()
        plt.savefig(os.path.join(simulation_output_dir, 'lancet_summary.png'))
        plt.close(fig)
    def store_semilogy(self, simulation_output_dir):
        """Same curves as ``store_graphs`` but drawn on a semi-log y axis;
        saves the figure as ``summary_semilogy.png``.

        :param simulation_output_dir: directory where the figure is written
        """
        df_r1 = self.df_progression_times
        df_r2 = self.df_infections
        vals = df_r2.contraction_time.sort_values()
        fig, ax = plt.subplots(nrows=1, ncols=1)
        self.plot_values(vals, 'Prevalence', ax, type='semilogy')
        #cond1 = df_r2.contraction_time[df_r2.kernel == 'import_intensity'].sort_values()
        #cond2 = df_r2.contraction_time[df_r2.kernel == 'constant'].sort_values()
        #cond3 = df_r2.contraction_time[df_r2.kernel == 'household'].sort_values()
        #self.plot_values(cond1, 'Imported', ax, type='semilogy')
        #self.plot_values(cond2, 'Inf. through constant kernel', ax, type='semilogy')
        #self.plot_values(cond3, 'Inf. through household', ax, type='semilogy')
        # Clip each event series to the simulation horizon.
        hospitalized_cases = df_r1[~df_r1.t2.isna()].sort_values(by='t2').t2
        ho_cases = hospitalized_cases[hospitalized_cases <= df_r2.contraction_time.max(axis=0)].sort_values()
        death_cases = df_r1[~df_r1.tdeath.isna()].sort_values(by='tdeath').tdeath
        d_cases = death_cases[death_cases <= df_r2.contraction_time.max(axis=0)].sort_values()
        detected_cases = self.detected_cases(df_r1)
        det_cases = detected_cases[detected_cases <= df_r2.contraction_time.max(axis=0)].sort_values()
        self.plot_values(d_cases, 'Deceased', ax, type='semilogy')
        self.plot_values(ho_cases, 'Hospitalized', ax, type='semilogy')
        self.plot_values(det_cases, 'Detected', ax, type='semilogy')
        self.add_observed_curve(ax)
        # The quarantine column is optional in the progression-times frame.
        if QUARANTINE in df_r1.columns:
            quarantined_cases = df_r1[~df_r1.quarantine.isna()].sort_values(by='quarantine').quarantine
            q_cases = quarantined_cases[quarantined_cases <= df_r2.contraction_time.max(axis=0)].sort_values()
            self.plot_values(q_cases, 'Quarantined', ax, type='semilogy')
        ax.legend()
        ax.set_title(f'simulation of covid19 dynamics\n {self._params[EXPERIMENT_ID]}')
        fig.tight_layout()
        plt.savefig(os.path.join(simulation_output_dir, 'summary_semilogy.png'))
        plt.close(fig)
def test_lognormal_prevalence(self, simulation_output_dir):
fig, ax = plt.subplots(nrows=1, ncols=1)
for i, run in enumerate(self._all_runs_prevalence):
self.plot_values(run, f'Run {i}', ax, reduce_offset=False, type='semilogy')
self.add_observed_curve(ax)
#ax.legend()
ax.set_xlim(self.xlim)
ax.set_ylim(self.ylim)
ax.set_title(f'Sample paths of prevalence')
fig.tight_layout()
plt.savefig(os.path.join(simulation_output_dir, 'test_lognormal_prevalence.png'))
plt.close(fig)
def test_lognormal_detected(self, simulation_output_dir):
fig, ax = plt.subplots(nrows=1, ncols=1)
for i, run in enumerate(self._all_runs_detected):
self.plot_values(run, f'Run {i}', ax, reduce_offset=False, type='semilogy')
self.add_observed_curve(ax)
#ax.legend()
ax.set_xlim(self.xlim)
ax.set_ylim(self.ylim)
ax.set_title(f'Sample paths of detected cases')
#ax.set_title(f'Analiza')
#xloc = [0, 5, 10, 15, 20]
#dates = ['12/03/20', '17/03/20', '22/03/20', '27/03/20', '1/04/20', '6/04/20']
#ax.set_ylabel('Zdiagnozowani (skala logarytmiczna)')
#ax.set_xlabel('Data')
#ax.set_xticks(xloc)
#ax.set_xticklabels(dates, rotation=30)
fig.tight_layout()
plt.savefig(os.path.join(simulation_output_dir, 'test_lognormal_detected.png'))
plt.close(fig)
def test_lognormal_severe(self, simulation_output_dir):
fig, ax = plt.subplots(nrows=1, ncols=1)
for i, run in enumerate(self._all_runs_severe):
self.plot_values(run, f'Run {i}', ax, reduce_offset=False, type='semilogy')
self.add_observed_curve(ax)
#ax.legend()
ax.set_xlim(self.xlim)
ax.set_ylim(self.ylim)
ax.set_title(f'Sample paths of severe cases')
fig.tight_layout()
plt.savefig(os.path.join(simulation_output_dir, 'test_lognormal_severe.png'))
plt.close(fig)
    def test_detected_cases(self, simulation_output_dir):
        """Select simulation runs that match the observed detection curve
        and plot the accepted trajectories.

        A run is accepted when, one week before the time its cumulative
        detections reach ``NUMBER_OF_DETECTED_AT_ZERO_TIME``, its detection
        count lies within 10% of the observed value (``LAID_CURVE["-7"]``).
        Accepted runs are pooled into scatter plots of detections and
        prevalence (full-range and cut-range variants, four PNG files;
        axis labels/titles are in Polish for publication).

        :param simulation_output_dir: directory where figures are written
        :return: the number of accepted ("successful") runs
        """
        fig, ax = plt.subplots(nrows=1, ncols=1)
        x = []
        y = []
        p_x = []
        p_y = []
        successes = 0
        for i, (run, run_p) in enumerate(zip(self._all_runs_detected, self._all_runs_prevalence)):
            x_, y_ = self.plot_values(run.values, f'Run {i}', ax, reduce_offset=False)
            #x_ = x_.values
            #TODO
            if len(x_) > self._params[NUMBER_OF_DETECTED_AT_ZERO_TIME]:
                # t0 = the moment the run reaches the observed number of
                # detections; the run is evaluated one week earlier (t0 - 7).
                t0 = x_[self._params[NUMBER_OF_DETECTED_AT_ZERO_TIME]]
                filt_x = x_[x_ <= t0 - 7]
                if len(filt_x) > 0:
                    arg_tminus7 = np.argmax(filt_x)
                    # Accept the run if its T-7 detections are within 10% of
                    # the observed curve.
                    if np.abs(y_[arg_tminus7] - self._params[LAID_CURVE]["-7"]) < 0.1 * self._params[LAID_CURVE]["-7"]:
                        x.extend(list(x_))
                        y.extend(list(y_))
                        p_x_ = run_p.values
                        p_y_ = np.arange(1, 1 + len(p_x_))
                        p_x.extend(p_x_)
                        p_y.extend(p_y_)
                        successes += 1
        self.logger.info(f'There are {successes} successes')
        self.add_observed_curve(ax)
        #ax.legend()
        ax.set_xlim(self.xlim_cut)
        ax.set_ylim(self.ylim_cut)
        # Effective reproduction number R* before/after the fear-factor
        # reduction, used only for the (Polish) plot titles.
        reduction = (1 - self._params[FEAR_FACTORS].get(CONSTANT, self._params[FEAR_FACTORS][DEFAULT])[LIMIT_VALUE]) * 100
        R = 2.34 * self._params[TRANSMISSION_PROBABILITIES][CONSTANT]
        reducted = (100 - reduction) * R / 100
        title = f'Prognoza diagnoz (q={self._params[DETECTION_MILD_PROBA]:.1f}, redukcja R* z {R:.2f} o {reduction:.0f}% do {reducted:.2f})'
        title2 = f'Prognoza liczby zakażonych\n(q={self._params[DETECTION_MILD_PROBA]:.1f}, redukcja R* z {R:.2f} o {reduction:.0f}% do {reducted:.2f})'
        ax.set_title(title)
        #ax.set_title(f'Sample paths of detected cases')
        xloc = [0, 5, 10, 15, 20, 25, 28]
        dates = ['02/04/20', '07/04/20', '12/04/20', '17/04/20', '22/04/20', '27/04/20', '30/04/20']
        ax.set_ylabel('Zdiagnozowani')
        ax.set_xlabel('Data')
        ax.set_xticks(xloc)
        ax.set_xticklabels(dates, rotation=30)
        fig.tight_layout()
        plt.savefig(os.path.join(simulation_output_dir, 'detected_cases_pl.png'))
        plt.close(fig)
        # Density-style scatter plots are only meaningful when at least one
        # run was accepted.
        if successes > 0:
            #xy = np.vstack([x, y])
            #z = scipy.stats.gaussian_kde(xy)(xy)
            fig, ax = plt.subplots()
            ax.set_title(title)
            #ax.scatter(x, y, c=z, s=1, edgecolor='')
            ax.scatter(x, y, s=1, edgecolor='')
            self.add_observed_curve(ax)
            xloc = [0, -5, -10, -15, -20]
            dates = ['02/04/20', '28/03/20', '23/03/20', '18/03/20', '13/03/20']
            ax.set_ylabel('Zdiagnozowani')
            ax.set_xlabel('Data')
            ax.set_xticks(xloc)
            ax.set_xticklabels(dates, rotation=30)
            ax.set_xlim([-20, 0])
            ax.set_ylim([0, self._params[NUMBER_OF_DETECTED_AT_ZERO_TIME]*1.4])
            fig.tight_layout()
            plt.savefig(os.path.join(simulation_output_dir, 'detected_cases_density_pl.png'))
            plt.close(fig)
            fig, ax = plt.subplots()
            ax.set_title(title)
            #ax.scatter(x, y, c=z, s=1, edgecolor='')
            ax.scatter(x, y, s=1, edgecolor='')
            self.add_observed_curve(ax)
            ax.set_xlim(self.xlim_cut)
            ax.set_ylim(self.ylim_cut)
            xloc = [0, 5, 10, 15, 20, 25, 28]
            dates = ['02/04/20', '07/04/20', '12/04/20', '17/04/20', '22/04/20', '27/04/20', '30/04/20']
            ax.set_ylabel('Zdiagnozowani')
            ax.set_xlabel('Data')
            ax.set_xticks(xloc)
            ax.set_xticklabels(dates, rotation=30)
            fig.tight_layout()
            plt.savefig(os.path.join(simulation_output_dir, 'detected_cases_density_pl_cut.png'))
            plt.close(fig)
            #############################################
            #xy = np.vstack([p_x, p_y])
            #z = scipy.stats.gaussian_kde(xy)(xy)
            fig, ax = plt.subplots()
            ax.set_title(title2)
            #ax.scatter(p_x, p_y, c=z, s=1, edgecolor='')
            ax.scatter(p_x, p_y, s=1, edgecolor='')
            self.add_observed_curve(ax)
            xloc = [0, -5, -10, -15, -20]
            dates = ['02/04/20', '28/03/20', '23/03/20', '18/03/20', '13/03/20']
            ax.set_ylabel('Zakażeni')
            ax.set_xlabel('Data')
            ax.set_xticks(xloc)
            ax.set_xticklabels(dates, rotation=30)
            ax.set_xlim([-20, 0])
            ax.set_ylim([0, self._params[NUMBER_OF_DETECTED_AT_ZERO_TIME] * 6.0])
            fig.tight_layout()
            plt.savefig(os.path.join(simulation_output_dir, 'prevalence_density_pl.png'))
            plt.close(fig)
            fig, ax = plt.subplots()
            ax.set_title(title2)
            #ax.scatter(p_x, p_y, c=z, s=1, edgecolor='')
            ax.scatter(p_x, p_y, s=1, edgecolor='')
            self.add_observed_curve(ax)
            ax.set_xlim(self.xlim_cut)
            ax.set_ylim(self.ylim_cut)
            xloc = [0, 5, 10, 15, 20, 25, 28]
            dates = ['02/04/20', '07/04/20', '12/04/20', '17/04/20', '22/04/20', '27/04/20', '30/04/20']
            ax.set_ylabel('Zakażeni')
            ax.set_xlabel('Data')
            ax.set_xticks(xloc)
            ax.set_xticklabels(dates, rotation=30)
            fig.tight_layout()
            plt.savefig(os.path.join(simulation_output_dir, 'prevalence_density_pl_cut.png'))
            plt.close(fig)
        return successes
    def add_observed_curve(self, ax):
        """Overlay the empirically observed case curve (``LAID_CURVE``)
        on *ax* as dots; no-op when the curve is empty.

        :param ax: matplotlib axes to draw on
        """
        if self._params[LAID_CURVE].items():
            laid_curve_x = np.array([float(elem) for elem in self._params[LAID_CURVE].keys()])
            # When zero-time is aligned to the detection threshold, shift the
            # observed curve by the same offset so both share the time axis.
            if self._params[MOVE_ZERO_TIME_ACCORDING_TO_DETECTED]:
                if self._max_time_offset != np.inf:
                    laid_curve_x = np.array([float(elem) + self._max_time_offset for elem in self._params[LAID_CURVE].keys()])
            laid_curve_y = np.array(list(self._params[LAID_CURVE].values()))
            self.plot_values(laid_curve_x, 'Cases observed in PL', ax, yvalues=laid_curve_y, dots=True)
    def lancet_icu_beds(self, simulation_output_dir):
        """Plot ICU demand over time against ICU capacity (plus the deceased
        curve) and save ``lancet_icu_beds_analysis.png``.

        ICU occupancy is reconstructed from critical cases: +1 at
        hospitalization (t2), -1 at recovery or death.  If demand ever
        exceeds capacity, the first such time is stored in
        ``self.band_time`` and marked with a vertical line.

        :param simulation_output_dir: directory where the figure is written
        """
        df_r1 = self.df_progression_times
        df_r2 = self.df_infections
        fig, ax = plt.subplots(nrows=1, ncols=1)
        # Individuals whose expected severity is Critical require an ICU bed.
        cond = [k for k, v in self._expected_case_severity.items() if v == ExpectedCaseSeverity.Critical]
        critical = df_r1.loc[df_r1.index.isin(cond)]
        plus = critical.t2.values
        deceased = critical[~critical.tdeath.isna()]
        survived = critical[critical.tdeath.isna()]
        minus1 = survived.trecovery.values
        minus2 = deceased.tdeath.values
        max_time = df_r2.contraction_time.max(axis=0)
        # +1 on admission, -1 on leaving (recovery or death); the cumulative
        # sum of 'd' over sorted times is the ICU occupancy.
        df_plus = pd.DataFrame({'t': plus, 'd': np.ones_like(plus)})
        df_minus1 = pd.DataFrame({'t': minus1, 'd': -np.ones_like(minus1)})
        df_minus2 = pd.DataFrame({'t': minus2, 'd': -np.ones_like(minus2)})
        df = df_plus.append(df_minus1).append(df_minus2).sort_values(by='t')
        df = df[df.t <= max_time]
        if len(df) == 0:
            return
        cumv = df.d.cumsum().values
        x = df.t.values
        self.plot_values(x, yvalues=cumv, label='ICU required', ax=ax)
        largest_y = cumv.max()
        icu_availability = self._params[ICU_AVAILABILITY]
        death_cases = df_r1[~df_r1.tdeath.isna()].sort_values(by='tdeath').tdeath
        d_cases = death_cases[death_cases <= max_time].sort_values()
        if len(d_cases) > 0:
            self.plot_values(d_cases, 'deceased', ax)
            largest_y = max(largest_y, len(d_cases))
        t = [0, max_time]
        # Shift the capacity line onto the offset time axis if zero-time is
        # aligned to the detection threshold.
        if self._params[MOVE_ZERO_TIME_ACCORDING_TO_DETECTED]:
            if self._max_time_offset != np.inf:
                t = [elem - self._max_time_offset for elem in t]
        ax.plot(t, [icu_availability] * 2, label=f'ICU capacity ({icu_availability})')
        cumv_filter_flag = cumv > icu_availability
        if cumv[cumv_filter_flag].any():
            # First moment the required beds exceed the available capacity.
            critical_t = df.t.values[cumv_filter_flag].min()
            self.band_time = critical_t
            ax.plot([critical_t] * 2, [0, largest_y], label=f'Critical time {critical_t:.1f}')
        ax.legend() # 'upper left')
        fig.tight_layout()
        plt.savefig(os.path.join(simulation_output_dir, 'lancet_icu_beds_analysis.png'))
        plt.close(fig)
def icu_beds(self, simulation_output_dir):
df_r1 = self.df_progression_times
df_r2 = self.df_infections
fig, ax = plt.subplots(nrows=1, ncols=1)
cond = [k for k, v in self._expected_case_severity.items() if v == ExpectedCaseSeverity.Critical]
critical = df_r1.loc[df_r1.index.isin(cond)]
plus = critical.t2.values
deceased = critical[~critical.tdeath.isna()]
survived = critical[critical.tdeath.isna()]
minus1 = survived.trecovery.values
minus2 = deceased.tdeath.values
max_time = df_r2.contraction_time.max(axis=0)
df_plus = pd.DataFrame({'t': plus, 'd': np.ones_like(plus)})
df_minus1 = pd.DataFrame({'t': minus1, 'd': -np.ones_like(minus1)})
df_minus2 = pd.DataFrame({'t': minus2, 'd': -np.ones_like(minus2)})
df = df_plus.append(df_minus1).append(df_minus2).sort_values(by='t')
df = df[df.t <= max_time]
if len(df) == 0:
return
cumv = df.d.cumsum().values
x = df.t.values
self.plot_values(x, yvalues=cumv, label='ICU required', ax=ax)
largest_y = cumv.max()
icu_availability = self._params[ICU_AVAILABILITY]
death_cases = df_r1[~df_r1.tdeath.isna()].sort_values(by='tdeath').tdeath
d_cases = death_cases[death_cases <= max_time].sort_values()
t = [0, max_time]
ax.plot(t, [icu_availability] * 2, label='ICU capacity')
cumv_filter_flag = cumv > icu_availability
if cumv[cumv_filter_flag].any():
critical_t = df.t.values[cumv_filter_flag].min()
self.band_time = critical_t
ax.plot([critical_t] * 2, [0, largest_y], label=f'Critical time {critical_t:.1f}')
ax.legend() #'upper left')
ax.set_title('ICU requirements\n{self._params[EXPERIMENT_ID]}')
fig.tight_layout()
plt.savefig(os.path.join(simulation_output_dir, 'icu_beds_analysis.png'))
plt.close(fig) | StarcoderdataPython |
40366 | <reponame>oremj/zamboni
import hashlib
import uuid
from django.conf import settings
import commonware.log
from moz_inapp_pay.verify import verify_claims, verify_keys
import jwt
# Module-level logger for the crypto helpers.
log = commonware.log.getLogger('z.crypto')
# Shared secret used to sign and verify every webpay JWT.
secret = settings.APP_PURCHASE_SECRET
class InvalidSender(Exception):
    """Raised when a webpay postback JWT cannot be decoded with the shared
    secret, i.e. it did not come from a trusted sender."""
    pass
def get_uuid():
    """Return a unique payment identifier of the form ``webpay:<md5 hex>``.

    The random UUID string is explicitly encoded to bytes before hashing:
    ``hashlib.md5`` rejects text on Python 3, and on Python 2 encoding the
    ASCII str is a no-op, so the result is unchanged there.
    """
    return 'webpay:%s' % hashlib.md5(str(uuid.uuid4()).encode('utf-8')).hexdigest()
def verify_webpay_jwt(signed_jwt):
    """Check that *signed_jwt* verifies against the shared webpay secret.

    :param signed_jwt: encoded JWT string received from webpay
    :return: ``{'valid': True}`` when the signature verifies,
             ``{'valid': False}`` otherwise (the error is logged)
    """
    # This can probably be deleted depending upon solitude.
    try:
        jwt.decode(signed_jwt.encode('ascii'), secret)
    # Bug fix: `except Exception, e:` is Python-2-only syntax; `as e` is
    # valid on Python 2.6+ and Python 3.
    except Exception as e:
        log.error('Error decoding webpay jwt: %s' % e, exc_info=True)
        return {'valid': False}
    return {'valid': True}
def sign_webpay_jwt(data):
    """Encode *data* (a claims dict) as a JWT signed with the shared
    webpay secret."""
    return jwt.encode(data, secret)
def parse_from_webpay(signed_jwt, ip):
    """Decode and validate a webpay postback JWT.

    :param signed_jwt: encoded JWT string from the postback body
    :param ip: sender IP address, used only for logging (may be falsy)
    :return: the decoded claims dict
    :raises InvalidSender: when the JWT does not verify against the secret
    """
    try:
        data = jwt.decode(signed_jwt.encode('ascii'), secret)
    # Bug fix: `except Exception, e:` is Python-2-only syntax; `as e` is
    # valid on Python 2.6+ and Python 3.
    except Exception as e:
        log.info('Received invalid webpay postback from IP %s: %s' %
                 (ip or '(unknown)', e), exc_info=True)
        raise InvalidSender()
    # Validate standard claims (exp/iat/...), then the keys we rely on.
    verify_claims(data)
    iss, aud, product_data, trans_id = verify_keys(
        data,
        ('iss', 'aud', 'request.productData', 'response.transactionID'))
    log.info('Received webpay postback JWT: iss:%s aud:%s '
             'trans_id:%s product_data:%s'
             % (iss, aud, trans_id, product_data))
    return data
| StarcoderdataPython |
8009472 | <filename>pyFAI/gui/utils/ProxyAction.py
# coding: utf-8
# /*##########################################################################
#
# Copyright (C) 2016-2018 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
from __future__ import absolute_import
__authors__ = ["<NAME>"]
__license__ = "MIT"
__date__ = "27/08/2018"
from distutils.version import LooseVersion
from silx.gui import qt
class ProxyAction(qt.QAction):
    """A QAction kept synchronized with a source action.

    All displayed properties are copied from the source whenever the
    source emits `changed`, and user interaction on this proxy is
    forwarded back to the source's signals.  Subclasses can intercept
    the setters to customize individual properties.
    """
    def __init__(self, parent, source):
        super(ProxyAction, self).__init__(parent)
        self.__source = source
        # Mirror every change of the source action onto this proxy.
        self.__source.changed.connect(self.__sourceChanged)
        # Forward user interaction with the proxy back to the source.
        self.toggled.connect(self.__actionToggled)
        self.triggered.connect(self.__actionTriggered)
        self.hovered.connect(self.__actionHovered)
        # Initial synchronization.
        self.__sourceChanged()
    def sourceAction(self):
        """Return the action this proxy mirrors."""
        return self.__source
    def __sourceChanged(self):
        # Copy every displayed property from the source action.
        self.setCheckable(self.__source.isCheckable())
        self.setEnabled(self.__source.isEnabled())
        self.setFont(self.__source.font())
        self.setIcon(self.__source.icon())
        self.setIconText(self.__source.iconText())
        self.setIconVisibleInMenu(self.__source.isIconVisibleInMenu())
        self.setMenuRole(self.__source.menuRole())
        self.setShortcut(self.__source.shortcut())
        self.setShortcutContext(self.__source.shortcutContext())
        # setShortcutVisibleInContextMenu only exists since Qt 5.10.
        if LooseVersion(qt.qVersion()) >= LooseVersion("5.10"):
            self.setShortcutVisibleInContextMenu(self.__source.isShortcutVisibleInContextMenu())
        self.setStatusTip(self.__source.statusTip())
        self.setText(self.__source.text())
        self.setToolTip(self.__source.toolTip())
        self.setVisible(self.__source.isVisible())
        self.setWhatsThis(self.__source.whatsThis())
    # NOTE(review): the source signals are re-emitted below without the
    # checked/bool argument — confirm the Qt binding in use accepts
    # argument-less emits for these signals.
    def __actionToggled(self):
        self.__source.toggled.emit()
    def __actionTriggered(self):
        self.__source.triggered.emit()
    def __actionHovered(self):
        self.__source.hovered.emit()
    def hover(self):
        self.__source.hover()
    def toggle(self):
        self.__source.toggle()
    def trigger(self):
        self.__source.trigger()
class CustomProxyAction(ProxyAction):
    """A ProxyAction whose `text` and `iconText` can be overridden.

    While a property is forced, updates propagated from the source action
    are ignored for that property; forcing `None` restores the source's
    value and re-enables synchronization.
    """
    def __init__(self, parent, source):
        # Forced values must exist before the base class runs its initial
        # synchronization (which calls the setters below).
        self.__forcedText = None
        self.__forcedIconText = None
        super(CustomProxyAction, self).__init__(parent, source)
    def forceText(self, text):
        """Override the text of the source action.

        The override is removed by passing None; in that case the text is
        set back from the sourceAction.
        """
        self.__forcedText = text
        if self.__forcedText is None:
            text = self.sourceAction().text()
        super(CustomProxyAction, self).setText(text)
    def setText(self, text):
        # Ignore synchronization from the source while a text is forced.
        if self.__forcedText is None:
            super(CustomProxyAction, self).setText(text)
    def forceIconText(self, iconText):
        """Override the iconText of the source action.

        The override is removed by passing None; in that case the iconText
        is set back from the sourceAction.
        """
        self.__forcedIconText = iconText
        if self.__forcedIconText is None:
            iconText = self.sourceAction().iconText()
        super(CustomProxyAction, self).setIconText(iconText)
    def setIconText(self, text):
        # Ignore synchronization from the source while an iconText is forced.
        if self.__forcedIconText is None:
            super(CustomProxyAction, self).setIconText(text)
| StarcoderdataPython |
319152 | <filename>python/chapter-10/queue_config_check.py
###############################################
# RabbitMQ in Action
# Chapter 10 - Queue config watchdog check.
###############################################
#
#
# Author: <NAME>
# (C)2011
###############################################
import sys, json, httplib, urllib, base64, socket
#(qcwc.0) Nagios status codes
EXIT_OK = 0
EXIT_WARNING = 1
EXIT_CRITICAL = 2
EXIT_UNKNOWN = 3
#/(qcwc.1) Parse arguments
server, port = sys.argv[1].split(":")
vhost = sys.argv[2]
username = sys.argv[3]
password = sys.argv[4]
queue_name = sys.argv[5]
auto_delete = json.loads(sys.argv[6].lower())
durable = json.loads(sys.argv[7].lower())
#/(qcwc.2) Connect to server
conn = httplib.HTTPConnection(server, port)
#/(qcwc.3) Build API path
path = "/api/queues/%s/%s" % (urllib.quote(vhost, safe=""),
urllib.quote(queue_name))
method = "GET"
#/(qcwc.4) Issue API request
credentials = base64.b64encode("%s:%s" % (username, password))
try:
conn.request(method, path, "",
{"Content-Type" : "application/json",
"Authorization" : "Basic " + credentials})
#/(qcwc.5) Could not connect to API server, return unknown status
except socket.error:
print "UNKNOWN: Could not connect to %s:%s" % (server, port)
exit(EXIT_UNKNOWN)
response = conn.getresponse()
#/(qcwc.6) Queue does not exist, return critical status
if response.status == 404:
print "CRITICAL: Queue %s does not exist." % queue_name
exit(EXIT_CRITICAL)
#/(qcwc.7) Unexpected API error, return unknown status
elif response.status > 299:
print "UNKNOWN: Unexpected API error: %s" % response.read()
exit(EXIT_UNKNOWN)
#/(qcwc.8) Parse API response
response = json.loads(response.read())
#/(qcwc.9) Queue auto_delete flag incorrect, return warning status
if response["auto_delete"] != auto_delete:
print "WARN: Queue '%s' - auto_delete flag is NOT %s." % \
(queue_name, auto_delete)
exit(EXIT_WARNING)
#/(qcwc.10) Queue durable flag incorrect, return warning status
if response["durable"] != durable:
print "WARN: Queue '%s' - durable flag is NOT %s." % \
(queue_name, durable)
exit(EXIT_WARNING)
#/(qcwc.11) Queue exists and it's flags are correct, return OK status
print "OK: Queue %s configured correctly." % queue_name
exit(EXIT_OK) | StarcoderdataPython |
11372794 | <filename>store/views.py<gh_stars>0
from django.shortcuts import render
from .models import Book
def index(request):
    """Render the store landing page."""
    return render(request, 'store/index.html')
def store(request):
    """Render the store page with the total number of books in context.

    Fix applied: removed a dataset artifact fused onto the return line.
    """
    # .count() issues the same COUNT(*) query; no need for .all() first.
    count = Book.objects.count()
    context = {
        'count': count
    }
    return render(request, 'store/store.html', context)
247265 | #!/usr/bin/env python3
import unittest
import clock
class TestClock(unittest.TestCase):
    """Smoke tests for the clock module."""

    def test_current_time(self):
        """current_time() must return a value, never None."""
        now = clock.current_time()
        self.assertIsNotNone(now)
# Run the tests when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
7177 | # -*- coding: utf-8 -*-
# Copyright (c) 2016, German Neuroinformatics Node (G-Node)
# <NAME> <<EMAIL>>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted under the terms of the BSD License. See
# LICENSE file in the root of the Project.
"""
Tests for neo.io.nixio
"""
import os
from datetime import datetime
try:
import unittest2 as unittest
except ImportError:
import unittest
try:
from unittest import mock
except ImportError:
import mock
import string
import itertools
from six import string_types
import numpy as np
import quantities as pq
from neo.core import (Block, Segment, ChannelIndex, AnalogSignal,
IrregularlySampledSignal, Unit, SpikeTrain, Event, Epoch)
from neo.test.iotest.common_io_test import BaseTestIO
try:
import nixio
HAVE_NIX = True
except ImportError:
HAVE_NIX = False
from neo.io.nixio import NixIO
from neo.io.nixio import nixtypes
@unittest.skipUnless(HAVE_NIX, "Requires NIX")
class NixIOTest(unittest.TestCase):
filename = None
io = None
    def compare_blocks(self, neoblocks, nixblocks):
        """Assert that each Neo Block matches its corresponding NIX block:
        attributes, segment↔group mapping, channelindex↔source mapping,
        and finally the non-nested cross references."""
        for neoblock, nixblock in zip(neoblocks, nixblocks):
            self.compare_attr(neoblock, nixblock)
            self.assertEqual(len(neoblock.segments), len(nixblock.groups))
            for idx, neoseg in enumerate(neoblock.segments):
                nixgrp = nixblock.groups[neoseg.name]
                self.compare_segment_group(neoseg, nixgrp)
            for idx, neochx in enumerate(neoblock.channel_indexes):
                # Unnamed channel indexes fall back to positional lookup.
                if neochx.name:
                    nixsrc = nixblock.sources[neochx.name]
                else:
                    nixsrc = nixblock.sources[idx]
                self.compare_chx_source(neochx, nixsrc)
            self.check_refs(neoblock, nixblock)
    def compare_chx_source(self, neochx, nixsrc):
        """Assert that a Neo ChannelIndex matches a NIX Source: attributes,
        per-channel index/name mapping, and the nested units."""
        self.compare_attr(neochx, nixsrc)
        nix_channels = list(src for src in nixsrc.sources
                            if src.type == "neo.channelindex")
        self.assertEqual(len(neochx.index), len(nix_channels))
        for nixchan in nix_channels:
            # Each NIX channel stores its original Neo index in metadata.
            nixchanidx = nixchan.metadata["index"]
            try:
                neochanpos = list(neochx.index).index(nixchanidx)
            except ValueError:
                self.fail("Channel indexes do not match.")
            if len(neochx.channel_names):
                neochanname = neochx.channel_names[neochanpos]
                # Channel names may come back as bytes; normalize to str.
                if ((not isinstance(neochanname, str)) and
                        isinstance(neochanname, bytes)):
                    neochanname = neochanname.decode()
                nixchanname = nixchan.name
                self.assertEqual(neochanname, nixchanname)
        nix_units = list(src for src in nixsrc.sources
                         if src.type == "neo.unit")
        self.assertEqual(len(neochx.units), len(nix_units))
        for neounit in neochx.units:
            nixunit = nixsrc.sources[neounit.name]
            self.compare_attr(neounit, nixunit)
    def check_refs(self, neoblock, nixblock):
        """
        Checks whether the references between objects that are not nested are
        mapped correctly (e.g., SpikeTrains referenced by a Unit).

        :param neoblock: A Neo block
        :param nixblock: The corresponding NIX block
        """
        for idx, neochx in enumerate(neoblock.channel_indexes):
            # Unnamed channel indexes fall back to positional lookup.
            if neochx.name:
                nixchx = nixblock.sources[neochx.name]
            else:
                nixchx = nixblock.sources[idx]
            # AnalogSignals referencing CHX
            neoasigs = list(sig.name for sig in neochx.analogsignals)
            nixasigs = list(set(da.metadata.name for da in nixblock.data_arrays
                                if da.type == "neo.analogsignal" and
                                nixchx in da.sources))
            self.assertEqual(len(neoasigs), len(nixasigs))
            # IrregularlySampledSignals referencing CHX
            neoisigs = list(sig.name for sig in neochx.irregularlysampledsignals)
            nixisigs = list(set(da.metadata.name for da in nixblock.data_arrays
                                if da.type == "neo.irregularlysampledsignal" and
                                nixchx in da.sources))
            self.assertEqual(len(neoisigs), len(nixisigs))
            # SpikeTrains referencing CHX and Units
            for sidx, neounit in enumerate(neochx.units):
                if neounit.name:
                    nixunit = nixchx.sources[neounit.name]
                else:
                    nixunit = nixchx.sources[sidx]
                neosts = list(st.name for st in neounit.spiketrains)
                nixsts = list(mt for mt in nixblock.multi_tags
                              if mt.type == "neo.spiketrain" and
                              nixunit.name in mt.sources)
                # SpikeTrains must also reference CHX
                for nixst in nixsts:
                    self.assertIn(nixchx.name, nixst.sources)
                nixsts = list(st.name for st in nixsts)
                self.assertEqual(len(neosts), len(nixsts))
                for neoname in neosts:
                    if neoname:
                        self.assertIn(neoname, nixsts)
        # Events and Epochs must reference all Signals in the Group (NIX only)
        for nixgroup in nixblock.groups:
            nixevep = list(mt for mt in nixgroup.multi_tags
                           if mt.type in ["neo.event", "neo.epoch"])
            nixsigs = list(da.name for da in nixgroup.data_arrays
                           if da.type in ["neo.analogsignal",
                                          "neo.irregularlysampledsignal"])
            for nee in nixevep:
                for ns in nixsigs:
                    self.assertIn(ns, nee.references)
def compare_segment_group(self, neoseg, nixgroup):
self.compare_attr(neoseg, nixgroup)
neo_signals = neoseg.analogsignals + neoseg.irregularlysampledsignals
self.compare_signals_das(neo_signals, nixgroup.data_arrays)
neo_eests = neoseg.epochs + neoseg.events + neoseg.spiketrains
self.compare_eests_mtags(neo_eests, nixgroup.multi_tags)
    def compare_signals_das(self, neosignals, data_arrays):
        """For each Neo signal, collect the NIX DataArrays named
        '<signame>.<idx>' (one per signal channel) and compare them."""
        for sig in neosignals:
            # Materialize lazily loaded signals before comparing.
            if self.io._find_lazy_loaded(sig) is not None:
                sig = self.io.load_lazy_object(sig)
            dalist = list()
            # DataArrays for one signal are named "<name>.0", "<name>.1", ...
            for idx in itertools.count():
                nixname = "{}.{}".format(sig.name, idx)
                if nixname in data_arrays:
                    dalist.append(data_arrays[nixname])
                else:
                    break
            # One DataArray per signal channel (columns of the 2D signal).
            _, nsig = np.shape(sig)
            self.assertEqual(nsig, len(dalist))
            self.compare_signal_dalist(sig, dalist)
    def compare_signal_dalist(self, neosig, nixdalist):
        """
        Check if a Neo Analog or IrregularlySampledSignal matches a list of
        NIX DataArrays.

        :param neosig: Neo Analog or IrregularlySampledSignal
        :param nixdalist: List of DataArrays
        """
        # All DataArrays of one signal share the same metadata section.
        nixmd = nixdalist[0].metadata
        self.assertTrue(all(nixmd == da.metadata for da in nixdalist))
        neounit = str(neosig.dimensionality)
        # Compare channel by channel: Neo columns vs name-sorted DataArrays.
        for sig, da in zip(np.transpose(neosig),
                           sorted(nixdalist, key=lambda d: d.name)):
            self.compare_attr(neosig, da)
            np.testing.assert_almost_equal(sig.magnitude, da)
            self.assertEqual(neounit, da.unit)
            timedim = da.dimensions[0]
            if isinstance(neosig, AnalogSignal):
                # Regularly sampled: sampling interval + offset (t_start).
                self.assertIsInstance(timedim, nixtypes["SampledDimension"])
                self.assertEqual(
                    pq.Quantity(timedim.sampling_interval, timedim.unit),
                    neosig.sampling_period
                )
                self.assertEqual(timedim.offset, neosig.t_start.magnitude)
                if "t_start.units" in da.metadata.props:
                    self.assertEqual(da.metadata["t_start.units"],
                                     str(neosig.t_start.dimensionality))
            elif isinstance(neosig, IrregularlySampledSignal):
                # Irregularly sampled: explicit tick positions.
                self.assertIsInstance(timedim, nixtypes["RangeDimension"])
                np.testing.assert_almost_equal(neosig.times.magnitude,
                                               timedim.ticks)
                self.assertEqual(timedim.unit,
                                 str(neosig.times.dimensionality))
def compare_eests_mtags(self, eestlist, mtaglist):
self.assertEqual(len(eestlist), len(mtaglist))
for eest in eestlist:
if self.io._find_lazy_loaded(eest) is not None:
eest = self.io.load_lazy_object(eest)
mtag = mtaglist[eest.name]
if isinstance(eest, Epoch):
self.compare_epoch_mtag(eest, mtag)
elif isinstance(eest, Event):
self.compare_event_mtag(eest, mtag)
elif isinstance(eest, SpikeTrain):
self.compare_spiketrain_mtag(eest, mtag)
    def compare_epoch_mtag(self, epoch, mtag):
        """Assert a Neo Epoch matches a NIX MultiTag: times→positions,
        durations→extents, units and labels."""
        self.assertEqual(mtag.type, "neo.epoch")
        self.compare_attr(epoch, mtag)
        np.testing.assert_almost_equal(epoch.times.magnitude, mtag.positions)
        np.testing.assert_almost_equal(epoch.durations.magnitude, mtag.extents)
        self.assertEqual(mtag.positions.unit,
                         str(epoch.times.units.dimensionality))
        self.assertEqual(mtag.extents.unit,
                         str(epoch.durations.units.dimensionality))
        for neol, nixl in zip(epoch.labels,
                              mtag.positions.dimensions[0].labels):
            # Dirty. Should find the root cause instead
            # (labels sometimes come back as bytes; normalize both sides)
            if isinstance(neol, bytes):
                neol = neol.decode()
            if isinstance(nixl, bytes):
                nixl = nixl.decode()
            self.assertEqual(neol, nixl)
    def compare_event_mtag(self, event, mtag):
        """Assert a Neo Event matches a NIX MultiTag: times→positions,
        units and labels."""
        self.assertEqual(mtag.type, "neo.event")
        self.compare_attr(event, mtag)
        np.testing.assert_almost_equal(event.times.magnitude, mtag.positions)
        self.assertEqual(mtag.positions.unit, str(event.units.dimensionality))
        for neol, nixl in zip(event.labels,
                              mtag.positions.dimensions[0].labels):
            # Dirty. Should find the root cause instead
            # Only happens in 3.2
            if isinstance(neol, bytes):
                neol = neol.decode()
            if isinstance(nixl, bytes):
                nixl = nixl.decode()
            self.assertEqual(neol, nixl)
    def compare_spiketrain_mtag(self, spiketrain, mtag):
        """Assert a Neo SpikeTrain matches a NIX MultiTag: spike times and,
        when present, the waveform feature (values, unit, dimensions)."""
        self.assertEqual(mtag.type, "neo.spiketrain")
        self.compare_attr(spiketrain, mtag)
        np.testing.assert_almost_equal(spiketrain.times.magnitude,
                                       mtag.positions)
        # Waveforms (if any) are stored as the first MultiTag feature.
        if len(mtag.features):
            neowf = spiketrain.waveforms
            nixwf = mtag.features[0].data
            self.assertEqual(np.shape(neowf), np.shape(nixwf))
            self.assertEqual(nixwf.unit, str(neowf.units.dimensionality))
            np.testing.assert_almost_equal(neowf.magnitude, nixwf)
            # Waveform dims: (spike, channel) set dims + sampled time dim.
            self.assertIsInstance(nixwf.dimensions[0], nixtypes["SetDimension"])
            self.assertIsInstance(nixwf.dimensions[1], nixtypes["SetDimension"])
            self.assertIsInstance(nixwf.dimensions[2],
                                  nixtypes["SampledDimension"])
    def compare_attr(self, neoobj, nixobj):
        """Assert common attributes (name, description, datetimes,
        annotations) of a Neo object match the corresponding NIX object."""
        if neoobj.name:
            # Signal DataArrays carry a ".<idx>" suffix; strip it before
            # comparing with the Neo name.
            if isinstance(neoobj, (AnalogSignal, IrregularlySampledSignal)):
                nix_name = ".".join(nixobj.name.split(".")[:-1])
            else:
                nix_name = nixobj.name
            self.assertEqual(neoobj.name, nix_name)
        self.assertEqual(neoobj.description, nixobj.definition)
        if hasattr(neoobj, "rec_datetime") and neoobj.rec_datetime:
            self.assertEqual(neoobj.rec_datetime,
                             datetime.fromtimestamp(nixobj.created_at))
        if hasattr(neoobj, "file_datetime") and neoobj.file_datetime:
            self.assertEqual(neoobj.file_datetime,
                             datetime.fromtimestamp(
                                 nixobj.metadata["file_datetime"]))
        if neoobj.annotations:
            nixmd = nixobj.metadata
            for k, v, in neoobj.annotations.items():
                # Quantity annotations are split into value + unit property.
                if isinstance(v, pq.Quantity):
                    self.assertEqual(nixmd.props[str(k)].unit,
                                     str(v.dimensionality))
                    np.testing.assert_almost_equal(nixmd[str(k)],
                                                   v.magnitude)
                else:
                    self.assertEqual(nixmd[str(k)], v)
    @classmethod
    def create_full_nix_file(cls, filename):
        """Build a NIX file populated with randomized Neo-style structures.

        Creates two blocks with three groups (segments) each, then fills
        the first group of the first block with analog signals,
        irregularly sampled signals, spike trains with waveforms, epochs,
        events, and a channel index with channels and units.  Returns the
        open ``nixio.File``; the caller is responsible for closing it.
        """
        nixfile = nixio.File.open(filename, nixio.FileMode.Overwrite)
        nix_block_a = nixfile.create_block(cls.rword(10), "neo.block")
        nix_block_a.definition = cls.rsentence(5, 10)
        nix_block_b = nixfile.create_block(cls.rword(10), "neo.block")
        nix_block_b.definition = cls.rsentence(3, 3)
        nix_block_a.metadata = nixfile.create_section(
            nix_block_a.name, nix_block_a.name+".metadata"
        )
        nix_block_b.metadata = nixfile.create_section(
            nix_block_b.name, nix_block_b.name+".metadata"
        )
        nix_blocks = [nix_block_a, nix_block_b]
        # three groups (neo segments) per block, each with its own section
        for blk in nix_blocks:
            for ind in range(3):
                group = blk.create_group(cls.rword(), "neo.segment")
                group.definition = cls.rsentence(10, 15)
                group_md = blk.metadata.create_section(group.name,
                                                       group.name+".metadata")
                group.metadata = group_md
        # all remaining data objects go into the first group of block A
        blk = nix_blocks[0]
        group = blk.groups[0]
        allspiketrains = list()
        allsignalgroups = list()
        # analogsignals
        for n in range(3):
            siggroup = list()
            asig_name = "{}_asig{}".format(cls.rword(10), n)
            asig_definition = cls.rsentence(5, 5)
            asig_md = group.metadata.create_section(asig_name,
                                                    asig_name+".metadata")
            for idx in range(3):
                da_asig = blk.create_data_array(
                    "{}.{}".format(asig_name, idx),
                    "neo.analogsignal",
                    data=cls.rquant(100, 1)
                )
                da_asig.definition = asig_definition
                da_asig.unit = "mV"
                da_asig.metadata = asig_md
                timedim = da_asig.append_sampled_dimension(0.01)
                timedim.unit = "ms"
                timedim.label = "time"
                timedim.offset = 10
                da_asig.append_set_dimension()
                group.data_arrays.append(da_asig)
                siggroup.append(da_asig)
            allsignalgroups.append(siggroup)
        # irregularlysampledsignals
        for n in range(2):
            siggroup = list()
            isig_name = "{}_isig{}".format(cls.rword(10), n)
            isig_definition = cls.rsentence(12, 12)
            isig_md = group.metadata.create_section(isig_name,
                                                    isig_name+".metadata")
            isig_times = cls.rquant(200, 1, True)
            for idx in range(10):
                da_isig = blk.create_data_array(
                    "{}.{}".format(isig_name, idx),
                    "neo.irregularlysampledsignal",
                    data=cls.rquant(200, 1)
                )
                da_isig.definition = isig_definition
                da_isig.unit = "mV"
                da_isig.metadata = isig_md
                timedim = da_isig.append_range_dimension(isig_times)
                timedim.unit = "s"
                timedim.label = "time"
                da_isig.append_set_dimension()
                group.data_arrays.append(da_isig)
                siggroup.append(da_isig)
            allsignalgroups.append(siggroup)
        # SpikeTrains with Waveforms
        for n in range(4):
            stname = "{}-st{}".format(cls.rword(20), n)
            times = cls.rquant(400, 1, True)
            times_da = blk.create_data_array(
                "{}.times".format(stname),
                "neo.spiketrain.times",
                data=times
            )
            times_da.unit = "ms"
            mtag_st = blk.create_multi_tag(stname,
                                           "neo.spiketrain",
                                           times_da)
            group.multi_tags.append(mtag_st)
            mtag_st.definition = cls.rsentence(20, 30)
            mtag_st_md = group.metadata.create_section(
                mtag_st.name, mtag_st.name+".metadata"
            )
            mtag_st.metadata = mtag_st_md
            # t_stop is stored as metadata, one past the last spike time
            mtag_st_md.create_property(
                "t_stop", nixio.Value(max(times_da).item()+1)
            )
            waveforms = cls.rquant((10, 8, 5), 1)
            wfname = "{}.waveforms".format(mtag_st.name)
            wfda = blk.create_data_array(wfname, "neo.waveforms",
                                         data=waveforms)
            wfda.unit = "mV"
            mtag_st.create_feature(wfda, nixio.LinkType.Indexed)
            wfda.append_set_dimension() # spike dimension
            wfda.append_set_dimension() # channel dimension
            wftimedim = wfda.append_sampled_dimension(0.1)
            wftimedim.unit = "ms"
            wftimedim.label = "time"
            wfda.metadata = mtag_st_md.create_section(
                wfname, "neo.waveforms.metadata"
            )
            wfda.metadata.create_property("left_sweep",
                                          [nixio.Value(20)]*5)
            allspiketrains.append(mtag_st)
        # Epochs
        for n in range(3):
            epname = "{}-ep{}".format(cls.rword(5), n)
            times = cls.rquant(5, 1, True)
            times_da = blk.create_data_array(
                "{}.times".format(epname),
                "neo.epoch.times",
                data=times
            )
            times_da.unit = "s"
            extents = cls.rquant(5, 1)
            extents_da = blk.create_data_array(
                "{}.durations".format(epname),
                "neo.epoch.durations",
                data=extents
            )
            extents_da.unit = "s"
            mtag_ep = blk.create_multi_tag(
                epname, "neo.epoch", times_da
            )
            group.multi_tags.append(mtag_ep)
            mtag_ep.definition = cls.rsentence(2)
            mtag_ep.extents = extents_da
            label_dim = mtag_ep.positions.append_set_dimension()
            label_dim.labels = cls.rsentence(5).split(" ")
            # reference all signals in the group
            for siggroup in allsignalgroups:
                mtag_ep.references.extend(siggroup)
        # Events
        for n in range(2):
            evname = "{}-ev{}".format(cls.rword(5), n)
            times = cls.rquant(5, 1, True)
            times_da = blk.create_data_array(
                "{}.times".format(evname),
                "neo.event.times",
                data=times
            )
            times_da.unit = "s"
            mtag_ev = blk.create_multi_tag(
                evname, "neo.event", times_da
            )
            group.multi_tags.append(mtag_ev)
            mtag_ev.definition = cls.rsentence(2)
            label_dim = mtag_ev.positions.append_set_dimension()
            label_dim.labels = cls.rsentence(5).split(" ")
            # reference all signals in the group
            for siggroup in allsignalgroups:
                mtag_ev.references.extend(siggroup)
        # CHX
        nixchx = blk.create_source(cls.rword(10),
                                   "neo.channelindex")
        nixchx.metadata = nix_blocks[0].metadata.create_section(
            nixchx.name, "neo.channelindex.metadata"
        )
        chantype = "neo.channelindex"
        # 3 channels
        for idx in [2, 5, 9]:
            channame = cls.rword(20)
            nixrc = nixchx.create_source(channame, chantype)
            nixrc.definition = cls.rsentence(13)
            nixrc.metadata = nixchx.metadata.create_section(
                nixrc.name, "neo.channelindex.metadata"
            )
            nixrc.metadata.create_property("index", nixio.Value(idx))
            dims = tuple(map(nixio.Value, cls.rquant(3, 1)))
            nixrc.metadata.create_property("coordinates", dims)
            nixrc.metadata.create_property("coordinates.units",
                                           nixio.Value("um"))
        nunits = 1
        # distribute all spiketrains across the units of the CHX
        stsperunit = np.array_split(allspiketrains, nunits)
        for idx in range(nunits):
            unitname = "{}-unit{}".format(cls.rword(5), idx)
            nixunit = nixchx.create_source(unitname, "neo.unit")
            nixunit.definition = cls.rsentence(4, 10)
            for st in stsperunit[idx]:
                st.sources.append(nixchx)
                st.sources.append(nixunit)
        # pick a few signal groups to reference this CHX
        randsiggroups = np.random.choice(allsignalgroups, 5, False)
        for siggroup in randsiggroups:
            for sig in siggroup:
                sig.sources.append(nixchx)
        return nixfile
@staticmethod
def rdate():
return datetime(year=np.random.randint(1980, 2020),
month=np.random.randint(1, 13),
day=np.random.randint(1, 29))
@classmethod
def populate_dates(cls, obj):
obj.file_datetime = cls.rdate()
obj.rec_datetime = cls.rdate()
@staticmethod
def rword(n=10):
return "".join(np.random.choice(list(string.ascii_letters), n))
@classmethod
def rsentence(cls, n=3, maxwl=10):
return " ".join(cls.rword(np.random.randint(1, maxwl))
for _ in range(n))
@classmethod
def rdict(cls, nitems):
rd = dict()
for _ in range(nitems):
key = cls.rword()
value = cls.rword() if np.random.choice((0, 1)) \
else np.random.uniform()
rd[key] = value
return rd
@staticmethod
def rquant(shape, unit, incr=False):
try:
dim = len(shape)
except TypeError:
dim = 1
if incr and dim > 1:
raise TypeError("Shape of quantity array may only be "
"one-dimensional when incremental values are "
"requested.")
arr = np.random.random(shape)
if incr:
arr = np.array(np.cumsum(arr))
return arr*unit
    @classmethod
    def create_all_annotated(cls):
        """Return a Neo Block containing one instance of each supported
        object type, each carrying random annotations (the spike train
        additionally gets Quantity-valued annotations).
        """
        times = cls.rquant(1, pq.s)
        signal = cls.rquant(1, pq.V)
        blk = Block()
        blk.annotate(**cls.rdict(3))
        seg = Segment()
        seg.annotate(**cls.rdict(4))
        blk.segments.append(seg)
        asig = AnalogSignal(signal=signal, sampling_rate=pq.Hz)
        asig.annotate(**cls.rdict(2))
        seg.analogsignals.append(asig)
        isig = IrregularlySampledSignal(times=times, signal=signal,
                                        time_units=pq.s)
        isig.annotate(**cls.rdict(2))
        seg.irregularlysampledsignals.append(isig)
        epoch = Epoch(times=times, durations=times)
        epoch.annotate(**cls.rdict(4))
        seg.epochs.append(epoch)
        event = Event(times=times)
        event.annotate(**cls.rdict(4))
        seg.events.append(event)
        spiketrain = SpikeTrain(times=times, t_stop=pq.s, units=pq.s)
        d = cls.rdict(6)
        # quantity-valued annotations exercise unit-aware serialization
        d["quantity"] = pq.Quantity(10, "mV")
        d["qarray"] = pq.Quantity(range(10), "mA")
        spiketrain.annotate(**d)
        seg.spiketrains.append(spiketrain)
        chx = ChannelIndex(name="achx", index=[1, 2])
        chx.annotate(**cls.rdict(5))
        blk.channel_indexes.append(chx)
        unit = Unit()
        unit.annotate(**cls.rdict(2))
        chx.units.append(unit)
        return blk
class NixIOWriteTest(NixIOTest):
    """Round-trip write tests: write Neo objects with NixIO, then verify
    both the re-read Neo structures and the raw NIX file contents through
    an independent read-only handle.
    """

    def setUp(self):
        self.filename = "nixio_testfile_write.h5"
        self.writer = NixIO(self.filename, "ow")
        self.io = self.writer
        # separate read-only handle onto the same file for verification
        self.reader = nixio.File.open(self.filename,
                                      nixio.FileMode.ReadOnly)

    def tearDown(self):
        del self.writer
        self.reader.close()
        os.remove(self.filename)

    def write_and_compare(self, blocks):
        """Write *blocks*, read them back, and compare with the raw file."""
        self.writer.write_all_blocks(blocks)
        self.compare_blocks(self.writer.read_all_blocks(), self.reader.blocks)

    def test_block_write(self):
        block = Block(name=self.rword(),
                      description=self.rsentence())
        self.write_and_compare([block])
        block.annotate(**self.rdict(5))
        self.write_and_compare([block])

    def test_segment_write(self):
        block = Block(name=self.rword())
        segment = Segment(name=self.rword(), description=self.rword())
        block.segments.append(segment)
        self.write_and_compare([block])
        segment.annotate(**self.rdict(2))
        self.write_and_compare([block])

    def test_channel_index_write(self):
        block = Block(name=self.rword())
        chx = ChannelIndex(name=self.rword(),
                           description=self.rsentence(),
                           index=[1, 2, 3, 5, 8, 13])
        block.channel_indexes.append(chx)
        self.write_and_compare([block])
        chx.annotate(**self.rdict(3))
        self.write_and_compare([block])

    def test_signals_write(self):
        block = Block()
        seg = Segment()
        block.segments.append(seg)
        asig = AnalogSignal(signal=self.rquant((10, 3), pq.mV),
                            sampling_rate=pq.Quantity(10, "Hz"))
        seg.analogsignals.append(asig)
        self.write_and_compare([block])
        anotherblock = Block("ir signal block")
        seg = Segment("ir signal seg")
        anotherblock.segments.append(seg)
        irsig = IrregularlySampledSignal(
            signal=np.random.random((20, 3)),
            times=self.rquant(20, pq.ms, True),
            units=pq.A
        )
        seg.irregularlysampledsignals.append(irsig)
        self.write_and_compare([anotherblock])
        block.segments[0].analogsignals.append(
            AnalogSignal(signal=[10.0, 1.0, 3.0], units=pq.S,
                         sampling_period=pq.Quantity(3, "s"),
                         dtype=np.double, name="signal42",
                         description="this is an analogsignal",
                         t_start=45 * pq.ms),
        )
        self.write_and_compare([block, anotherblock])
        block.segments[0].irregularlysampledsignals.append(
            # np.float was only an alias for the builtin float and is
            # removed in NumPy >= 1.20; use float directly.
            IrregularlySampledSignal(times=np.random.random(10),
                                     signal=np.random.random((10, 3)),
                                     units="mV", time_units="s",
                                     dtype=float,
                                     name="some sort of signal",
                                     description="the signal is described")
        )
        self.write_and_compare([block, anotherblock])

    def test_epoch_write(self):
        block = Block()
        seg = Segment()
        block.segments.append(seg)
        epoch = Epoch(times=[1, 1, 10, 3]*pq.ms, durations=[3, 3, 3, 1]*pq.ms,
                      labels=np.array(["one", "two", "three", "four"]),
                      name="test epoch", description="an epoch for testing")
        seg.epochs.append(epoch)
        self.write_and_compare([block])

    def test_event_write(self):
        block = Block()
        seg = Segment()
        block.segments.append(seg)
        event = Event(times=np.arange(0, 30, 10)*pq.s,
                      labels=np.array(["0", "1", "2"]),
                      name="event name",
                      description="event description")
        seg.events.append(event)
        self.write_and_compare([block])

    def test_spiketrain_write(self):
        block = Block()
        seg = Segment()
        block.segments.append(seg)
        spiketrain = SpikeTrain(times=[3, 4, 5]*pq.s, t_stop=10.0,
                                name="spikes!", description="sssssspikes")
        seg.spiketrains.append(spiketrain)
        self.write_and_compare([block])
        waveforms = self.rquant((20, 5, 10), pq.mV)
        spiketrain = SpikeTrain(times=[1, 1.1, 1.2]*pq.ms, t_stop=1.5*pq.s,
                                name="spikes with wf",
                                description="spikes for waveform test",
                                waveforms=waveforms)
        seg.spiketrains.append(spiketrain)
        self.write_and_compare([block])
        spiketrain.left_sweep = np.random.random(10)*pq.ms
        self.write_and_compare([block])

    def test_metadata_structure_write(self):
        """Check that each written object has a matching metadata section."""
        neoblk = self.create_all_annotated()
        self.io.write_block(neoblk)
        blk = self.io.nix_file.blocks[0]
        blkmd = blk.metadata
        self.assertEqual(blk.name, blkmd.name)
        grp = blk.groups[0]  # segment
        self.assertIn(grp.name, blkmd.sections)
        grpmd = blkmd.sections[grp.name]
        for da in grp.data_arrays:  # signals
            name = ".".join(da.name.split(".")[:-1])
            self.assertIn(name, grpmd.sections)
        for mtag in grp.multi_tags:  # spiketrains, events, and epochs
            self.assertIn(mtag.name, grpmd.sections)
        srcchx = blk.sources[0]  # chx
        self.assertIn(srcchx.name, blkmd.sections)
        for srcunit in blk.sources:  # units
            self.assertIn(srcunit.name, blkmd.sections)
        self.write_and_compare([neoblk])

    def test_anonymous_objects_write(self):
        """Write a full hierarchy of unnamed objects and verify it."""
        nblocks = 2
        nsegs = 2
        nanasig = 4
        nirrseg = 2
        nepochs = 3
        nevents = 4
        nspiketrains = 3
        nchx = 5
        nunits = 10
        times = self.rquant(1, pq.s)
        signal = self.rquant(1, pq.V)
        blocks = []
        for blkidx in range(nblocks):
            blk = Block()
            blocks.append(blk)
            for segidx in range(nsegs):
                seg = Segment()
                blk.segments.append(seg)
                for anaidx in range(nanasig):
                    seg.analogsignals.append(AnalogSignal(signal=signal,
                                                          sampling_rate=pq.Hz))
                for irridx in range(nirrseg):
                    seg.irregularlysampledsignals.append(
                        IrregularlySampledSignal(times=times,
                                                 signal=signal,
                                                 time_units=pq.s)
                    )
                for epidx in range(nepochs):
                    seg.epochs.append(Epoch(times=times, durations=times))
                for evidx in range(nevents):
                    seg.events.append(Event(times=times))
                for stidx in range(nspiketrains):
                    seg.spiketrains.append(SpikeTrain(times=times, t_stop=pq.s,
                                                      units=pq.s))
            for chidx in range(nchx):
                chx = ChannelIndex(name="chx{}".format(chidx),
                                   index=[1, 2])
                blk.channel_indexes.append(chx)
                for unidx in range(nunits):
                    unit = Unit()
                    chx.units.append(unit)
        self.writer.write_all_blocks(blocks)
        self.compare_blocks(blocks, self.reader.blocks)

    def test_to_value(self):
        """Exercise _write_property for every supported value type."""
        section = self.io.nix_file.create_section("Metadata value test",
                                                  "Test")
        writeprop = self.io._write_property
        # quantity
        qvalue = pq.Quantity(10, "mV")
        writeprop(section, "qvalue", qvalue)
        self.assertEqual(section["qvalue"], 10)
        self.assertEqual(section.props["qvalue"].unit, "mV")
        # datetime
        dt = self.rdate()
        writeprop(section, "dt", dt)
        self.assertEqual(datetime.fromtimestamp(section["dt"]), dt)
        # string
        randstr = self.rsentence()
        writeprop(section, "randstr", randstr)
        self.assertEqual(section["randstr"], randstr)
        # bytes
        bytestring = b"bytestring"
        writeprop(section, "randbytes", bytestring)
        self.assertEqual(section["randbytes"], bytestring.decode())
        # iterables
        randlist = np.random.random(10).tolist()
        writeprop(section, "randlist", randlist)
        self.assertEqual(randlist, section["randlist"])
        randarray = np.random.random(10)
        writeprop(section, "randarray", randarray)
        np.testing.assert_almost_equal(randarray, section["randarray"])
        # numpy item
        npval = np.float64(2398)
        writeprop(section, "npval", npval)
        self.assertEqual(npval, section["npval"])
        # number
        val = 42
        writeprop(section, "val", val)
        self.assertEqual(val, section["val"])
        # multi-dimensional data -- UNSUPPORTED
        # mdlist = [[1, 2, 3], [4, 5, 6]]
        # writeprop(section, "mdlist", mdlist)
        # mdarray = np.random.random((10, 3))
        # writeprop(section, "mdarray", mdarray)
class NixIOReadTest(NixIOTest):
    """Read tests against a prebuilt NIX fixture file, covering the
    lazy/cascade loading combinations of ``NixIO.read_all_blocks``.
    """
    filename = "testfile_readtest.h5"
    nixfile = None
    nix_blocks = None
    original_methods = dict()
    @classmethod
    def setUpClass(cls):
        # The fixture file is built once and shared by all tests.
        if HAVE_NIX:
            cls.nixfile = cls.create_full_nix_file(cls.filename)
    def setUp(self):
        self.io = NixIO(self.filename, "ro")
        # keep references to methods that individual tests replace
        self.original_methods["_read_cascade"] = self.io._read_cascade
        self.original_methods["_update_maps"] = self.io._update_maps
    @classmethod
    def tearDownClass(cls):
        if HAVE_NIX:
            cls.nixfile.close()
    def tearDown(self):
        del self.io
    def test_all_read(self):
        """Full, eager read: result must match the raw file."""
        neo_blocks = self.io.read_all_blocks(cascade=True, lazy=False)
        nix_blocks = self.io.nix_file.blocks
        self.compare_blocks(neo_blocks, nix_blocks)
    def test_lazyload_fullcascade_read(self):
        """Lazy load with full cascade: structure is present but all data
        objects must be empty."""
        neo_blocks = self.io.read_all_blocks(cascade=True, lazy=True)
        nix_blocks = self.io.nix_file.blocks
        # data objects should be empty
        for block in neo_blocks:
            for seg in block.segments:
                for asig in seg.analogsignals:
                    self.assertEqual(len(asig), 0)
                for isig in seg.irregularlysampledsignals:
                    self.assertEqual(len(isig), 0)
                for epoch in seg.epochs:
                    self.assertEqual(len(epoch), 0)
                for event in seg.events:
                    self.assertEqual(len(event), 0)
                for st in seg.spiketrains:
                    self.assertEqual(len(st), 0)
        self.compare_blocks(neo_blocks, nix_blocks)
    def test_lazyload_lazycascade_read(self):
        neo_blocks = self.io.read_all_blocks(cascade="lazy", lazy=True)
        nix_blocks = self.io.nix_file.blocks
        self.compare_blocks(neo_blocks, nix_blocks)
    def test_lazycascade_read(self):
        """Lazy cascade: child containers are LazyLists whose raw stored
        items are path strings (inspected by bypassing lazy loading)."""
        def getitem(self, index):
            # raw access to the underlying storage, skipping lazy loading
            return self._data.__getitem__(index)
        from neo.io.nixio import LazyList
        getitem_original = LazyList.__getitem__
        LazyList.__getitem__ = getitem
        neo_blocks = self.io.read_all_blocks(cascade="lazy", lazy=False)
        for block in neo_blocks:
            self.assertIsInstance(block.segments, LazyList)
            self.assertIsInstance(block.channel_indexes, LazyList)
            for seg in block.segments:
                self.assertIsInstance(seg, string_types)
            for chx in block.channel_indexes:
                self.assertIsInstance(chx, string_types)
        LazyList.__getitem__ = getitem_original
    def test_load_lazy_cascade(self):
        """load_lazy_cascade on a block path must materialize plain lists."""
        from neo.io.nixio import LazyList
        neo_blocks = self.io.read_all_blocks(cascade="lazy", lazy=False)
        for block in neo_blocks:
            self.assertIsInstance(block.segments, LazyList)
            self.assertIsInstance(block.channel_indexes, LazyList)
            name = block.name
            block = self.io.load_lazy_cascade("/" + name, lazy=False)
            self.assertIsInstance(block.segments, list)
            self.assertIsInstance(block.channel_indexes, list)
            for seg in block.segments:
                self.assertIsInstance(seg.analogsignals, list)
                self.assertIsInstance(seg.irregularlysampledsignals, list)
                self.assertIsInstance(seg.epochs, list)
                self.assertIsInstance(seg.events, list)
                self.assertIsInstance(seg.spiketrains, list)
    def test_nocascade_read(self):
        """cascade=False: _read_cascade must not run; blocks stay empty."""
        self.io._read_cascade = mock.Mock()
        neo_blocks = self.io.read_all_blocks(cascade=False)
        self.io._read_cascade.assert_not_called()
        for block in neo_blocks:
            self.assertEqual(len(block.segments), 0)
            nix_block = self.io.nix_file.blocks[block.name]
            self.compare_attr(block, nix_block)
    def test_lazy_load_subschema(self):
        """Load a single segment by path, lazily and then eagerly."""
        blk = self.io.nix_file.blocks[0]
        segpath = "/" + blk.name + "/segments/" + blk.groups[0].name
        segment = self.io.load_lazy_cascade(segpath, lazy=True)
        self.assertIsInstance(segment, Segment)
        self.assertEqual(segment.name, blk.groups[0].name)
        self.assertIs(segment.block, None)
        self.assertEqual(len(segment.analogsignals[0]), 0)
        segment = self.io.load_lazy_cascade(segpath, lazy=False)
        self.assertEqual(np.shape(segment.analogsignals[0]), (100, 3))
class NixIOHashTest(NixIOTest):
    """Tests for ``NixIO._hash_object``: identical objects must hash
    identically, and changing any single attribute must change the hash.
    """
    def setUp(self):
        self.hash = NixIO._hash_object
    def _hash_test(self, objtype, argfuncs):
        # argfuncs maps constructor-argument name -> factory producing a
        # random value for it; extra random-word keys act as annotations.
        attr = {}
        for arg, func in argfuncs.items():
            attr[arg] = func()
        obj_one = objtype(**attr)
        obj_two = objtype(**attr)
        hash_one = self.hash(obj_one)
        hash_two = self.hash(obj_two)
        self.assertEqual(hash_one, hash_two)
        # redraw one argument at a time; the hash must change each time
        for arg, func in argfuncs.items():
            chattr = attr.copy()
            chattr[arg] = func()
            obj_two = objtype(**chattr)
            hash_two = self.hash(obj_two)
            self.assertNotEqual(
                hash_one, hash_two,
                "Hash test failed with different '{}'".format(arg)
            )
    def test_block_seg_hash(self):
        argfuncs = {"name": self.rword,
                    "description": self.rsentence,
                    "rec_datetime": self.rdate,
                    "file_datetime": self.rdate,
                    # annotations
                    self.rword(): self.rword,
                    self.rword(): lambda: self.rquant((10, 10), pq.mV)}
        self._hash_test(Block, argfuncs)
        self._hash_test(Segment, argfuncs)
        self._hash_test(Unit, argfuncs)
    def test_chx_hash(self):
        argfuncs = {"name": self.rword,
                    "description": self.rsentence,
                    "index": lambda: np.random.random(10).tolist(),
                    "channel_names": lambda: self.rsentence(10).split(" "),
                    "coordinates": lambda: [(np.random.random() * pq.cm,
                                             np.random.random() * pq.cm,
                                             np.random.random() * pq.cm)]*10,
                    # annotations
                    self.rword(): self.rword,
                    self.rword(): lambda: self.rquant((10, 10), pq.mV)}
        self._hash_test(ChannelIndex, argfuncs)
    def test_analogsignal_hash(self):
        argfuncs = {"name": self.rword,
                    "description": self.rsentence,
                    "signal": lambda: self.rquant((10, 10), pq.mV),
                    "sampling_rate": lambda: np.random.random() * pq.Hz,
                    "t_start": lambda: np.random.random() * pq.sec,
                    "t_stop": lambda: np.random.random() * pq.sec,
                    # annotations
                    self.rword(): self.rword,
                    self.rword(): lambda: self.rquant((10, 10), pq.mV)}
        self._hash_test(AnalogSignal, argfuncs)
    def test_irregularsignal_hash(self):
        argfuncs = {"name": self.rword,
                    "description": self.rsentence,
                    "signal": lambda: self.rquant((10, 10), pq.mV),
                    "times": lambda: self.rquant(10, pq.ms, True),
                    # annotations
                    self.rword(): self.rword,
                    self.rword(): lambda: self.rquant((10, 10), pq.mV)}
        self._hash_test(IrregularlySampledSignal, argfuncs)
    def test_event_hash(self):
        argfuncs = {"name": self.rword,
                    "description": self.rsentence,
                    "times": lambda: self.rquant(10, pq.ms),
                    "durations": lambda: self.rquant(10, pq.ms),
                    "labels": lambda: self.rsentence(10).split(" "),
                    # annotations
                    self.rword(): self.rword,
                    self.rword(): lambda: self.rquant((10, 10), pq.mV)}
        self._hash_test(Event, argfuncs)
        self._hash_test(Epoch, argfuncs)
    def test_spiketrain_hash(self):
        argfuncs = {"name": self.rword,
                    "description": self.rsentence,
                    "times": lambda: self.rquant(10, pq.ms, True),
                    "t_start": lambda: -np.random.random() * pq.sec,
                    "t_stop": lambda: np.random.random() * 100 * pq.sec,
                    "waveforms": lambda: self.rquant((10, 10, 20), pq.mV),
                    # annotations
                    self.rword(): self.rword,
                    self.rword(): lambda: self.rquant((10, 10), pq.mV)}
        self._hash_test(SpikeTrain, argfuncs)
class NixIOPartialWriteTest(NixIOTest):
    """Tests for rewriting into an existing NIX file: only objects whose
    hash changed should have their attributes written again.
    """
    filename = "testfile_partialwrite.h5"
    nixfile = None
    neo_blocks = None
    original_methods = dict()

    @classmethod
    def setUpClass(cls):
        if HAVE_NIX:
            cls.nixfile = cls.create_full_nix_file(cls.filename)

    def setUp(self):
        self.io = NixIO(self.filename, "rw")
        self.neo_blocks = self.io.read_all_blocks()
        # remember the real method so mocks can be undone later
        self.original_methods["_write_attr_annotations"] =\
            self.io._write_attr_annotations

    @classmethod
    def tearDownClass(cls):
        if HAVE_NIX:
            cls.nixfile.close()

    def tearDown(self):
        self.restore_methods()
        del self.io

    def restore_methods(self):
        """Reinstate any instance methods that tests replaced with mocks."""
        for name, method in self.original_methods.items():
            # use the unpacked value directly (was re-indexing the dict,
            # leaving `method` unused)
            setattr(self.io, name, method)

    def _mock_write_attr(self, objclass):
        """Modify every object except *objclass* instances, write, and
        assert that no object of that type had its attributes rewritten."""
        typestr = str(objclass.__name__).lower()
        self.io._write_attr_annotations = mock.Mock(
            wraps=self.io._write_attr_annotations,
            side_effect=self.check_obj_type("neo.{}".format(typestr))
        )
        neo_blocks = self.neo_blocks
        self.modify_objects(neo_blocks, excludes=[objclass])
        self.io.write_all_blocks(neo_blocks)
        self.restore_methods()

    def check_obj_type(self, typestring):
        """Return a side-effect function asserting that no written object
        carries the given NIX type string."""
        neq = self.assertNotEqual

        def side_effect_func(*args, **kwargs):
            obj = kwargs.get("nixobj", args[0])
            if isinstance(obj, list):
                for sig in obj:
                    neq(sig.type, typestring)
            else:
                neq(obj.type, typestring)
        return side_effect_func

    @classmethod
    def modify_objects(cls, objs, excludes=()):
        """Recursively give every object (except instances of *excludes*)
        a fresh random description."""
        excludes = tuple(excludes)
        for obj in objs:
            if not (excludes and isinstance(obj, excludes)):
                obj.description = cls.rsentence()
            for container in getattr(obj, "_child_containers", []):
                children = getattr(obj, container)
                cls.modify_objects(children, excludes)

    def test_partial(self):
        for objclass in NixIO.supported_objects:
            self._mock_write_attr(objclass)
        self.compare_blocks(self.neo_blocks, self.io.nix_file.blocks)

    def test_no_modifications(self):
        """Unmodified objects must never trigger an attribute write."""
        self.io._write_attr_annotations = mock.Mock()
        self.io.write_all_blocks(self.neo_blocks)
        self.io._write_attr_annotations.assert_not_called()
        self.compare_blocks(self.neo_blocks, self.io.nix_file.blocks)
        # clearing hashes and checking again
        for k in self.io._object_hashes.keys():
            self.io._object_hashes[k] = None
        self.io.write_all_blocks(self.neo_blocks)
        self.io._write_attr_annotations.assert_not_called()
        # changing hashes to force rewrite
        for k in self.io._object_hashes.keys():
            self.io._object_hashes[k] = "_"
        self.io.write_all_blocks(self.neo_blocks)
        callcount = self.io._write_attr_annotations.call_count
        self.assertEqual(callcount, len(self.io._object_hashes))
        self.compare_blocks(self.neo_blocks, self.io.nix_file.blocks)
@unittest.skipUnless(HAVE_NIX, "Requires NIX")
class CommonTests(BaseTestIO, unittest.TestCase):
    # Runs neo's generic IO compliance suite (BaseTestIO) against NixIO.
    ioclass = NixIO
| StarcoderdataPython |
367333 | <filename>trajectory_analysis.py
#!/usr/bin/env python
# coding: utf-8
# Author : <NAME>
# Initial Date: Feb 21, 2021
# About: trajectory analysis is for functions/methods/classes which are used to analyze the trajectories compiled from the ITRAC algorithm.
# Read associated README for full description
# License: MIT License
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject
# to the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
# ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
# TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHORS, COPYRIGHT HOLDERS OR ARIZONA BOARD OF REGENTS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
__author__ = '<NAME>'
__email__ = '<EMAIL>'
## general import for data manipulation, file gathering
import numpy as np
import matplotlib.pyplot as pt
import csv
import pandas as pd
import cantools
import matplotlib.animation as animation
from matplotlib import style
from haversine import haversine, Unit
import itertools
import math
import strym as s
import networkx as nx
import os
import cv2
#creating a video in 2D
def create_plots(video_data, plot_folder, hz=20, lMax=200, lanes=2):
    """Render one scatter-plot PNG per 1/``hz``-second slice of *video_data*.

    Fixes over the previous revision: the signature had a missing comma
    (``plot_folderhz = 20``) which fused two parameters and left both
    ``plot_folder`` and ``hz`` undefined in the body, and the body mixed
    the module's import alias ``pt`` with an undefined name ``plt``.

    Parameters
    ----------
    video_data : pandas.DataFrame
        Trajectory samples with columns ``t`` (seconds), ``x`` and ``y``
        (metres).
    plot_folder : str
        Directory where the per-frame PNG files are written.
    hz : int, optional
        Frames per second; each frame covers a 1/``hz``-second window.
    lMax : float, optional
        Axis limit for the ``x`` direction, in metres.
    lanes : int, optional
        How many extra lane-line pairs to draw beyond the travel lane.
    """
    path = plot_folder.encode('unicode-escape').decode()
    data_sec = video_data
    # first/last whole seconds present in the data (int(Series) is
    # deprecated in pandas; index the scalar explicitly)
    t0 = int(data_sec.t.iloc[0])
    tf = int(data_sec.t.iloc[-1])
    for second in range(t0, tf, 1):
        for i in range(0, hz):
            # samples falling inside this frame's 1/hz-second window
            data_t = data_sec.loc[
                (data_sec.t >= second + i*(1/hz)) &
                (data_sec.t <= second + (i+1)*(1/hz))
            ]
            fig, ax = pt.subplots()
            ax.scatter(x=data_t.x, y=data_t.y, s=10)
            for j in range(0, len(data_t)):
                cir = pt.Circle([data_t.iloc[j].x, data_t.iloc[j].y],
                                radius=1, fill=False, color='r')
                ax.add_patch(cir)
            if lanes >= 0:
                # lane of travel
                pt.hlines(-3.65/2, 0, lMax, linestyles='dashed')
                pt.hlines(3.65/2, 0, lMax, linestyles='dashed')
            if lanes >= 1:
                # approximate lanes to left and right
                pt.hlines(3*(-3.65/2), 0, lMax, linestyles='dashed')
                pt.hlines(3*(3.65/2), 0, lMax, linestyles='dashed')
            if lanes >= 2:
                # approximate lanes to 2 to left and 2 to right
                pt.hlines(5*(-3.65/2), 0, lMax, linestyles='dashed')
                pt.hlines(5*(3.65/2), 0, lMax, linestyles='dashed')
            # NOTE(review): the labels suggest x is lateral, but the
            # 0..lMax span reads like a longitudinal range -- confirm
            # which axis is which.
            pt.xlabel('Lateral Distance (m)')
            pt.ylabel('Longitudinal Distance (m)')
            pt.ylim(-13, 13)
            pt.xlim(0, lMax)
            pt.title(str(second)+' to '+str(second+1))
            pt.gca().invert_yaxis()
            pt.savefig(os.path.join(path,
                                    "time"+format(second, "03d")
                                    + format(i, "03d")+".png"),
                       bbox_inches='tight')
            pt.show()
            pt.close()
def video_publisher(plot_folder, name, hz=20):
    """Assemble the PNG frames in *plot_folder* (from ``create_plots``)
    into a video file called *name*.

    ``hz`` was previously read from an undefined global; it is now an
    explicit frame-rate parameter defaulting to the 20 fps used by
    ``create_plots``.
    """
    # encode as raw string in case of weird pathnames
    image_folder = plot_folder.encode('unicode-escape').decode()
    video_name = name
    images = [img for img in os.listdir(image_folder) if img.endswith(".png")]
    images.sort()
    # use one frame to determine the output dimensions
    frame = cv2.imread(os.path.join(image_folder, images[-1]))
    height, width, layers = frame.shape
    fourcc = cv2.VideoWriter_fourcc(*'MP4V')
    video = cv2.VideoWriter(video_name, fourcc, hz, (width, height))
    for image in images:
        video.write(cv2.imread(os.path.join(image_folder, image)))
    cv2.destroyAllWindows()
    video.release()
#creating a video in 3D from Homography -- should I add here?
#common useful plots to look at
#add tracediffs stuff
| StarcoderdataPython |
5173890 | '''
This is a pasted boilerplate kept for reference. It is best to use an IDE like PyCharm or VSCode to edit these documents.
I added them to our project as boilerplates so that I would have offline access to study them.
'''
# Edit made to original Adafruit Driver to enable it to use the SMBus module
# instead of Adafruits i2c module.
# Editor: <NAME>
# Copyright (c) 2014 Adafruit Industries
# Author: <NAME>
#
# Based on the BMP280 driver with BME280 changes provided by
# <NAME>, Edinburgh (www.satsignal.eu)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import logging
import time
from ctypes import c_short
from ctypes import c_byte
from ctypes import c_ubyte
# BME280 default address.
BME280_I2CADDR = 0x76
# Operating Modes (oversampling codes; written into the control
# registers by read_raw_temp below)
BME280_OSAMPLE_1 = 1
BME280_OSAMPLE_2 = 2
BME280_OSAMPLE_4 = 3
BME280_OSAMPLE_8 = 4
BME280_OSAMPLE_16 = 5
# BME280 Registers (read in banks by _load_calibration below)
BME280_REGISTER_DIG_T1 = 0x88  # Trimming parameter registers
BME280_REGISTER_DIG_T2 = 0x8A
BME280_REGISTER_DIG_T3 = 0x8C
BME280_REGISTER_DIG_P1 = 0x8E
BME280_REGISTER_DIG_P2 = 0x90
BME280_REGISTER_DIG_P3 = 0x92
BME280_REGISTER_DIG_P4 = 0x94
BME280_REGISTER_DIG_P5 = 0x96
BME280_REGISTER_DIG_P6 = 0x98
BME280_REGISTER_DIG_P7 = 0x9A
BME280_REGISTER_DIG_P8 = 0x9C
BME280_REGISTER_DIG_P9 = 0x9E
BME280_REGISTER_DIG_H1 = 0xA1
BME280_REGISTER_DIG_H2 = 0xE1
BME280_REGISTER_DIG_H3 = 0xE3
BME280_REGISTER_DIG_H4 = 0xE4
BME280_REGISTER_DIG_H5 = 0xE5
BME280_REGISTER_DIG_H6 = 0xE6
BME280_REGISTER_DIG_H7 = 0xE7
BME280_REGISTER_CHIPID = 0xD0
BME280_REGISTER_VERSION = 0xD1
BME280_REGISTER_SOFTRESET = 0xE0
BME280_REGISTER_CONTROL_HUM = 0xF2
BME280_REGISTER_CONTROL = 0xF4
BME280_REGISTER_CONFIG = 0xF5
BME280_REGISTER_PRESSURE_DATA = 0xF7
BME280_REGISTER_TEMP_DATA = 0xFA
BME280_REGISTER_HUMIDITY_DATA = 0xFD
def getShort(data, index):
    """Return two bytes from *data* starting at *index* as a signed
    16-bit value (little-endian)."""
    raw = (data[index + 1] << 8) + data[index]
    return c_short(raw).value
def getUShort(data, index):
    """Return two bytes from *data* starting at *index* as an unsigned
    16-bit value (little-endian)."""
    return data[index] + (data[index + 1] << 8)
def getChar(data, index):
    """Return one byte from *data* at *index* as a signed char
    (values above 127 wrap to negative)."""
    value = data[index]
    return value - 256 if value > 127 else value
def getUChar(data, index):
    """Return one byte from *data* at *index* as an unsigned char
    (masked to the low 8 bits)."""
    return data[index] & 0xFF
class BME280(object):
    def __init__(self, mode=BME280_OSAMPLE_1, address=BME280_I2CADDR, i2c=None,
                 **kwargs):
        """Open the sensor over I2C, read the calibration coefficients
        and write the initial control register value (0x3F).

        mode: one of the BME280_OSAMPLE_* oversampling codes.
        address: I2C address written to the control register (see NOTE).
        i2c: an smbus-compatible bus object; SMBus(1) is created if None.
        """
        self._logger = logging.getLogger('Adafruit_BMP.BMP085')
        # Check that mode is valid.
        # NOTE(review): the message names BME280_ULTRALOWPOWER etc., which
        # are not defined in this module -- likely left over from the BMP
        # driver this was derived from.
        if mode not in [BME280_OSAMPLE_1, BME280_OSAMPLE_2, BME280_OSAMPLE_4,
                        BME280_OSAMPLE_8, BME280_OSAMPLE_16]:
            raise ValueError(
                'Unexpected mode value {0}. Set mode to one of BME280_ULTRALOWPOWER, BME280_STANDARD, BME280_HIGHRES, or BME280_ULTRAHIGHRES'.format(mode))
        self._mode = mode
        # Create I2C device.
        if i2c is None:
            import smbus as I2C
            i2c = I2C.SMBus(1)
        self._device = i2c#i2c.get_i2c_device(address, **kwargs)
        # NOTE(review): _load_calibration (and read_raw_temp) address the
        # chip via the BME280_I2CADDR constant, not the `address` argument,
        # so a non-default address only affects the write below -- confirm
        # and unify.
        # Load calibration values.
        self._load_calibration()
        self._device.write_byte_data(address, BME280_REGISTER_CONTROL, 0x3F)
        self.t_fine = 0.0
def _load_calibration(self):
cal1 = self._device.read_i2c_block_data(BME280_I2CADDR, 0x88, 24)
cal2 = self._device.read_i2c_block_data(BME280_I2CADDR, 0xA1, 1)
cal3 = self._device.read_i2c_block_data(BME280_I2CADDR, 0xE1, 7)
# Convert byte data to word values
self.dig_T1 = getUShort(cal1, 0)
self.dig_T2 = getShort(cal1, 2)
self.dig_T3 = getShort(cal1, 4)
self.dig_P1 = getUShort(cal1, 6)
self.dig_P2 = getShort(cal1, 8)
self.dig_P3 = getShort(cal1, 10)
self.dig_P4 = getShort(cal1, 12)
self.dig_P5 = getShort(cal1, 14)
self.dig_P6 = getShort(cal1, 16)
self.dig_P7 = getShort(cal1, 18)
self.dig_P8 = getShort(cal1, 20)
self.dig_P9 = getShort(cal1, 22)
self.dig_H1 = getUChar(cal2, 0)
self.dig_H2 = getShort(cal3, 0)
self.dig_H3 = getUChar(cal3, 2)
self.dig_H4 = getChar(cal3, 3)
self.dig_H4 = (self.dig_H4 << 24) >> 20
self.dig_H4 = self.dig_H4 | (getChar(cal3, 4) & 0x0F)
self.dig_H5 = getChar(cal3, 5)
self.dig_H5 = (self.dig_H5 << 24) >> 20
self.dig_H5 = self.dig_H5 | (getUChar(cal3, 4) >> 4 & 0x0F)
self.dig_H6 = getChar(cal3, 6)
'''
print 'dig_T1 = {0:d}'.format (self.dig_T1)
print 'dig_T2 = {0:d}'.format (self.dig_T2)
print 'dig_T3 = {0:d}'.format (self.dig_T3)
'''
'''
print '0xE4 = {0:2x}'.format (self._device.readU8 (BME280_REGISTER_DIG_H4))
print '0xE5 = {0:2x}'.format (self._device.readU8 (BME280_REGISTER_DIG_H5))
print '0xE6 = {0:2x}'.format (self._device.readU8 (BME280_REGISTER_DIG_H6))
print 'dig_H1 = {0:d}'.format (self.dig_H1)
print 'dig_H2 = {0:d}'.format (self.dig_H2)
print 'dig_H3 = {0:d}'.format (self.dig_H3)
print 'dig_H4 = {0:d}'.format (self.dig_H4)
print 'dig_H5 = {0:d}'.format (self.dig_H5)
print 'dig_H6 = {0:d}'.format (self.dig_H6)
'''
def read_raw_temp(self):
"""Reads the raw (uncompensated) temperature from the sensor."""
meas = self._mode
self._device.write_byte_data(BME280_I2CADDR,BME280_REGISTER_CONTROL_HUM, meas)
meas = self._mode << 5 | self._mode << 2 | 1
self._device.write_byte_data(BME280_I2CADDR,BME280_REGISTER_CONTROL, meas)
sleep_time = 0.00125 + 0.0023 * (1 << self._mode)
sleep_time = sleep_time + 0.0023 * (1 << self._mode) + 0.000575
sleep_time = sleep_time + 0.0023 * (1 << self._mode) + 0.000575
time.sleep(sleep_time) # Wait the required time
msb = self._device.read_byte_data(BME280_I2CADDR,BME280_REGISTER_TEMP_DATA)
lsb = self._device.read_byte_data(BME280_I2CADDR,BME280_REGISTER_TEMP_DATA + 1)
xlsb = self._device.read_byte_data(BME280_I2CADDR,BME280_REGISTER_TEMP_DATA + 2)
raw = ((msb << 16) | (lsb << 8) | xlsb) >> 4
return raw
def read_raw_pressure(self):
"""Reads the raw (uncompensated) pressure level from the sensor."""
"""Assumes that the temperature has already been read """
"""i.e. that enough delay has been provided"""
msb = self._device.read_byte_data(BME280_I2CADDR,BME280_REGISTER_PRESSURE_DATA)
lsb = self._device.read_byte_data(BME280_I2CADDR,BME280_REGISTER_PRESSURE_DATA + 1)
xlsb = self._device.read_byte_data(BME280_I2CADDR,BME280_REGISTER_PRESSURE_DATA + 2)
raw = ((msb << 16) | (lsb << 8) | xlsb) >> 4
return raw
def read_raw_humidity(self):
"""Assumes that the temperature has already been read """
"""i.e. that enough delay has been provided"""
msb = self._device.read_byte_data(BME280_I2CADDR,BME280_REGISTER_HUMIDITY_DATA)
lsb = self._device.read_byte_data(BME280_I2CADDR,BME280_REGISTER_HUMIDITY_DATA + 1)
raw = (msb << 8) | lsb
return raw
def read_temperature(self):
"""Gets the compensated temperature in degrees celsius."""
# float in Python is double precision
UT = float(self.read_raw_temp())
var1 = (UT / 16384.0 - self.dig_T1 / 1024.0) * float(self.dig_T2)
var2 = ((UT / 131072.0 - self.dig_T1 / 8192.0) * (
UT / 131072.0 - self.dig_T1 / 8192.0)) * float(self.dig_T3)
self.t_fine = int(var1 + var2)
temp = ((var1 + var2) / 5120.0) - 9.3647 - 1.268279
return temp
def read_temperature2(self):
UT = float(self.read_raw_temp())
var1 = (((UT>>3) - (float(self.dig_T1<<1))) * float(self.dig_T2)) >> 11
var2 = (((((UT>>4) - (float(self.dig_T1))) * ((UT>>4)-float(self.dig_T1))) >> 12) * float(self.dig_T3)) >> 14
t_fine = var1 + var2
temp = (float((t_fine * 5 + 128) >> 8))/100
return temp
def read_pressure(self):
"""Gets the compensated pressure in Pascals."""
adc = self.read_raw_pressure()
var1 = self.t_fine / 2.0 - 64000.0
var2 = var1 * var1 * self.dig_P6 / 32768.0
var2 = var2 + var1 * self.dig_P5 * 2.0
var2 = var2 / 4.0 + self.dig_P4 * 65536.0
var1 = (self.dig_P3 * var1 * var1 / 524288.0 + self.dig_P2 * var1) / 524288.0
var1 = (1.0 + var1 / 32768.0) * self.dig_P1
if var1 == 0:
return 0
p = 1048576.0 - adc
p = ((p - var2 / 4096.0) * 6250.0) / var1
var1 = self.dig_P9 * p * p / 2147483648.0
var2 = p * self.dig_P8 / 32768.0
p = p + (var1 + var2 + self.dig_P7) / 16.0
return p
def read_humidity(self):
adc = self.read_raw_humidity()
# print 'Raw humidity = {0:d}'.format (adc)
h = self.t_fine - 76800.0
h = (adc - (self.dig_H4 * 64.0 + self.dig_H5 / 16384.8 * h)) * (
self.dig_H2 / 65536.0 * (1.0 + self.dig_H6 / 67108864.0 * h * (
1.0 + self.dig_H3 / 67108864.0 * h)))
h = h * (1.0 - self.dig_H1 * h / 524288.0)
if h > 100:
h = 100
elif h < 0:
h = 0
return h
| StarcoderdataPython |
11267050 | # Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from __future__ import print_function
import distutils.ccompiler
import glob
import os
import os.path
import platform
import setuptools
import setuptools.command.build_ext
import subprocess
import sys
def is_64bit():
    """True when the running interpreter is a 64-bit build."""
    return (1 << 32) < sys.maxsize
def is_32bit():
    """True when the running interpreter is a 32-bit build."""
    # Cleaner negation; the original compared the result with `== False`.
    return not is_64bit()
def is_arm():
    """True when the host machine identifier begins with 'arm'."""
    machine = platform.machine()
    return machine[:3] == 'arm'
def determine_cross_compile_args():
    """Extra cmake flags for a 32-bit build on a 64-bit x86 host (non-Windows)."""
    host_arch = platform.machine()
    wants_m32 = (
        host_arch in ('AMD64', 'x86_64')
        and is_32bit()
        and sys.platform != 'win32'
    )
    return ['-DCMAKE_C_FLAGS=-m32'] if wants_m32 else []
def determine_generator_args():
    """Return cmake generator arguments ('-G', and '-A' for VS2019+) on Windows.

    On non-Windows platforms an empty list is returned so cmake picks its
    default generator.

    Raises:
        RuntimeError: if no supported Visual Studio installation is detected.
    """
    if sys.platform == 'win32':
        vs_version = None
        vs_year = None
        try:
            # See which compiler python picks
            compiler = distutils.ccompiler.new_compiler()
            compiler.initialize()
            # Look at compiler path to divine the Visual Studio version.
            # This technique may not work with customized VS install paths.
            # An alternative would be to utilize private python calls:
            # (distutils._msvccompiler._find_vc2017() and _find_vc2015()).
            if '\\Microsoft Visual Studio\\2019' in compiler.cc:
                vs_version = 16
                vs_year = 2019
            elif '\\Microsoft Visual Studio\\2017' in compiler.cc:
                vs_version = 15
                vs_year = 2017
            elif '\\Microsoft Visual Studio 14.0' in compiler.cc:
                vs_version = 14
                vs_year = 2015
        except BaseException:
            # Compiler discovery itself failed; fall through to the error below.
            pass
        # BUG FIX: the original left vs_version/vs_year unbound when no path
        # matched and relied on an `assert` (stripped under -O) to trip a
        # NameError inside the try block. Check explicitly instead.
        if vs_version is None or vs_year is None:
            raise RuntimeError('No supported version of MSVC compiler could be found!')
        print('Using Visual Studio', vs_version, vs_year)
        vs_version_gen_str = "Visual Studio {} {}".format(vs_version, vs_year)
        if vs_year <= 2017:
            # For VS2017 and earlier, architecture goes at end of generator string
            if is_64bit():
                vs_version_gen_str += " Win64"
            return ['-G', vs_version_gen_str]
        # For VS2019 (and presumably later), architecture is passed via -A flag
        arch_str = "x64" if is_64bit() else "Win32"
        return ['-G', vs_version_gen_str, '-A', arch_str]
    return []
class AwsLib(object):
    """One aws-c-* dependency: its name plus optional extra cmake arguments."""

    def __init__(self, name, extra_cmake_args=None):
        self.name = name
        # BUG FIX: the original used a mutable default argument ([]), which
        # would be shared between every instance created without the arg.
        self.extra_cmake_args = [] if extra_cmake_args is None else extra_cmake_args
# The extension depends on these libs.
# They're built along with the extension, in the order listed.
AWS_LIBS = []
# s2n (TLS) is only built on platforms other than macOS/Windows here.
if sys.platform != 'darwin' and sys.platform != 'win32':
    AWS_LIBS.append(AwsLib('s2n'))
AWS_LIBS.append(AwsLib('aws-c-common'))
AWS_LIBS.append(AwsLib('aws-c-io'))
AWS_LIBS.append(AwsLib('aws-c-cal'))
AWS_LIBS.append(AwsLib('aws-c-compression'))
AWS_LIBS.append(AwsLib('aws-c-http'))
AWS_LIBS.append(AwsLib('aws-c-auth'))
AWS_LIBS.append(AwsLib('aws-c-mqtt'))
# Location of this setup.py, the dependency build tree, and the install
# prefix (overridable via the AWS_C_INSTALL environment variable).
PROJECT_DIR = os.path.dirname(os.path.realpath(__file__))
DEP_BUILD_DIR = os.path.join(PROJECT_DIR, 'build', 'deps')
DEP_INSTALL_PATH = os.environ.get('AWS_C_INSTALL', os.path.join(DEP_BUILD_DIR, 'install'))
class awscrt_build_ext(setuptools.command.build_ext.build_ext):
    """build_ext that first builds and installs the bundled aws-c-* libraries."""

    def _build_dependency(self, aws_lib):
        """Configure, build and install one dependency with cmake."""
        prev_cwd = os.getcwd()  # restore cwd at end of function
        lib_source_dir = os.path.join(PROJECT_DIR, 'aws-common-runtime', aws_lib.name)
        build_type = 'Debug' if self.debug else 'RelWithDebInfo'
        # Skip library if it wasn't pulled (e.g. git submodule not checked out)
        if not os.path.exists(os.path.join(lib_source_dir, 'CMakeLists.txt')):
            print("--- Skipping dependency: '{}' source not found ---".format(aws_lib.name))
            return
        print("--- Building dependency: {} ({}) ---".format(aws_lib.name, build_type))
        lib_build_dir = os.path.join(DEP_BUILD_DIR, aws_lib.name)
        # exist_ok avoids the check-then-create race of the original.
        os.makedirs(lib_build_dir, exist_ok=True)
        os.chdir(lib_build_dir)
        try:
            # cmake configure
            cmake_args = ['cmake']
            cmake_args.extend(determine_generator_args())
            cmake_args.extend(determine_cross_compile_args())
            cmake_args.extend([
                '-DCMAKE_PREFIX_PATH={}'.format(DEP_INSTALL_PATH),
                '-DCMAKE_INSTALL_PREFIX={}'.format(DEP_INSTALL_PATH),
                '-DBUILD_SHARED_LIBS=OFF',
                '-DCMAKE_BUILD_TYPE={}'.format(build_type),
                '-DBUILD_TESTING=OFF',
                '-DS2N_NO_PQ_ASM=ON',
            ])
            if self.include_dirs:
                cmake_args.append('-DCMAKE_INCLUDE_PATH={}'.format(';'.join(self.include_dirs)))
            if self.library_dirs:
                cmake_args.append('-DCMAKE_LIBRARY_PATH={}'.format(';'.join(self.library_dirs)))
            cmake_args.extend(aws_lib.extra_cmake_args)
            cmake_args.append(lib_source_dir)
            subprocess.check_call(cmake_args)
            # cmake build/install
            build_cmd = [
                'cmake',
                '--build', './',
                '--config', build_type,
                '--target', 'install',
            ]
            subprocess.check_call(build_cmd)
        finally:
            # BUG FIX: the original leaked the changed cwd when cmake failed
            # (subprocess.check_call raises on non-zero exit).
            os.chdir(prev_cwd)

    def run(self):
        """Build all dependencies, extend search paths, then run the normal build."""
        # build dependencies
        for lib in AWS_LIBS:
            self._build_dependency(lib)
        # update paths so awscrt_ext can access dependencies
        self.include_dirs.append(os.path.join(DEP_INSTALL_PATH, 'include'))
        # some platforms (ex: fedora) use /lib64 instead of just /lib
        lib_dir = 'lib'
        if is_64bit() and os.path.exists(os.path.join(DEP_INSTALL_PATH, 'lib64')):
            lib_dir = 'lib64'
        if is_32bit() and os.path.exists(os.path.join(DEP_INSTALL_PATH, 'lib32')):
            lib_dir = 'lib32'
        self.library_dirs.append(os.path.join(DEP_INSTALL_PATH, lib_dir))
        # continue with normal build_ext.run()
        setuptools.command.build_ext.build_ext.run(self)  # python2 breaks if we use super().run()
def awscrt_ext():
    """Describe the `_awscrt` native extension for setuptools."""
    # Seed compiler/linker flags from the environment.
    compile_args = os.environ.get('CFLAGS', '').split()
    link_args = os.environ.get('LDFLAGS', '').split()
    objects = []
    # Libraries must be passed to the linker with upstream dependencies
    # listed last, hence the reversal.
    lib_names = [lib.name for lib in AWS_LIBS]
    lib_names.reverse()
    if sys.platform == 'win32':
        # the windows apis being used under the hood. Since we're static linking we have to follow the entire chain down
        lib_names += ['Secur32', 'Crypt32', 'Advapi32', 'BCrypt', 'Kernel32', 'Ws2_32', 'Shlwapi']
        # Ensure that debug info is in the obj files, and that it is linked into the .pyd so that
        # stack traces and dumps are useful
        compile_args += ['/Z7']
        link_args += ['/DEBUG']
    elif sys.platform == 'darwin':
        link_args += ['-framework', 'Security']
        # HACK: Don't understand why, but if AWS_LIBS are linked normally on macos, we get this error:
        # ImportError: dlopen(_awscrt.cpython-37m-darwin.so, 2): Symbol not found: _aws_byte_cursor_eq_ignore_case
        # Workaround is to pass them as 'extra_objects' instead of 'libraries'.
        objects = [os.path.join(DEP_INSTALL_PATH, 'lib', 'lib{}.a'.format(lib.name)) for lib in AWS_LIBS]
        lib_names = []
    else:  # unix
        # linker will prefer shared libraries over static if it can find both.
        # force linker to choose static one by using using "-l:libcrypto.a" syntax instead of just "-lcrypto".
        lib_names += [':libcrypto.a', 'rt']
    if distutils.ccompiler.get_default_compiler() != 'msvc':
        compile_args += ['-Wextra', '-Werror', '-Wno-strict-aliasing', '-std=gnu99']
    return setuptools.Extension(
        '_awscrt',
        language='c',
        libraries=lib_names,
        sources=glob.glob('source/*.c'),
        extra_compile_args=compile_args,
        extra_link_args=link_args,
        extra_objects=objects
    )
# Package metadata and build wiring: the native extension is produced by the
# custom build_ext defined above, which builds the aws-c-* deps first.
setuptools.setup(
    name="awscrt",
    version="0.5.6",
    author="Amazon Web Services, Inc",
    author_email="<EMAIL>",
    description="A common runtime for AWS Python projects",
    url="https://github.com/awslabs/aws-crt-python",
    packages=['awscrt'],
    classifiers=[
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
    ],
    # Backport packages needed only on very old interpreters.
    install_requires=[
        'enum34;python_version<"3.4"',
        'futures;python_version<"3.2"',
    ],
    ext_modules=[awscrt_ext()],
    cmdclass={'build_ext': awscrt_build_ext},
    test_suite='test',
    tests_require=[
        'boto3'
    ],
)
| StarcoderdataPython |
6617519 | <reponame>DreamBoatOve/aia_eis
"""
Algorithms from scratch: Gauss-Newton
https://omyllymaki.medium.com/gauss-newton-algorithm-implementation-from-scratch-55ebe56aac2e
"""
import logging
import matplotlib.pyplot as plt
import numpy as np
from loa.g_n.refers.g_n_refer_0.gn_solver import GNSolver
logging.basicConfig(level=logging.INFO)
# Standard deviation of the Gaussian noise added to the synthetic signal.
NOISE = 3
# True coefficients used to generate the test data (cubic polynomial + sine).
COEFFICIENTS = [-0.001, 0.1, 0.1, 2, 15]
def func(x, coeff):
    """Evaluate the model: cubic polynomial in *x* plus a sine term.

    ``coeff`` holds [cubic, quadratic, linear, constant, sine-amplitude].
    """
    cubic = coeff[0] * x ** 3
    quadratic = coeff[1] * x ** 2
    linear = coeff[2] * x
    # Summation order matches the original expression exactly, so the
    # floating-point result is bit-identical.
    return cubic + quadratic + linear + coeff[3] + coeff[4] * np.sin(x)
def main():
    """Demonstrate Gauss-Newton curve fitting on a noisy synthetic signal."""
    samples = np.arange(1, 100)
    clean = func(samples, COEFFICIENTS)
    noisy = clean + NOISE * np.random.randn(len(samples))
    solver = GNSolver(fit_function=func, max_iter=100, tolerance_difference=10 ** (-6))
    # Deliberately poor random starting point to exercise convergence.
    initial_guess = 1000000 * np.random.random(len(COEFFICIENTS))
    solver.fit(samples, noisy, initial_guess)
    estimate = solver.get_estimate()
    residual = solver.get_residual()
    curves = (
        (clean, "Original, noiseless signal"),
        (noisy, "Noisy signal"),
        (estimate, "Fit"),
        (residual, "Residual"),
    )
    for values, label in curves:
        plt.plot(samples, values, label=label, linewidth=2)
    plt.title("Gauss-Newton: curve fitting example")
    plt.xlabel("X")
    plt.ylabel("Y")
    plt.grid()
    plt.legend()
    plt.show()
# Script entry point.
if __name__ == "__main__":
    main()
9627236 | # Imports here
import matplotlib.pyplot as plt
# import pytorch
import torch
from torch import nn, optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
from collections import OrderedDict
# image
from PIL import Image
import numpy as np
# Report the torch build and whether CUDA acceleration is available.
print(torch.__version__)
print(torch.cuda.is_available())
#argparse
import argparse
import random
import os
import json
def load_checkpoint(filepath):
    """Rebuild the saved VGG11-based flower classifier from a checkpoint file.

    The checkpoint dict is expected to carry 'input_size', 'hidden_layer_0',
    'hidden_layer_1', 'output_size', 'class_to_idx', 'state_dict' and 'epochs'.
    """
    checkpoint = torch.load(filepath)
    # NOTE(review): redundant local import — models is already imported at
    # module scope via `from torchvision import ... models`.
    from torchvision import models
    model = models.vgg11(pretrained = True)
    # Freeze the pretrained feature extractor; only the classifier is custom.
    for param in model.parameters():
        param.requires_grad = False
    from collections import OrderedDict
    # Replace the stock classifier with the architecture used at training time.
    model.classifier = nn.Sequential(OrderedDict([
        ('fc1', nn.Linear(checkpoint['input_size'], checkpoint['hidden_layer_0'])),
        ('relu1', nn.ReLU()),
        ('drop_out1', nn.Dropout(0.2)),
        ('fc2', nn.Linear(checkpoint['hidden_layer_0'], checkpoint['hidden_layer_1'])),
        ('relu2', nn.ReLU()),
        ('drop_out2', nn.Dropout(0.2)),
        ('fc3', nn.Linear(checkpoint['hidden_layer_1'], checkpoint['output_size'])), #We'll be using this dataset of 102 flower categories
        ('output', nn.LogSoftmax(dim = 1))]))
    model.class_to_idx=checkpoint['class_to_idx']
    model.load_state_dict(checkpoint['state_dict'])
    model.epoch=checkpoint['epochs']
    return model
def process_image(image):
    """Scale, centre-crop and normalise a PIL image for a PyTorch model.

    Returns a channels-first numpy array (224x224 spatial size), normalised
    with the ImageNet channel statistics.
    """
    resized = image.resize((256, 256))
    width, height = resized.size
    # Centre 224x224 crop box.
    left = (width - 224) // 2
    top = (height - 224) // 2
    right = (width + 224) // 2
    bottom = (height + 224) // 2
    cropped = resized.crop((left, top, right, bottom))
    pixels = np.array(cropped)
    # Normalise with the ImageNet per-channel mean and standard deviation.
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    normalised = (pixels - mean) / std
    # The colour channel needs to be first for PyTorch.
    return normalised.transpose(2, 0, 1)
def predict(image_path, model, topk=5):
    """Predict the top-*topk* classes of the image at *image_path*.

    Returns (probs, classes, categories): the top-k probability tensor, the
    raw top-k class indices, and the category labels obtained by inverting
    ``model.class_to_idx``.
    """
    image = Image.open(image_path)
    image = process_image(image)
    # Wrap in a list to add the batch dimension, then convert to a tensor.
    image = torch.FloatTensor([image])
    model.eval()
    # NOTE(review): `device` is a module-level global assigned only inside the
    # __main__ block below; importing and calling this function from another
    # module raises NameError. Consider passing the device as a parameter.
    output = model.forward(image.to(device))
    # The classifier ends in LogSoftmax, so exponentiate to get probabilities.
    probs = torch.exp(output)
    probs, classes = probs.topk(topk)
    # Invert class_to_idx so model output indices map back to category ids.
    idx_to_class = {val: key for key, val in model.class_to_idx.items()}
    classes = classes.to("cpu")
    classes = classes.detach().numpy().tolist()[0]
    categories = [idx_to_class[idx] for idx in classes]
    return probs, classes, categories
# Command-line entry point: parse arguments, rebuild the model from its
# checkpoint, run a prediction and print the human-readable flower names.
if __name__ == "__main__":
    #get user input
    parser = argparse.ArgumentParser(description='Flowers Image Prediction.')
    parser.add_argument('--input', type=str, default='flowers/test/1/image_06743.jpg', help='path to test flower dataset.')
    parser.add_argument('--checkpoint', type=str, default='checkpoint.pth', help='path to the checkpoint file')
    parser.add_argument('--gpu', action='store_true', help='Enable/Disable GPU')
    parser.add_argument('--cat_to_name', type=str, default='cat_to_name.json', help='Path to JSON mapping file')
    args = parser.parse_args()
    print(args.input)
    print(args.checkpoint)
    print("Loading Checkpoint.!")
    model = load_checkpoint(args.checkpoint)
    print(model)
    print("Model loaded.!")
    # `device` is read as a module-level global by predict() above.
    device = 'cuda' if args.gpu else 'cpu'
    print("device =", device)
    model.to(device)
    probs, classes, categories = predict(args.input, model, topk=5)
    print("probabilty = ", probs)
    print("classes =", classes)
    print("categories =", categories)
    print("cat_to_name: ", args.cat_to_name)
    # Map category ids to human-readable flower names via the JSON file.
    with open(args.cat_to_name, 'r') as f:
        flower_to_name = json.load(f)
    for c in categories:
        print (flower_to_name[c])
1984212 | <reponame>m4reQ/Oss-2.0<gh_stars>1-10
# NOTE(review): inverted main-guard — this module exits immediately when run
# directly; it is intended to be imported only.
if __name__ == '__main__':
    quit()
def CheckResponse():
    """Placeholder response check; always reports success."""
    return True
import os
def InstallPackages():
    """Install the packages listed in requirements.txt, then exit.

    Tries the current interpreter first (the original shelled out only to the
    'py'/'python' launchers, which may resolve to a different interpreter),
    then falls back to the original launcher commands.

    Raises:
        RuntimeError: if every pip invocation fails.
    """
    import sys
    commands = (
        '"{}" -m pip install -r requirements.txt'.format(sys.executable),
        'py -m pip install -r requirements.txt',
        'python -m pip install -r requirements.txt',
    )
    # BUG FIX: os.system() returns an exit status and does not raise on
    # failure, so the original try/except fallback chain could never trigger.
    # Check exit codes explicitly instead.
    for command in commands:
        if os.system(command) == 0:
            break
    else:
        print('Cannot install packages.')
        raise RuntimeError('pip install failed for every interpreter tried')
    print('Modules installed. Please restart application.')
    os.system("pause >NUL")  # Windows-only pause; harmless no-op elsewhere
    quit()
3217408 | from random import randint
from time import sleep
from operator import itemgetter
# Roll one six-sided die for each of four players, then print a ranking.
jogos = {}
print('Valores sorteados:')
# One random roll (1-6) per player, keyed 'jogador1'..'jogador4'.
for c in range (0,4):
    jogos[f'jogador{c+1}'] = randint(1,6)
# Announce each player's roll with a short dramatic pause.
for k, v in jogos.items():
    sleep(0.5)
    print(f' O {k} tirou {v}')
print('Ranking dos jogadores:')
# Sort players by their roll, highest first.
ranking = sorted(jogos.items(), key = itemgetter(1), reverse = True)
for pos, n in enumerate(ranking):
    sleep(0.5)
    print(f' {pos+1}º: {n[0]} com {n[1]}')
| StarcoderdataPython |
1636049 | <reponame>agustinhenze/mibs.snmplabs.com<gh_stars>1-10
#
# PySNMP MIB module IPSEC-FLOW-MIB-TC (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/IPSEC-FLOW-MIB-TC
# Produced by pysmi-0.3.4 at Wed May 1 13:56:21 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# Auto-generated pysnmp MIB module (IPSEC-FLOW-MIB-TC). `mibBuilder` is
# injected into the module namespace by the pysnmp MIB loader before this
# code executes; these assignments play the role of imports.
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
TimeTicks, Unsigned32, ModuleIdentity, Counter32, Integer32, NotificationType, Bits, Gauge32, IpAddress, Counter64, MibIdentifier, iso, experimental, MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "TimeTicks", "Unsigned32", "ModuleIdentity", "Counter32", "Integer32", "NotificationType", "Bits", "Gauge32", "IpAddress", "Counter64", "MibIdentifier", "iso", "experimental", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
# Module identity; the loadTexts guards skip descriptive text unless enabled.
ipsecFlowMibTC = ModuleIdentity((1, 3, 6, 1, 3, 170))
if mibBuilder.loadTexts: ipsecFlowMibTC.setLastUpdated('200302171158Z')
if mibBuilder.loadTexts: ipsecFlowMibTC.setOrganization('Tivoli Systems and Cisco Systems')
if mibBuilder.loadTexts: ipsecFlowMibTC.setContactInfo('Tivoli Systems Research Triangle Park, NC Cisco Systems 170 W Tasman Drive San Jose, CA 95134 USA Tel: +1 800 553-NETS E-mail: <EMAIL> <EMAIL>')
if mibBuilder.loadTexts: ipsecFlowMibTC.setDescription('This MIB module defines the textual conventions used in the IPsec Flow Monitoring MIB. This includes Internet DOI numbers defined in RFC 2407, ISAKMP numbers defined in RFC 2408, and IKE numbers defined in RFC 2409. Revision control of this document after publication will be under the authority of the IANA.')
# Textual conventions: one generated class per TC defined by the MIB.
class ControlProtocol(TextualConvention, Integer32):
    description = 'The protocol used for keying and control. The value of cp_none indicate manual administration of IPsec tunnels. This enumeration will be expanded as new keying protocols are standardized.'
    status = 'current'
    displayHint = 'd'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5))
    namedValues = NamedValues(("reserved", 0), ("cpNone", 1), ("cpIkev1", 2), ("cpIkev2", 3), ("cpKink", 4), ("cpOther", 5))
class Phase1PeerIdentityType(TextualConvention, Integer32):
    description = 'The type of IPsec Phase-1 peer identity. The peer may be identified by one of the ID types defined in IPSEC DOI. id_dn represent the binary DER encoding of the identity.'
    status = 'current'
    displayHint = 'd'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))
    namedValues = NamedValues(("reserved", 0), ("idIpv4Addr", 1), ("idFqdn", 2), ("idDn", 3), ("idIpv6Addr", 4), ("idUserFqdn", 5), ("idIpv4AddrSubnet", 6), ("idIpv6AddrSubnet", 7), ("idIpv4AddrRange", 8), ("idIpv6AddrRange", 9), ("idDerAsn1Gn", 10), ("idKeyId", 11))
class IkeNegoMode(TextualConvention, Integer32):
    description = 'The IPsec Phase-1 IKE negotiation mode.'
    status = 'current'
    displayHint = 'd'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2))
    namedValues = NamedValues(("reserved", 0), ("main", 1), ("aggressive", 2))
class IkeHashAlgo(TextualConvention, Integer32):
    description = 'The hash algorithm used in IPsec Phase-1 IKE negotiations.'
    status = 'current'
    displayHint = 'd'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6))
    namedValues = NamedValues(("reserved", 0), ("md5", 1), ("sha", 2), ("tiger", 3), ("sha256", 4), ("sha384", 5), ("sha512", 6))
class IkeAuthMethod(TextualConvention, Integer32):
    description = 'The authentication method used in IPsec Phase-1 IKE negotiations.'
    status = 'current'
    displayHint = 'd'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10))
    namedValues = NamedValues(("reserved", 0), ("preSharedKey", 1), ("dssSignature", 2), ("rsaSignature", 3), ("rsaEncryption", 4), ("revRsaEncryption", 5), ("elGamalEncryption", 6), ("revElGamalEncryption", 7), ("ecsdaSignature", 8), ("gssApiV1", 9), ("gssApiV2", 10))
class DiffHellmanGrp(TextualConvention, Integer32):
    description = 'The Diffie Hellman Group used in negotiations. reserved -- reserved groups modp768 -- 768-bit MODP modp1024 -- 1024-bit MODP modp1536 -- 1536-bit MODP group ec2nGP155 -- EC2N group on GP[2^155] ec2nGP185 -- EC2N group on GP[2^185] ec2nGF163 -- EC2N group over GF[2^163] ec2nGF283 -- EC2N group over GF[2^283] ec2nGF409 -- EC2N group over GF[2^409] ec2nGF571 -- EC2N group over GF[2^571] '
    status = 'current'
    displayHint = 'd'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 8, 10, 12))
    namedValues = NamedValues(("reserved", 0), ("modp768", 1), ("modp1024", 2), ("ec2nGP155", 3), ("ec2nGP185", 4), ("modp1536", 5), ("ec2nGF163", 6), ("ec2nGF283", 8), ("ec2nGF409", 10), ("ec2nGF571", 12))
class EncapMode(TextualConvention, Integer32):
    description = 'The encapsulation mode used by an IPsec Phase-2 Tunnel.'
    status = 'current'
    displayHint = 'd'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2))
    namedValues = NamedValues(("reserved", 0), ("tunnel", 1), ("transport", 2))
class EncryptAlgo(TextualConvention, Integer32):
    description = 'The encryption algorithm used in negotiations.'
    status = 'current'
    displayHint = 'd'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10))
    namedValues = NamedValues(("reserved", 0), ("espDes", 1), ("esp3des", 2), ("espRc5", 3), ("espIdea", 4), ("espCast", 5), ("espBlowfish", 6), ("esp3idea", 7), ("espRc4", 8), ("espNull", 9), ("espAes", 10))
class Spi(TextualConvention, Integer32):
    description = 'The type of the SPI associated with IPsec Phase-2 security associations.'
    status = 'current'
    displayHint = 'x'
    subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(256, 4294967295)
class AuthAlgo(TextualConvention, Integer32):
    description = 'The authentication algorithm used by a security association of an IPsec Phase-2 Tunnel.'
    status = 'current'
    displayHint = 'd'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 2, 3, 4, 5, 6, 7, 8))
    namedValues = NamedValues(("reserved", 0), ("hmacMd5", 2), ("hmacSha", 3), ("desMac", 4), ("hmacSha256", 5), ("hmacSha384", 6), ("hmacSha512", 7), ("ripemd", 8))
class CompAlgo(TextualConvention, Integer32):
    description = 'The compression algorithm used by a security association of an IPsec Phase-2 Tunnel.'
    status = 'current'
    displayHint = 'd'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4))
    namedValues = NamedValues(("reserved", 0), ("compOui", 1), ("compDeflate", 2), ("compLzs", 3), ("compLzjh", 4))
class EndPtType(TextualConvention, Integer32):
    description = 'The type of identity use to specify an IPsec End Point.'
    status = 'current'
    displayHint = 'd'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))
    namedValues = NamedValues(("reserved", 0), ("idIpv4Addr", 1), ("idFqdn", 2), ("idUserFqdn", 3), ("idIpv4AddrSubnet", 4), ("idIpv6Addr", 5), ("idIpv6AddrSubnet", 6), ("idIpv4AddrRange", 7), ("idIpv6AddrRange", 8), ("idDerAsn1Dn", 9), ("idDerAsn1Gn", 10), ("idKeyId", 11))
# Publish the textual conventions so other MIB modules can import them.
mibBuilder.exportSymbols("IPSEC-FLOW-MIB-TC", Spi=Spi, ipsecFlowMibTC=ipsecFlowMibTC, IkeHashAlgo=IkeHashAlgo, Phase1PeerIdentityType=Phase1PeerIdentityType, EncapMode=EncapMode, EncryptAlgo=EncryptAlgo, IkeAuthMethod=IkeAuthMethod, DiffHellmanGrp=DiffHellmanGrp, ControlProtocol=ControlProtocol, AuthAlgo=AuthAlgo, PYSNMP_MODULE_ID=ipsecFlowMibTC, CompAlgo=CompAlgo, IkeNegoMode=IkeNegoMode, EndPtType=EndPtType)
| StarcoderdataPython |
323648 | import pickle
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import norm
import multiprocessing as mp
from functools import partial
import os
import linecache
import sys
import traceback
from inspect import getmembers, isfunction
import inspect
# Silence matplotlib's "More than 20 figures have been opened" warning; this
# module deliberately creates many figures in a loop.
plt.rcParams.update({'figure.max_open_warning': 0})
def distance(a, b):
    """Return the Euclidean distance between 3-D points *a* and *b*."""
    # The original wrapped each difference in abs(); that is redundant since
    # the components are squared immediately afterwards.
    dx = a[0] - b[0]
    dy = a[1] - b[1]
    dz = a[2] - b[2]
    return np.sqrt(dx**2 + dy**2 + dz**2)
def Gauss(Data, Band, mon=False, Space=None):
    """Kernel-density estimate of *Data* using Gaussian kernels of width *Band*.

    Args:
        Data: iterable of scalars; values outside (1, 9) are discarded.
        Band: standard deviation of each Gaussian kernel.
        mon: if False (default), also locate the first interior density
            minimum above 3.0 and return it as a cut-off radius.
        Space: evaluation grid; defaults to 100 points on [2, 8].

    Returns:
        (Space, Density, R_Cut) when ``mon == False``;
        (Space, Density) when ``mon == True``;
        None when 10 or fewer samples survive the filter (or *mon* is
        neither True nor False, matching the original behaviour).
    """
    if Space is None:
        Space = np.linspace(2, 8, 100)
    samples = np.asarray([elem for elem in Data if 1 < elem < 9])
    if len(samples) <= 10:
        # Too few samples for a meaningful density estimate (the original
        # fell through and returned None implicitly).
        return None
    kernels = [norm.pdf(Space, centre, Band) for centre in samples]
    Density = np.asarray(np.sum(kernels, axis=0))
    # np.trapz was renamed to np.trapezoid in NumPy 2.0; support both.
    _trapz = getattr(np, 'trapezoid', None) or np.trapz
    Density = Density / _trapz(Density, Space)  # normalise to unit area
    # Keep the original's ``==`` comparisons so behaviour for non-bool
    # values of ``mon`` is unchanged.
    if mon == False:
        # Indices of interior local minima of the density curve.
        Min = (np.diff(np.sign(np.diff(Density))) > 0).nonzero()[0] + 1
        # First minimum beyond r = 3 is the cut-off radius.
        # NOTE: raises IndexError if no such minimum exists (as before).
        R_Cut = Space[Min][np.where(Space[Min] > 3)][0]
        return Space, Density, R_Cut
    elif mon == True:
        return Space, Density
    return None
class Plot_Funcs():
    def __init__(self, MetaData = None, Errors = None, Quantities = None, System = None):
        """Initialise the plotter.

        Args:
            MetaData: dict of computed quantities keyed by name (required;
                the process exits if it is missing).
            Errors: matching dict of uncertainties (optional).
            Quantities: dict naming the plots to make and their arguments
                (required; the process exits if it is missing).
            System: optional dict with 'base_dir' and 'plot_dir' entries.
        """
        if System == None:
            # No system info: write everything relative to the CWD.
            self.System = None
            self.Base = ''
            self.Images = ''
            self.single_file = True
        else:
            self.System = System
            try:
                self.Base = System['base_dir']
            except KeyError:
                self.Base = ''
            try:
                self.Images = System['plot_dir']
                self.ensure_dir(self.Base + self.Images)
            except KeyError:
                self.Images = ''
        if MetaData is None:
            sys.exit("\nNo metadata provided for analysis.\nNow exiting.\n")
        else:
            self.Meta = MetaData
        if Errors is None:
            self.Errors = False
            with open(self.Base+'Plotting_Info.txt', "a+") as f:
                f.write("\nNo errors have been provided.\nHence, no errors will be plotted.\n")
        else:
            self.Err = Errors
            self.Errors = True
        if Quantities is None:
            sys.exit("\nNo quantities requested.\nNow exiting.\n")
        else:
            self.Quantities = Quantities
        # Discover all plotting methods on this class by introspection.
        self.functions_list = [o for o in getmembers(Plot_Funcs) if isfunction(o[1])]
        self.Functions = {}
        """This provides a dictionary with the function names as keys and the
        function itself, plus arguments following.
        The reason for the arguments is so that user defined input arguments
        may be identified and fed in correctly."""
        # Map each method name to its argument names (excluding self).
        for x in self.functions_list:
            self.Functions[x[0]] = inspect.getfullargspec(x[1])[0][1:]
        self.Q_Keys = self.Quantities.keys()
        self.Meta_Keys = self.Meta.keys()
        # Match each requested quantity to the plotting method(s) whose name
        # contains it (case-insensitive substring match).
        self.Plot_Dict = {}
        for obj in self.Q_Keys:
            for item in self.functions_list:
                if obj.lower() in item[0].lower():
                    self.Plot_Dict[item[0]] = [item[1]]
def ensure_dir(self, file_path=''):
directory = os.path.dirname(file_path)
if not os.path.exists(directory):
os.makedirs(directory)
def Make_Plots(self):
"""
Robert:
This is the function that calls all of the desired functions for
creating plots.
The list of function names and arguments are already pre-defined and
so this function simply parses through the user input.
Still need to make a robust sanitation of user input but that may come later.
Returns
-------
None.
"""
for x in self.Q_Keys:
if x in self.Functions:
ArgsList = []
for y in self.Functions[x]:
try:
ArgsList.append(self.Quantities[x][y])
except KeyError:
ArgsList.append(
inspect.getargspec(self.Plot_Dict[x][0])[-1][self.Functions[x].index(y)]
)
with open(self.Base+'Plotting_Info.txt', "a") as f:
f.write("\nThe arguments for function %s are %s.\n"%(x,ArgsList))
getattr(self, x)(*ArgsList)
def Collect_CNA(self, Sig):
try:
Index = self.Meta["masterkey"].index( Sig )
return [ self.Meta['cna_sigs'][x][Index] for x in range(len(self.Meta['cna_sigs'])) ]
except KeyError:
with open(self.Base+'Plotting_Info.txt', "a") as f:
f.write("\nData not found in metadata\n")
return None
def Collect_CNA_error(self, Sig):
try:
Index = self.Meta["masterkey"].index( Sig )
return [ self.Err['cna_sigs'][x][Index] for x in range(len(self.Err['cna_sigs'])) ]
except KeyError:
with open(self.Base+'Plotting_Info.txt', "a") as f:
f.write("\nData not found in metadata\n")
return None
def autolabel(self, rects):
"""Attach a text label above each bar in *rects*, displaying its height."""
for rect in rects:
height = rect.get_height()
ax.annotate('{}'.format(height),
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(0, 3), # 3 points vertical offset
textcoords="offset points",
ha='center', va='bottom', fontsize = 18)
    def agcn_heat(self, Name = 'agcn_Heat.png'):
        """Save a heat map of the AGCN distribution (value bins vs. frame).

        Histograms self.Meta['agcn'] per frame into 40 bins over [3, 12] and
        renders the transposed matrix with seaborn, writing the image to
        ``self.Base + self.Images + '/' + Name``.
        """
        Bins = np.linspace(3,12,41)
        Heat = []
        try:
            # One histogram (one heat-map column) per trajectory frame.
            for frame in range( len(self.Meta['agcn']) ):
                a,b = np.histogram( self.Meta['agcn'][frame], bins = Bins )
                Heat.append(a)
            YTicks = np.array( [ "{:.1f}".format(x) for x in np.linspace(3,12,20) ] )
            # Prefer simulated-time labels; fall back to Start/End metadata.
            try:
                XTicks = np.array( [ "{:.0f}".format(t) for t in np.linspace( self.Meta['SimTime'][0], self.Meta['SimTime'][-1] ,25) ], dtype = int )
            except KeyError:
                XTicks = np.array( [ "{:.0f}".format(t) for t in np.linspace( self.Meta['Start'], self.Meta['End'] ,25) ], dtype = int )
            # Transpose so AGCN bins run vertically and frames horizontally.
            Heat = ( np.asanyarray(Heat) ).transpose()
            ax = sns.heatmap(Heat, cmap = 'hot')
            ax.set_xlabel("Frame", fontsize = 14)
            ax.set_ylabel("AGCN", fontsize =14)
            ax.set_xticklabels(XTicks)
            ax.set_yticklabels(YTicks)
            plt.savefig(self.Base+self.Images+'/'+Name, dpi = 100, bbox_inches='tight')
            plt.close()
        except KeyError:
            # NOTE(review): other methods log to Plotting_Info.txt on missing
            # data; this one prints to stdout — consider unifying.
            print("\nThis quantity does not exist in the metadata.\n")
            return None
def prdf_plot(self, Names = None, Frames = [], He = False, Ho = None, Errors = False):
    # Plot pair/radial distribution functions for each requested frame,
    # optionally overlaying hetero-pair-only ('He'+NAME) and per-species
    # homo-pair-only ('Ho'+NAME+species) curves with shaded error bands,
    # then save one image per (Name, frame) combination.
    #
    #   Names:  metadata keys to plot, e.g. ['pdf', 'rdf']
    #   Frames: frames to plot
    #   He:     also plot the hetero-pair-only distribution
    #   Ho:     list of species for homo-pair-only curves (or None)
    #   Errors: shade +/- one error band from self.Err
    Frames = list(Frames)
    if self.Errors is True:
        Errors = True
    # NOTE(review): the string below is a no-op statement, not a real
    # docstring, because it does not come first in the function body.
    """
    Name: str 'pdf' 'rdf'
    Frames: list frames to be reviewed
    He: bool Whether to look for hetero quantities default is False
    Homo: List of atomic species to be considered as homo pairs only - default is empty list
    Parameters
    ----------
    Name : TYPE
    DESCRIPTION.
    Frames : TYPE, optional
    DESCRIPTION. The default is [].
    He : TYPE, optional
    DESCRIPTION. The default is None.
    Homo : TYPE, optional
    DESCRIPTION. The default is None.
    Returns
    -------
    None.
    """
    for Name in Names:
        for frame in Frames:
            fig, ax = plt.subplots()
            fig.set_size_inches((9,3))
            try:
                # Full-system distribution for this frame.
                ax.plot(self.Meta[Name][Frames.index(frame)][0], self.Meta[Name][Frames.index(frame)][1],
                        color='k', linestyle = 'solid', linewidth = 4, label = "Full system")
                if Errors is True:
                    ax.fill_between(self.Meta[Name][Frames.index(frame)][0],
                                    self.Meta[Name][Frames.index(frame)][1] + self.Err[Name][Frames.index(frame)][1],
                                    self.Meta[Name][Frames.index(frame)][1] - self.Err[Name][Frames.index(frame)][1],
                                    color='k', alpha = 0.25)
                fig.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left',
                           ncol=2, mode="expand", borderaxespad=0. ,fontsize = 14)
            except KeyError:
                with open(self.Base+'Plotting_Info.txt', "a") as f:
                    f.write("\n%s was not found in the provided metadata.\n"%Name)
            if He is False:
                pass
            else:
                # Hetero-pair-only overlay, stored under 'He' + NAME.
                try:
                    ax.plot(self.Meta['He'+Name.upper()][Frames.index(frame)][0], self.Meta['He'+Name.upper()][Frames.index(frame)][1],
                            color='r', linestyle = 'dashed', linewidth = 4, label = "Pair different only")
                    if Errors is True:
                        ax.fill_between(self.Meta['He'+Name.upper()][Frames.index(frame)][0],
                                        self.Meta['He'+Name.upper()][Frames.index(frame)][1] + self.Err['He'+Name.upper()][Frames.index(frame)][1],
                                        self.Meta['He'+Name.upper()][Frames.index(frame)][1] - self.Err['He'+Name.upper()][Frames.index(frame)][1],
                                        color='r', alpha = 0.25)
                except KeyError:
                    with open(self.Base+'Plotting_Info.txt', "a") as f:
                        f.write("\n%s was not found in the metadata.\n"%Name)
            if Ho is None:
                pass
            elif type(Ho) is list:
                # One homo-pair-only curve per requested species.
                for ele in Ho:
                    ax.plot(self.Meta['Ho'+Name.upper()+ele][Frames.index(frame)][0],
                            self.Meta['Ho'+Name.upper()+ele][Frames.index(frame)][1],
                            linestyle = 'dashdot', linewidth = 4,
                            label ="%s only"%ele)
                    if Errors is True:
                        ax.fill_between(self.Meta['Ho'+Name.upper()+ele][Frames.index(frame)][0],
                                        self.Meta['Ho'+Name.upper()+ele][Frames.index(frame)][1] + self.Err['Ho'+Name.upper()+ele][Frames.index(frame)][1],
                                        self.Meta['Ho'+Name.upper()+ele][Frames.index(frame)][1] - self.Err['Ho'+Name.upper()+ele][Frames.index(frame)][1],
                                        alpha = 0.25)
            else:
                with open(self.Base+'Plotting_Info.txt', "a") as f:
                    f.write("\nError in input arguments for the prdf_plot.\n")
            ax.tick_params(axis = 'both', which = 'major', labelsize = 12)
            ax.set_xlabel(r"Distance (Angstrom)", fontsize = 12)
            ax.set_ylabel(Name, fontsize = 12)
            fig.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left',
                       ncol=2, mode="expand", borderaxespad=0. ,fontsize = 14)
            try:
                # Annotate with time/temperature when those keys exist.
                FrameTemp = "{:.1f}".format(self.Meta['Temp'][int(frame)])
                ax.text(1.4*np.amin(self.Meta[Name][Frames.index(frame)][0]), 0.9*np.amax(self.Meta[Name][Frames.index(frame)][1]),
                        "Time: %sps\nTemp: %sK" %(self.Meta['SimTime'][int(frame)],
                                                  FrameTemp), fontsize=13)
            except KeyError:
                with open(self.Base+'Plotting_Info.txt', "a") as f:
                    f.write("\n%s threw an error when using the prdf_plot function.\n"%frame)
            # Output file name encodes which overlays were requested.
            if He is False:
                if Ho is None:
                    plt.savefig(self.Base + self.Images + '/' + Name.upper() + str(frame)+'.png',
                                dpi = 100, bbox_inches='tight')
                elif type(Ho) is list:
                    plt.savefig(self.Base + self.Images + '/' + Name.upper() + '_Ho_' + ''.join(map(str, Ho)) +'_' + str(frame)+'.png',
                                dpi = 100, bbox_inches='tight')
                else:
                    plt.savefig(self.Base + self.Images + '/' + Name.upper() + str(frame)+'.png',
                                dpi = 100, bbox_inches='tight')
            else:
                if Ho is None:
                    plt.savefig(self.Base + self.Images + '/' + Name.upper() +'_He_' + str(frame)+'.png',
                                dpi = 100, bbox_inches='tight')
                elif type(Ho) is list:
                    plt.savefig(self.Base + self.Images + '/' + Name.upper() +'_He_' + '_Ho_' + ''.join(map(str, Ho)) +'_' + str(frame)+'.png',
                                dpi = 100, bbox_inches='tight')
                else:
                    plt.savefig(self.Base + self.Images + '/' + Name.upper() +'_He_' + str(frame)+'.png',
                                dpi = 100, bbox_inches='tight')
            plt.close()
def plot_stats(self, Stats = [], Species = None, Quants = [], Temp = False, Errors = False, Frames = None):
    """Plot time traces of system statistics with the nearest-neighbour
    cutoff overlaid on a twin axis, one figure per entry of `Stats`.

    Args:
        Stats: metadata key prefixes to plot.
        Species: species whose per-species cutoffs ('Cut'+species) are overlaid.
        Quants: suffixes appended (lower-cased) to each Stat key.
        Temp: add a secondary temperature axis along the top.
        Errors: draw error data from self.Err; forced on by self.Errors.
        Frames: explicit x values; derived from metadata when None.
    """
    if self.Errors is True:
        Errors = True
    if Frames is None:
        # Derive the time axis from the simulation bookkeeping metadata,
        # falling back to Start/End/Step when SimTime/Skip are absent.
        try:
            TimeAxis = range(int(self.Meta['Start']),
                             int(self.Meta['SimTime'][-1]),
                             int(int(self.Meta['Skip']) * int(self.Meta['SimTime'][-1]) / int(self.Meta['End'])))
        except KeyError:
            TimeAxis = range(int(self.Meta['Start']),
                             int(self.Meta['End']),
                             int(self.Meta['Step']))
    else:
        TimeAxis = Frames
    for Stat in Stats:
        fig,ax = plt.subplots()
        fig.set_size_inches((9,3))
        for Quant in Quants:
            try:
                ax.plot(TimeAxis,
                        self.Meta[Stat+Quant.lower()],
                        label = Quant.lower())
                if Errors is True:
                    ax.fill_between(TimeAxis,
                                    self.Meta[Stat+Quant.lower()] - self.Err[Stat+Quant.lower()],
                                    self.Meta[Stat+Quant.lower()] + self.Err[Stat+Quant.lower()],
                                    alpha = 0.25)
            except KeyError:
                with open(self.Base+'Plotting_Info.txt', "a") as f:
                    f.write("\nNo %s found in metadata.\n"%(Stat+Quant.lower()))
        try:
            # Overlay the nearest-neighbour cutoff(s) on a twin y-axis.
            ax2=ax.twinx()
            ax2.scatter(TimeAxis,
                        self.Meta['R_Cut'],
                        linewidths=4, label = 'R_Cut', color='g')
            if Species is not None:
                for x in Species:
                    ax2.scatter(TimeAxis,
                                self.Meta['Cut' + x],
                                linewidths=4, label = 'R_Cut' + x)
            if Errors is True:
                ax2.errorbar(TimeAxis, self.Meta['R_Cut'],
                             self.Err['R_Cut'], color='g',
                             capsize = 5, capthick = 3)
                if Species is not None:
                    for x in Species:
                        ax2.errorbar(TimeAxis,self.Meta['Cut' + x],
                                     self.Err['Cut' + x],
                                     capsize = 5, capthick = 3)
        except KeyError:
            pass
        fig.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left',
                   ncol=3, mode="expand", borderaxespad=0. ,fontsize = 12)
        ax.set_xlabel('Time (ps)')
        ax.set_ylabel(Stat.upper())
        if Temp is True:
            ax3 = ax.twiny()
            ax1Ticks = ax.get_xticks()
            ax3Ticks = ax1Ticks
            # FIX: the original referenced an undefined name `ax2Ticks` and
            # called tick_function as a free function (both NameErrors);
            # use ax3Ticks and the bound method so the temperature axis
            # actually renders.
            ax3.set_xticks(ax3Ticks)
            ax3.set_xbound(ax.get_xbound())
            ax3.set_xticklabels(self.tick_function(ax3Ticks))
            ax3.set_xlabel('Temperature (K)')
        plt.savefig(self.Base + self.Images + '/' + str(Stat) + '.png' , dpi = 100, bbox_inches='tight')
        plt.close()
def tick_function(self, X):
    """Translate x-axis tick positions into temperature tick labels.

    Each tick position in `X` is mapped linearly from the minimum
    recorded temperature with a step of (range / (10 * n_samples)),
    and formatted to three decimal places.

    Args:
        X: array of tick positions (supports elementwise arithmetic).

    Returns:
        List of formatted label strings, or None when 'Temp' is missing
        from the metadata.
    """
    try:
        temps = self.Meta['Temp']
        step = (max(temps) - min(temps)) / (10 * len(temps))
        values = min(temps) + X * step
        return ["%.3f" % value for value in values]
    except KeyError:
        return None
def com_plot_bi(self, Dists = None, Species = None, Frames = [0], Errors = False):
    """Plot per-species distance distributions about the cluster or
    sub-cluster centre of mass for each requested frame.

    Args:
        Dists: list drawn from {'MidCoMDist', 'CoMDist'}; anything else
            raises KeyError. None logs a note and returns.
        Species: list of species suffixes for the metadata keys.
        Frames: frame indices to plot.
        Errors: shade +/- one error band; forced on by self.Errors.

    Raises:
        KeyError: if an entry of Dists is not a recognised distribution.
    """
    if self.Errors is True:
        Errors = True
    if Dists is None:
        with open(self.Base+'Plotting_Info.txt', "a") as f:
            f.write("\nNo distributions requested.\n")
        return None
    elif type(Dists) is list:
        for Dist in Dists:
            # FIX: compare string contents with '==' rather than identity
            # ('is'), which is interpreter/interning dependent for str
            # literals and emits a SyntaxWarning on modern Python.
            if Dist == "MidCoMDist":
                D = "Cluster Centre"
            elif Dist == "CoMDist":
                D = "Sub-cluster Centre"
            else:
                raise KeyError("Invalid distribution.\n")
            if Species is None:
                with open(self.Base+'Plotting_Info.txt', "a") as f:
                    f.write("\nNo chemical species requested.\n")
            elif type(Species) is list:
                for Specie in Species:
                    for frame in Frames:
                        try:
                            fig,ax = plt.subplots()
                            fig.set_size_inches(9,3)
                            ax.plot(self.Meta['CoMSpace'], self.Meta[Dist + Specie][frame], color= 'k', linewidth = 4)
                            if Errors is True:
                                ax.fill_between(self.Meta['CoMSpace'],
                                                self.Meta[Dist + Specie][frame] + self.Err[Dist + Specie][frame],
                                                self.Meta[Dist + Specie][frame] - self.Err[Dist + Specie][frame],
                                                color = 'k', alpha = 0.25)
                            ax.set_xlabel('Distance (Angstrom)')
                            ax.set_ylabel('Probability')
                            try:
                                # Time/temperature annotation is optional.
                                ax.text(self.Meta['CoMSpace'][5], 0.65*max(self.Meta[Dist + Specie][frame]), "%s to %s\nTime: %sps\nTemp: %sK"
                                        %(Specie, D, self.Meta['SimTime'][frame], "{:.1f}".format(self.Meta['Temp'][frame])))
                            except KeyError:
                                pass
                            plt.savefig(self.Base + self.Images + '/' + Dist+Specie+str(frame) + '.png',
                                        dpi = 100, bbox_inches='tight')
                            plt.close()
                        except KeyError:
                            with open(self.Base+'Plotting_Info.txt', "a") as f:
                                f.write("\nThere was an error trying to plot %s.\n" %(Dist+Specie))
                            pass
def cna_plot(self, Name = 'CNA_Time', Frames = [], Errors = False):
    """Bar-chart the CNA signature distribution for each requested frame.

    One figure per frame, bars indexed by the stringified master key,
    with optional y error bars, saved as Name + frame + '.png'.

    Args:
        Name: file-name prefix for the saved figures.
        Frames: frame indices to plot.
        Errors: draw y error bars from self.Err; forced on by self.Errors.
    """
    if self.Errors is True:
        Errors = True
    for Frame in Frames:
        try:
            X_CNA = [ str(a) for a in self.Meta['masterkey'] ] # Create a set of ticks for the x-axis
            fig = plt.figure(figsize = (9,3) )
            if Errors is True:
                ax = plt.bar( X_CNA, self.Meta['cna_sigs'][Frame], yerr = self.Err['cna_sigs'][Frame], tick_label = X_CNA )
            else:
                ax = plt.bar( X_CNA, self.Meta['cna_sigs'][Frame], tick_label = X_CNA)
            plt.xlabel("CNA Signature", fontsize = 14)
            plt.ylabel("Probability", fontsize = 14)
            plt.xticks(rotation=90,fontsize = 14)
            try:
                # Annotate near the right edge; optional metadata.
                plt.text( X_CNA[-7], 0.8*np.amax(self.Meta['cna_sigs'][Frame]),
                          'Time: %sps\nTemp: %sK' %(self.Meta["SimTime"][Frame],
                                                    "{:.1f}".format(self.Meta['Temp'][Frame])), fontsize = 14 )
            except KeyError:
                pass
            plt.savefig(self.Base+self.Images+'/'+Name+str(Frame)+'.png', dpi = 100, bbox_inches = 'tight')
            plt.close()
        except KeyError:
            with open(self.Base+'Plotting_Info.txt', "a") as f:
                f.write("\nThis quantitiy, cna, does not exist in the metadata.\n")
            return None
def agcn_histo(self, Frames = [], Errors = False):
    """Plot a 40-bin histogram of the AGCN values for each frame.

    Saved as 'AGCNDist<time>.png' when SimTime exists, otherwise as a
    single 'AGCNDist.png'.

    Args:
        Frames: frame indices to histogram.
        Errors: accepted for interface symmetry; not used here.
    """
    for Frame in Frames:
        fig, ax = plt.subplots()
        fig.set_size_inches(9,3)
        y,binEdges = np.histogram(self.Meta['agcn'][Frame], bins = 40)
        # Bars are drawn at the bin centres rather than the edges.
        bincenters = 0.5*(binEdges[1:]+binEdges[:-1])
        ax.bar(bincenters, y, color='r')
        try:
            ax.text(bincenters[4], 0.7*np.amax(y), "Time : %sps\nTemp : %sK"%(self.Meta['SimTime'][Frame], "{:.1f}".format(self.Meta['Temp'][Frame])) )
            plt.savefig(self.Base + self.Images + '/'+ 'AGCNDist'+str(self.Meta['SimTime'][Frame])+'.png', dpi = 100, bbox_inches='tight')
        except KeyError:
            # Without SimTime/Temp, fall back to an unannotated fixed name.
            plt.savefig(self.Base + self.Images + '/'+ 'AGCNDist.png', dpi = 100, bbox_inches='tight')
        plt.close()
def com_full_plot(self, Frames = [], Errors = False):
    """Plot the full-system centre-of-mass RDF for each requested frame.

    Args:
        Frames: frame indices to plot.
        Errors: shade +/- one error band; forced on by self.Errors.
    """
    if self.Errors is True:
        Errors = True
    for Frame in Frames:
        fig, ax = plt.subplots()
        fig.set_size_inches(9,3)
        ax.plot(self.Meta['CoMSpace'], self.Meta['CoMDist'][Frame], color='k')
        if Errors is True:
            ax.fill_between(self.Meta['CoMSpace'] ,
                            self.Meta['CoMDist'][Frame] + self.Err['CoMDist'][Frame],
                            self.Meta['CoMDist'][Frame] - self.Err['CoMDist'][Frame],
                            color='k', alpha = 0.25)
        ax.set_xlabel('Distance (Angstrom)')
        ax.set_ylabel('RDF')
        try:
            # Annotate and name the file by simulation time when available.
            ax.text(self.Meta['CoMSpace'][5], 0.65*max(self.Meta['CoMDist'][Frame]), "Full System\nTime: %sps\nTemp: %sK" %(self.Meta['SimTime'][Frame], "{:.1f}".format(self.Meta['Temp'][Frame])))
            plt.savefig(self.Base + self.Images + '/'+ 'FullCoM'+str(self.Meta['SimTime'][Frame])+'.png',
                        dpi = 100, bbox_inches='tight')
        except KeyError:
            plt.savefig(self.Base + self.Images + '/'+ 'FullCoM.png', dpi = 100, bbox_inches='tight')
        plt.close()
def Mass(self, r):
    """Return the volume of a sphere of radius `r`.

    Used as a mass profile under the assumption of uniform unit density;
    works elementwise when `r` is a numpy array.
    """
    sphere_volume = 4.0 / 3.0 * np.pi * r ** 3
    return sphere_volume
def cum_com(self, Frames):
    """Plot the cumulative radial mass profile M(r)/M(R) for the given
    frames, against the profile of a uniform sphere for reference.

    Args:
        Frames: frame indices to include (all drawn on one figure).
    """
    fig,ax = plt.subplots()
    fig.set_size_inches(9,3)
    for Frame in Frames:
        # Running integral of the CoM distribution up to each radius.
        # NOTE(review): range(100) assumes CoMSpace has exactly 100
        # points -- confirm against how CoMSpace is built.
        Int = [ np.trapz(self.Meta['CoMDist'][Frame][:x], self.Meta['CoMSpace'][:x]) for x in range(100) ]
        try:
            ax.plot(self.Meta['CoMSpace'], Int, label = '%sps' %(self.Meta['SimTime'][Frame]))
        except KeyError:
            ax.plot(self.Meta['CoMSpace'], Int, label = str(Frame))
    # Reference curve: cumulative mass of a uniform sphere.
    ax.plot(self.Meta['CoMSpace'], self.Mass(self.Meta['CoMSpace'])/max(self.Mass(self.Meta['CoMSpace'])), label = 'Spherical mass distribution', linestyle = 'dashed')
    ax.set_xlabel('Distance from centre (Angstrom)')
    ax.set_ylabel('M(r) / M(R)')
    fig.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left',
               ncol=3, mode="expand", borderaxespad=0. ,fontsize = 12)
    try:
        # `Frame` here is the last value from the loop above, so the file
        # is named after the final plotted frame.
        plt.savefig(self.Base + self.Images + '/'+ 'Cum_CoM'+str(self.Meta['SimTime'][Frame])+'.png',
                    dpi = 100, bbox_inches='tight')
    except KeyError:
        plt.savefig(self.Base + self.Images + '/'+ 'Cum_CoM.png',
                    dpi = 100, bbox_inches='tight')
    plt.close()
def cna_traj(self, Sigs = [], Errors = False):
    """Plot the time evolution of the requested CNA signatures.

    Args:
        Sigs: CNA signatures to plot (entries of Meta['masterkey']).
        Errors: shade +/- one error band; forced on by self.Errors.
    """
    if self.Errors is True:
        Errors = True
    try:
        Time = self.Meta['SimTime']
    except KeyError:
        Time = range(len(self.Meta['cna_sigs']))
    fig,ax = plt.subplots()
    fig.set_size_inches(9,3)
    for x in Sigs:
        try:
            ax.plot(Time, self.Collect_CNA(x), label = x)
            if Errors is True:
                ax.fill_between(Time,
                                np.asarray(self.Collect_CNA(x)) + np.asarray(self.Collect_CNA_error(x)),
                                np.asarray(self.Collect_CNA(x)) - np.asarray(self.Collect_CNA_error(x)),
                                alpha = 0.25)
        except ValueError:
            print(x, type(x))
            with open(self.Base+'Plotting_Info.txt', "a") as f:
                # FIX: the original mixed an f-string with str.format
                # (f"...{0}...".format(x)) so the message always contained
                # the literal '0'; interpolate the signature directly.
                f.write(f"\nSignature, '{x}', not in metadata.\n")
    ax.set_xlabel('Time (ps)')
    ax.set_ylabel('Probability')
    fig.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left',
               ncol=3, mode="expand", borderaxespad=0. ,fontsize = 12)
    plt.savefig(self.Base + self.Images + '/'+ 'CNA_Traj'+'.png',
                dpi = 100, bbox_inches='tight')
    plt.close()
def h_c(self, Errors = False):
    """Plot the collectivity (h) and concertedness (c) statistics
    against simulation time.

    Args:
        Errors: shade +/- one error band; forced on by self.Errors.
    """
    if self.Errors is True:
        Errors = True
    Time = self.Meta['SimTime']
    fig,ax = plt.subplots()
    fig.set_size_inches(9,3)
    ax.plot(Time, self.Meta['h'], label = 'Collectivity')
    ax.plot(Time, self.Meta['c'], label = 'Concertedness')
    if Errors is True:
        # NOTE(review): the bands use truncated time axes (Time[1:] and
        # Time[2:-1]) while the y-data are not sliced, which looks
        # inconsistent with the full-length plots above -- verify that
        # the lengths of h, c and SimTime actually line up.
        ax.fill_between(Time[1:],
                        self.Meta['h']+self.Err['h'],
                        self.Meta['h']-self.Err['h'],
                        alpha = 0.25)
        ax.fill_between(Time[2:-1],
                        self.Meta['c']+self.Err['c'],
                        self.Meta['c']-self.Err['c'],
                        alpha = 0.25)
    ax.set_xlabel('Time (ps)')
    ax.set_ylabel(' H / C')
    fig.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left',
               ncol=3, mode="expand", borderaxespad=0. ,fontsize = 12)
    plt.savefig(self.Base + self.Images + '/'+ 'HC_Stats'+'.png',
                dpi = 100, bbox_inches='tight')
    plt.close()
def pair_plot(Data, System):
    """Plot counts of Pt-only, Au-only and hetero atomic pairs over time.

    NOTE(review): unlike the surrounding methods this takes no `self`;
    if called on an instance, `Data` would receive the instance --
    presumably it is called via the class with explicit arguments.
    Confirm the intended call style. Also assumes `sys` is imported at
    module level.

    Args:
        Data: metadata mapping with 'HeAdj', 'HoAdjPt', 'HoAdjAu',
            'SimTime' entries.
        System: mapping with 'base_dir' and 'plot_dir' output paths.
    """
    try:
        HeAdj = Data['HeAdj']
        NewHe = []
    except KeyError:
        # Hetero adjacency data is mandatory for this plot; abort entirely.
        sys.exit()
    for x in range(len(HeAdj)):
        try:
            NewHe.append(sum(HeAdj[x][1]))
        except TypeError:
            # Skip frames whose hetero adjacency entry is not summable.
            pass
    fig,ax = plt.subplots()
    fig.set_size_inches(9,3)
    ax.plot(Data['SimTime'], [sum(Data['HoAdjPt'][x]) for x in range(len(Data['HoAdjPt']))], 'orange', label='Pt only')
    ax2 = ax.twinx()
    ax2.plot(Data['SimTime'], [sum(Data['HoAdjAu'][x]) for x in range(len(Data['HoAdjAu']))] , 'blue', label = 'Au only')
    ax3 = ax.twiny()
    ax3.plot(NewHe, label = 'Hetero pairs only', color='red')
    # Hide the duplicate twin-axis scales and blank the primary y labels.
    ax2.axes.yaxis.set_visible(False)
    ax3.axes.xaxis.set_visible(False)
    labels = [item.get_text() for item in ax.get_yticklabels()]
    empty_string_labels = ['']*len(labels)
    ax.set_yticklabels(empty_string_labels)
    ax.set_xlabel('Time (ps)')
    ax.set_ylabel('Number of pairs')
    fig.legend(bbox_to_anchor=(0, 1.0, 1., 0), loc='lower left',
               ncol=3, mode="expand", borderaxespad=0. ,fontsize = 12)
    plt.savefig(System['base_dir']+System['plot_dir'] + '/Pairs.png', dpi = 100, bbox_inches='tight')
def All_CNA_Traj(System, Pipeline, outfile):
    """Plot the (4 2 2), (4 2 1) and (3 1 1) CNA signature trajectories
    for each simulation directory on a 2x2 grid of shared axes.

    NOTE(review): the hard-wired ax1..ax4 / CNA[0..11] layout assumes
    exactly four entries in System['iter_dir'] -- confirm.

    Args:
        System: mapping with the 'iter_dir' list of simulation dirs.
        Pipeline: object whose BigMeta[dir]['cna'] holds per-frame
            (masterkey, heights) pairs and ['SimTime'] the time axis.
        outfile: path for the saved figure.
    """
    CNA = []
    for x in System['iter_dir']:
        for y in [(4,2,2), (4,2,1), (3,1,1)]:
            # Locate the signature in frame 0's master key, then collect
            # its height from every frame of this directory.
            Index = Pipeline.BigMeta[x]['cna'][0][0].index(y)
            Temp = [ Pipeline.BigMeta[x]['cna'][i][1][Index] for i in range(len(Pipeline.BigMeta[x]['cna'])) ]
            CNA.append(Temp)
    # Shared time axis taken from the first directory.
    x = Pipeline.BigMeta[System['iter_dir'][0]]['SimTime']
    fig, axs = plt.subplots(2, 2, sharex='col', sharey='row')
    fig.set_size_inches(9,3)
    (ax1, ax2), (ax3, ax4) = axs
    ax1.plot(x, CNA[0], label = '(4 2 2)')
    ax1.plot(x, CNA[1], label = '(4 2 1)')
    ax1.plot(x, CNA[2], label = '(3 1 1)')
    ax2.plot(x, CNA[3])
    ax2.plot(x, CNA[4])
    ax2.plot(x, CNA[5])
    ax3.plot(x, CNA[6])
    ax3.plot(x, CNA[7])
    ax3.plot(x, CNA[8])
    ax4.plot(x, CNA[9])
    ax4.plot(x, CNA[10])
    ax4.plot(x, CNA[11])
    for ax in axs.flat:
        ax.label_outer()
        ax.set_ylim(0, 0.7)
    fig.legend( loc='upper center', ncol=3, fontsize = 10)
    plt.savefig(outfile, dpi = 100, bbox_inches='tight')
"""
##########################################################################
The following are old functions with little utility but may be
reintroduced if there is demand for such things.
def AGCN_Excess():
Excess = []
for i in range( len( AverageMeta['agcn'] ) ):
Temp = [ a>12 for a in AverageMeta['agcn'][i] ]
Excess.append(np.sum(Temp))
return Excess
def Strange_CNA():
Indices = [ 14, 15, 24, 25, 38 ] #37 and on to the end are all odd
CNA = AverageMeta['cna'] # All of the heights
Strange_Dict = {}
for Index in Indices:
Strange_Dict[AverageMeta['masterkey'][Index]] = np.zeros((len(CNA)), dtype = np.float64)
for Key in AverageMeta['masterkey'][Indices[-1]:]:
Strange_Dict[Key] = np.zeros((len(CNA)), dtype = np.float64)
Key = list(Strange_Dict.keys())
Mast = AverageMeta['masterkey']
for frame in range(len(CNA)):
for Sig in CNA[frame]:
for obj in Key:
if list(CNA[frame]).index(Sig) == Mast.index(obj):
if Sig > 0:
Strange_Dict[obj][frame] = 1
Bar_Heights = []
for Item in Strange_Dict:
Bar_Heights.append( np.sum(Strange_Dict[Item]) )
return (Strange_Dict.keys(), Bar_Heights)
fig, ax = plt.subplots()
fig.set_size_inches((21,7))
ax.plot(New, label = '(4,5,5)', color='k')
Ticks = range(0,1500,50)
for tick in Ticks:
ax.vlines(tick, ymin=0, ymax = 1.1*np.amax(New), color='r', linestyle = '--')
ax2 = ax.twinx()
ax2.scatter(Ticks, AverageMeta['R_Cut'], linewidths = 6, color='g')
ax.tick_params(axis = 'both', which = 'major', labelsize = 20)
ax2.tick_params(axis = 'both', which = 'major', labelsize = 20)
ax2.set_ylabel("Nearest neighbour cutoff (Angstrom)", fontsize = 20)
ax.set_xlabel("Time (ps)", fontsize = 20)
ax.set_ylabel("Probability", fontsize = 20)
fig, ax = plt.subplots()
fig.set_size_inches((21,7))
rect =ax.bar(X_Key, B, tick_label = X_Key)
plt.xticks(rotation = 90)
ax.tick_params(axis='both', which='major', labelsize=20)
ax.set_xlabel("CNA signature", fontsize = 20)
ax.set_ylabel("Number of frames", fontsize=20)
autolabel(rect)
def cna_plotter(Frame):
X_CNA = [ str(a) for a in AverageMeta['masterkey'][:36] ] # Create a set of ticks for the x-axis
fig = plt.figure(figsize = (9,3) )
ax = plt.bar( X_CNA, AverageMeta['cna'][Frame][:36], tick_label = X_CNA )
plt.xlabel("CNA Signature", fontsize = 12)
plt.ylabel("Probability", fontsize = 12)
plt.xticks(rotation=90,fontsize = 14)
plt.text( X_CNA[20], 0.8*np.amax(AverageMeta['cna'][Frame]), 'Time: %sps\nTemp: %sK' %(AverageMeta["SimTime"][Frame], AverageMeta['Temp'][Frame]) )
plt.savefig(path + 'Images/'+ 'CNA'+str(Frame)+'.png', dpi = 100, bbox_inches='tight')
##########################################################################
""" | StarcoderdataPython |
1959893 | <gh_stars>0
import os
from setuptools import setup
setup(
    # All static package metadata is expected to live in setup.cfg; this
    # call only enables setuptools-scm so the version is derived from the
    # SCM (git) tags and written to Chromos/_version.py at build time.
    use_scm_version={
        'write_to': os.path.join('Chromos', '_version.py')
    }
)
| StarcoderdataPython |
6456847 | <reponame>kiss2u/google-research<gh_stars>1-10
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper file to run the discover concept algorithm in the AwA dataset."""
# lint as: python3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import pickle
from absl import app
import keras
import keras.backend as K
from keras.layers import Dense
from keras.layers import Input
from keras.models import Model
from keras.optimizers import SGD
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
import tensorflow.compat.v1 as tf
def load_model(classes):
  """Loads the pretrained model.

  Builds an ImageNet-pretrained InceptionV3 backbone (frozen), stacks two
  1024-unit relu 'concept' layers and a len(classes)-way logit layer on
  top, and restores fine-tuned weights from 'inception_final.h5'.

  Args:
    classes: ordered list of class names; its length sets the output size.

  Returns:
    Tuple (finetuned_model, feature_dense_model, fc_model, dense2,
    predict): the full frozen classifier, a model emitting the first
    concept layer's activations, a head mapping those activations to
    logits, and the two shared layer objects.
  """
  model = keras.applications.inception_v3.InceptionV3(include_top=True,
                                                      weights='imagenet')
  # Drop the original ImageNet classification layer and freeze the backbone.
  model.layers.pop()
  for layer in model.layers:
    layer.trainable = False
  last = model.layers[-1].output
  dense1 = Dense(1024, activation='relu', name='concept1')
  dense2 = Dense(1024, activation='relu', name='concept2')
  fc1 = dense1(last)
  fc2 = dense2(fc1)
  predict = Dense(len(classes), name='output')
  logits = predict(fc2)

  def cross_entropy_loss():
    # Returns the cross entropy loss (softmax applied to raw logits).
    def loss(y_true, y_pred):
      return tf.reduce_mean(
          tf.nn.softmax_cross_entropy_with_logits_v2(
              labels=y_true, logits=y_pred))
    return loss

  finetuned_model = Model(model.input, logits)
  finetuned_model.compile(
      optimizer=SGD(lr=0.01),
      loss=cross_entropy_loss(),
      metrics=['accuracy', 'top_k_categorical_accuracy'])
  finetuned_model.classes = classes
  finetuned_model.load_weights('inception_final.h5')
  # Model exposing the first concept layer's activations as features.
  feature_dense_model = Model(model.input, fc1)
  # Standalone head: concept1 activations -> logits, sharing dense2/predict.
  fc1_input = Input(shape=(1024,))
  fc2_temp = dense2(fc1_input)
  logits_temp = predict(fc2_temp)
  fc_model = Model(fc1_input, logits_temp)
  fc_model.compile(
      optimizer=SGD(lr=0.01),
      loss=cross_entropy_loss(),
      metrics=['accuracy', 'top_k_categorical_accuracy'])
  for layer in finetuned_model.layers:
    layer.trainable = False
  return finetuned_model, feature_dense_model, fc_model, dense2, predict
def load_data(train_dir, size, batch_size,
              pretrained=True, noise=0.):
  """Loads data and adding noise.

  With pretrained=False, reads images from `train_dir` (90/10
  train/validation split), extracts concept-layer features with the
  fine-tuned model, and caches labels/features/classes as .npy and
  pickle files in the working directory. With pretrained=True, loads
  those cached artefacts instead.

  Args:
    train_dir: directory of per-class image folders.
    size: target (height, width) for the image generators.
    batch_size: generator batch size.
    pretrained: load cached arrays instead of recomputing them.
    noise: stddev of Gaussian noise added to validation images only.

  Returns:
    Tuple (y_train_logit, y_val_logit, y_train, y_val, f_train, f_val,
    dense2, predict).
  """
  def rand_noise(img):
    # Additive Gaussian perturbation used on the validation split.
    img_noisy = img + np.random.normal(scale=noise, size=img.shape)
    return img_noisy
  if not pretrained:
    gen = keras.preprocessing.image.ImageDataGenerator(validation_split=0.1)
    gen_noisy = keras.preprocessing.image.ImageDataGenerator(
        validation_split=0.1, preprocessing_function=rand_noise)
    # Augmented generator used only to recover the class-name ordering.
    aug = keras.preprocessing.image.ImageDataGenerator(
        rotation_range=20,
        zoom_range=0.15,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.15,
        horizontal_flip=True,
        fill_mode='nearest',
        validation_split=0.1)
    batches = aug.flow_from_directory(
        train_dir,
        target_size=size,
        class_mode='categorical',
        shuffle=True,
        batch_size=batch_size,
        subset='training')
    # Unshuffled generators so features stay aligned with `.classes`.
    batches_fix_train = gen.flow_from_directory(
        train_dir,
        target_size=size,
        class_mode='categorical',
        shuffle=False,
        batch_size=batch_size,
        subset='training')
    batches_fix_val = gen_noisy.flow_from_directory(
        train_dir,
        target_size=size,
        class_mode='categorical',
        shuffle=False,
        batch_size=batch_size,
        subset='validation')
    # Invert class_indices into an index-ordered list of class names.
    classes = list(iter(batches.class_indices))
    for c in batches.class_indices:
      classes[batches.class_indices[c]] = c
    num_train_samples = sum([len(files) for _, _, files in os.walk(train_dir)])
    num_train_steps = math.floor(num_train_samples * 0.9 / batch_size)
    num_valid_steps = math.floor(num_train_samples * 0.1 / batch_size)
    y_train = batches_fix_train.classes
    y_val = batches_fix_val.classes
    _, feature_dense_model, _, \
        dense2, predict = load_model(classes)
    # Concept-layer activations for both splits.
    f_train = feature_dense_model.predict_generator(
        batches_fix_train,
        steps=num_train_steps,
        workers=40,
        use_multiprocessing=False)
    f_val = feature_dense_model.predict_generator(
        batches_fix_val,
        steps=num_valid_steps,
        workers=40,
        use_multiprocessing=False)
    # Labels truncated to the number of extracted feature rows; the
    # 50-class count matches the AwA dataset.
    y_train_logit = tf.keras.utils.to_categorical(
        y_train[:f_train.shape[0]], num_classes=50)
    y_val_logit = tf.keras.utils.to_categorical(
        y_val[:f_val.shape[0]],
        num_classes=50,
    )
    # Cache everything so later runs can use pretrained=True.
    np.save('y_train_logit.npy', y_train_logit)
    np.save('y_val_logit.npy', y_val_logit)
    np.save('y_train.npy', y_train)
    np.save('y_val.npy', y_val)
    np.save('f_train.npy', f_train)
    np.save('f_val.npy', f_val)
    with open('classes.pickle', 'wb') as handle:
      pickle.dump(classes, handle, pickle.HIGHEST_PROTOCOL)
  else:
    with open('classes.pickle', 'rb') as handle:
      classes = pickle.load(handle)
    _, feature_dense_model, _, \
        dense2, predict = load_model(classes)
    y_train_logit = np.load('y_train_logit.npy')
    y_val_logit = np.load('y_val_logit.npy')
    y_train = np.load('y_train.npy')
    y_val = np.load('y_val.npy')
    f_train = np.load('f_train.npy')
    f_val = np.load('f_val.npy')
  return y_train_logit, y_val_logit, y_train, y_val, \
      f_train, f_val, dense2, predict
def target_category_loss(x, category_index, nb_classes):
  """Mask `x` so only the `category_index` component contributes."""
  category_mask = K.one_hot([category_index], nb_classes)
  return x * category_mask
def get_ace_concept(concept_arraynew_active, dense2, predict, f_train,
                    concepts_to_select):
  """Calculates the ACE concepts.

  Fits a one-vs-rest logistic-regression direction (CAV) for each active
  concept cluster plus 200 random directions, then keeps the concepts
  whose TCAV counts are statistically significant (mean + 2 std of the
  random baseline) for the most classes.

  Args:
    concept_arraynew_active: (n_cluster, n_percluster, 1024) activations
      of the active concept clusters.
    dense2, predict: shared layers from load_model.
    f_train: (num_samples, 1024) concept-layer training activations.
    concepts_to_select: number of concept directions to return.

  Returns:
    (1024, concepts_to_select) array of selected CAV directions.
  """
  # Rebuild the logits head on top of concept-space inputs.
  concept_input = Input(shape=(1024,), name='concept_input')
  fc2_tcav = dense2(concept_input)
  softmax_tcav = predict(fc2_tcav)
  tcav_model = Model(inputs=concept_input, outputs=softmax_tcav)
  # Raw logits (no activation); lr=0.0 because the head is never trained.
  tcav_model.layers[-1].activation = None
  tcav_model.layers[-1].trainable = False
  tcav_model.layers[-2].trainable = False
  tcav_model.compile(
      loss='mean_squared_error',
      optimizer=SGD(lr=0.0),
      metrics=['binary_accuracy'])
  tcav_model.summary()
  n_cluster = concept_arraynew_active.shape[0]
  n_percluster = concept_arraynew_active.shape[1]
  print(concept_arraynew_active.shape)
  weight_ace = np.zeros((1024, n_cluster))
  # NOTE(review): 50 classes and 134 clusters are hard-coded here --
  # confirm they match the dataset / number of active clusters.
  tcav_list_rand = np.zeros((50, 200))
  tcav_list_ace = np.zeros((50, 134))
  # One-vs-rest CAV per concept cluster.
  for i in range(n_cluster):
    y = np.zeros((n_cluster * n_percluster))
    y[i * n_percluster:(i + 1) * n_percluster] = 1
    clf = LogisticRegression(
        random_state=0,
        solver='lbfgs',
        max_iter=10000,
        C=10.0,
        multi_class='ovr').fit(concept_arraynew_active.reshape((-1, 1024)), y)
    weight_ace[:, i] = clf.coef_
  # Random-label directions used as the significance baseline.
  weight_rand = np.zeros((1024, 200))
  for i in range(200):
    y = np.random.randint(2, size=n_cluster * n_percluster)
    clf = LogisticRegression(
        random_state=0,
        solver='lbfgs',
        max_iter=10000,
        C=10.0,
        multi_class='ovr').fit(concept_arraynew_active.reshape((-1, 1024)), y)
    weight_rand[:, i] = clf.coef_
  sig_list = np.zeros(n_cluster)
  # Per-class TCAV: count samples whose logit gradient has positive
  # projection on each direction; a concept scores when it beats the
  # random baseline by two standard deviations.
  for j in range(50):
    grads = (
        K.gradients(target_category_loss(softmax_tcav, j, 50),
                    concept_input)[0])
    gradient_function = K.function([tcav_model.input], [grads])
    grads_val = gradient_function([f_train])[0]
    grad_rand = np.matmul(grads_val, weight_rand)
    grad_ace = np.matmul(grads_val, weight_ace)
    tcav_list_rand[j, :] = np.sum(grad_rand > 0.000, axis=(0))
    tcav_list_ace[j, :] = np.sum(grad_ace > 0.000, axis=(0))
    mean = np.mean(tcav_list_rand[j, :])
    std = np.std(tcav_list_rand[j, :])
    sig_list += (tcav_list_ace[j, :] > mean + std * 2.0).astype(int)
  # Keep the directions significant for the most classes.
  top_k_index = np.array(sig_list).argsort()[-1 * concepts_to_select:][::-1]
  print(sig_list)
  print(top_k_index)
  return weight_ace[:, top_k_index]
def get_pca_concept(f_train, concepts_to_select):
  """Returns the leading principal components of f_train as concepts.

  Args:
    f_train: (num_samples, feature_dim) activation matrix.
    concepts_to_select: number of leading components to keep.

  Returns:
    (feature_dim, concepts_to_select) array whose columns are the
    principal components ordered by explained variance.
  """
  pca = PCA()
  pca.fit(f_train)
  # Generalised from the hard-coded 1024 so any feature width works, and
  # the loop-with-break is replaced by slicing the component matrix.
  weight_pca = np.zeros((f_train.shape[1], concepts_to_select))
  for count, pc in enumerate(pca.components_[:concepts_to_select]):
    weight_pca[:, count] = pc
  return weight_pca
def load_conceptarray():
  """Load the preprocessed ACE concept arrays from the working directory.

  Reads the .npy/.pickle artefacts produced by running the ACE code.

  Returns:
    Tuple (concept array, active-cluster concept array, concept name
    list, active cluster index list).
  """
  all_concepts = np.load('concept_arraynew.npy')
  with open('concept_list.pickle', 'rb') as handle:
    concept_names = pickle.load(handle)
  with open('active_list.pickle', 'rb') as handle:
    active_indices = pickle.load(handle)
  active_concepts = np.load('concept_arraynew_active.npy')
  return all_concepts, active_concepts, concept_names, active_indices
def plot_nearestneighbors(concept_arraynew_active, concept_matrix, concept_list,
                          active_list, filename='top_concepts_AwA'):
  """Plots nearest neighbors.

  For each discovered concept direction, finds the concept cluster with
  the strongest (absolute) mean similarity, then tiles the 8 patch
  images from that cluster that score highest (or lowest, for
  negatively-aligned concepts) into an 8x8 figure grid.

  Args:
    concept_arraynew_active: (n_cluster, n_percluster, dim) activations.
    concept_matrix: (dim, n_concepts) concept directions.
    concept_list: cluster names, indexed via active_list.
    active_list: indices of the active clusters into concept_list.
    filename: output path for the saved figure.
  """
  # Mean similarity of every cluster to every concept, centred per concept.
  simarray1 = np.mean(
      np.matmul(concept_arraynew_active, concept_matrix), axis=1)
  simarray1 = simarray1 - np.mean(simarray1, axis=(0))
  simarray_0mean_unitnorm = (simarray1 / np.linalg.norm(simarray1, axis=0))
  # Best-matching cluster per concept (by absolute similarity).
  top_cluster = (np.argmax(np.abs(simarray_0mean_unitnorm), axis=0))
  for top in top_cluster:
    print(concept_list[active_list[top]])
  # True where a concept is dominated by negative similarity.
  neglist = np.abs(np.min(simarray_0mean_unitnorm, axis=0)) > np.max(
      simarray_0mean_unitnorm, axis=0)
  fig = plt.figure(figsize=(18, 28))
  for topc, top in enumerate(top_cluster):
    # Patch images for this cluster as dumped by the ACE pipeline.
    mypath = './work_dir/concepts/concept_fc1_' + concept_list[
        active_list[top]] + '/'
    onlyfiles = [f for f in os.listdir(mypath) if
                 os.path.isfile(os.path.join(mypath, f))]
    top_image = np.matmul(concept_arraynew_active[top, :, :],
                          concept_matrix[:, topc:topc + 1])
    # Most aligned patches; least aligned for negative concepts.
    if not neglist[topc]:
      top_idx = np.argsort(top_image[:, 0])[::-1][:8]
    else:
      top_idx = np.argsort(top_image[:, 0])[:8]
    tempcount = 0
    for count, image in enumerate(onlyfiles):
      if count in top_idx:
        tempcount += 1
        # Row per concept, up to 8 patch thumbnails per row.
        fig.add_subplot(8, 8, 8 * topc + tempcount)
        img = mpimg.imread(os.path.join(mypath, image))
        plt.imshow(img)
        plt.axis('off')
  plt.savefig(filename)
def main(_):
  """No-op entry point required by absl.app; all work is done elsewhere."""
  return


if __name__ == '__main__':
  app.run(main)
| StarcoderdataPython |
6410423 | # cook your dish here
for i in range(int(input())):
x=int(input())
if x%4!=0:
print("NO")
continue
else:
print("YES")
A=[]
B=[]
for i in range(0,x//4):
A.append(i+1)
A.append(x-i)
for i in range(x//4,(x//2)):
B.append(i+1)
B.append(x-i)
A.sort()
B.sort()
for i in A:
print(i,end=" ")
print()
for i in B:
print(i,end=" ")
print()
| StarcoderdataPython |
4814012 |
from ..utils import Object
class GetChatAdministrators(Object):
    """
    Returns a list of administrators of the chat with their custom titles

    Attributes:
        ID (:obj:`str`): ``GetChatAdministrators``

    Args:
        chat_id (:obj:`int`):
            Chat identifier

    Returns:
        ChatAdministrators

    Raises:
        :class:`telegram.Error`
    """
    ID = "getChatAdministrators"

    def __init__(self, chat_id, extra=None, **kwargs):
        self.extra = extra
        # Identifier of the chat whose administrators are requested.
        self.chat_id = chat_id  # int

    @staticmethod
    def read(q: dict, *args) -> "GetChatAdministrators":
        # Deserialize from a TDLib response dict; extra args are ignored.
        return GetChatAdministrators(q.get('chat_id'))
| StarcoderdataPython |
8167530 | """
clear database script
"""
from storage import RedisClient
from configparser import ConfigParser, NoOptionError
def try_to_get_options(cfg_func, section, key):
    """Fetch a config option via `cfg_func`, returning None when absent.

    Args:
        cfg_func: a bound ConfigParser getter (get / getint / ...).
        section: section name to read from.
        key: option name within the section.

    Returns:
        The parsed value, or None if the option does not exist.

    Raises:
        TypeError: if `cfg_func` is not callable.
    """
    if not callable(cfg_func):
        raise TypeError('cfg_func must a config parse get* function')
    try:
        return cfg_func(section, key)
    except NoOptionError:
        return None
# Interactively ask for the config file location ('d' selects the default).
while True:
    path = input('Please input config file path(if you use default file type \'d\'.): ')
    if path == 'd':
        path = 'proxy.conf'
    sure = input('Are you sure the config file in \'{}\'. [y/n]: '.format(path))
    if sure == 'y':
        break

cfg = ConfigParser()
cfg.read(path)

# Every option is optional: try_to_get_options returns None when absent.
REDIS_HOST = try_to_get_options(cfg.get, 'redis', 'host')
REDIS_PORT = try_to_get_options(cfg.getint, 'redis', 'port')
REDIS_PASSWORD = try_to_get_options(cfg.get, 'redis', 'password')
REDIS_KEY = try_to_get_options(cfg.get, 'redis', 'key')

# FIX: the password argument contained a '<PASSWORD>' placeholder, which is
# not valid Python; pass the value parsed from the config file instead.
redis_client = RedisClient(host=REDIS_HOST, port=REDIS_PORT, password=REDIS_PASSWORD, s_key=REDIS_KEY)

count = redis_client.count()
if count == 0:
    print('Already cleaning!')
else:
    redis_client.show()
    sure = input('Are you sure remove that data? amount {} items! [y/n]: '.format(count))
    if sure == 'y':
        # NOTE(review): only ranks 0-100 are removed even when count > 100
        # -- confirm whether a full wipe was intended.
        redis_client.remove_by_range(0, 100)
    else:
        print('Good luck! Bye Bye')
4971780 | from qulab.math.qst import *
import numpy as np
def randomRho(n):
    """Generate a random density matrix for an n-qubit system.

    Draws a random complex row vector with components uniform in
    [-0.5, 0.5) x [-0.5, 0.5)j and forms the normalised rank-one
    projector v† v / (v v†): a Hermitian, unit-trace, positive
    semidefinite 2**n x 2**n matrix.
    """
    dim = 2 ** n
    v = np.random.rand(1, dim) + np.random.rand(1, dim) * 1j - 0.5 - 0.5j
    projector = np.conj(v).T.dot(v)
    norm = np.conj(v).dot(v.T)
    return projector / norm
def aquireData(rho, transform):
    """Simulate measurement populations of `rho` under `transform`.

    Applies U rho U† for the unitary built by transformMatrix and
    returns the real diagonal (the measurement populations) with the
    first entry dropped.
    """
    U = transformMatrix(transform)
    rotated = U.dot(rho.dot(np.conj(U.T)))
    populations = np.real(np.diag(rotated))
    return populations[1:]
def test_qst():
    """End-to-end QST round trip: a random 4-qubit density matrix must be
    recovered from simulated measurement data for every transform."""
    n = 4
    rho = randomRho(n)
    # Reference parameter vector for the generated state.
    V = rhoToV(rho)
    P = []
    for index, transform in enumerate(transformList(n)):
        z = aquireData(rho, transform)
        P.extend(list(z))
    # Reconstruct the parameters from the accumulated measurement data.
    v = acquireVFromData(n, P)
    assert np.sum((v - V)**2) < 1e-12
    assert np.max(np.abs(rho - vToRho(v))) < 1e-6
| StarcoderdataPython |
1826055 | import os
import platform
import sys
from cffi import FFI
# Public names of this module; `libca` is presumably defined further down
# in the file (not visible in this chunk) -- confirm.
__all__ = ['ffi', 'libca']

# Single shared FFI instance on which the Channel Access (cadef.h) C API
# declarations below are registered.
ffi = FFI()

# cadef.h
ffi.cdef("""
typedef void *chid;
typedef chid chanId;
typedef long chtype;
typedef double ca_real;
typedef void *evid;
/* arguments passed to user connection handlers */
struct connection_handler_args {
chanId chid; /* channel id */
long op; /* one of CA_OP_CONN_UP or CA_OP_CONN_DOWN */
};
typedef void caCh (struct connection_handler_args args);
/* CFFI does not support bit field */
/*
typedef struct ca_access_rights {
unsigned read_access:1;
unsigned write_access:1;
} caar;
*/
typedef struct ca_access_rights {
unsigned access;
} caar;
/* arguments passed to user access rights handlers */
struct access_rights_handler_args {
chanId chid; /* channel id */
caar ar; /* new access rights state */
};
typedef void caArh (struct access_rights_handler_args args);
/*
* Arguments passed to event handlers and get/put call back handlers.
*
* The status field below is the CA ECA_XXX status of the requested
* operation which is saved from when the operation was attempted in the
* server and copied back to the clients call back routine.
* If the status is not ECA_NORMAL then the dbr pointer will be NULL
* and the requested operation can not be assumed to be successful.
*/
typedef struct event_handler_args {
void *usr; /* user argument supplied with request */
chanId chid; /* channel id */
long type; /* the type of the item returned */
long count; /* the element count of the item returned */
const void *dbr; /* a pointer to the item returned */
int status; /* ECA_XXX status of the requested op from the server */
} evargs;
typedef void caEventCallBackFunc (struct event_handler_args);
void ca_test_event
(
struct event_handler_args
);
/* arguments passed to user exception handlers */
struct exception_handler_args {
void *usr; /* user argument supplied when installed */
chanId chid; /* channel id (may be nill) */
long type; /* type requested */
long count; /* count requested */
void *addr; /* user's address to write results of CA_OP_GET */
long stat; /* channel access ECA_XXXX status code */
long op; /* CA_OP_GET, CA_OP_PUT, ..., CA_OP_OTHER */
const char *ctx; /* a character string containing context info */
const char *pFile; /* source file name (may be NULL) */
unsigned lineNo; /* source file line number (may be zero) */
};
typedef unsigned CA_SYNC_GID;
/*
* External OP codes for CA operations
*/
#define CA_OP_GET 0
#define CA_OP_PUT 1
#define CA_OP_CREATE_CHANNEL 2
#define CA_OP_ADD_EVENT 3
#define CA_OP_CLEAR_EVENT 4
#define CA_OP_OTHER 5
/*
* used with connection_handler_args
*/
#define CA_OP_CONN_UP 6
#define CA_OP_CONN_DOWN 7
/* depricated */
#define CA_OP_SEARCH 2
short ca_field_type(chid chan);
unsigned long ca_element_count(chid chan);
const char * ca_name (chid chan);
void ca_set_puser (chid chan, void *puser);
void * ca_puser (chid chan);
unsigned ca_read_access (chid chan);
unsigned ca_write_access (chid chan);
/*
* cs_ - `channel state'
*
* cs_never_conn valid chid, IOC not found
* cs_prev_conn valid chid, IOC was found, but unavailable
* cs_conn valid chid, IOC was found, still available
* cs_closed channel deleted by user
*/
enum channel_state {cs_never_conn, cs_prev_conn, cs_conn, cs_closed};
enum channel_state ca_state (chid chan);
/************************************************************************/
/* Perform Library Initialization */
/* */
/* Must be called once before calling any of the other routines */
/************************************************************************/
enum ca_preemptive_callback_select
{ ca_disable_preemptive_callback, ca_enable_preemptive_callback };
int ca_context_create(enum ca_preemptive_callback_select enable_premptive);
void ca_detach_context ();
/************************************************************************/
/* Remove CA facility from your task */
/* */
/* Normally called automatically at task exit */
/************************************************************************/
void ca_context_destroy (void);
typedef unsigned capri;
#define CA_PRIORITY_MAX 99
#define CA_PRIORITY_MIN 0
#define CA_PRIORITY_DEFAULT 0
#define CA_PRIORITY_DB_LINKS 80
#define CA_PRIORITY_ARCHIVE 20
#define CA_PRIORITY_OPI 0
/*
* ca_create_channel ()
*
* pChanName R channel name string
* pConnStateCallback R address of connection state change
* callback function
* pUserPrivate R placed in the channel's user private field
* o can be fetched later by ca_puser(CHID)
* o passed as void * arg to *pConnectCallback above
* priority R priority level in the server 0 - 100
* pChanID RW channel id written here
*/
int ca_create_channel
(
const char *pChanName,
caCh *pConnStateCallback,
void *pUserPrivate,
capri priority,
void *pChanID
);
/*
* ca_change_connection_event()
*
* chan R channel identifier
* pfunc R address of connection call-back function
*/
int ca_change_connection_event
(
chid chan,
caCh * pfunc
);
/*
* ca_replace_access_rights_event ()
*
* chan R channel identifier
* pfunc R address of access rights call-back function
*/
int ca_replace_access_rights_event (
chid chan,
caArh *pfunc
);
/*
* ca_add_exception_event ()
*
* replace the default exception handler
*
* pfunc R address of exception call-back function
* pArg R copy of this pointer passed to exception
* call-back function
*/
typedef void caExceptionHandler (struct exception_handler_args);
int ca_add_exception_event
(
caExceptionHandler *pfunc,
void *pArg
);
/*
* ca_clear_channel()
* - deallocate resources reserved for a channel
*
* chanId R channel ID
*/
int ca_clear_channel
(
chid chanId
);
/************************************************************************/
/* Write a value to a channel */
/************************************************************************/
/*
* ca_array_put()
*
* type R data type from db_access.h
* count R array element count
* chan R channel identifier
* pValue R new channel value copied from this location
*/
int ca_array_put
(
chtype type,
unsigned long count,
chid chanId,
const void * pValue
);
/*
* ca_array_put_callback()
*
* This routine functions identically to the original ca put request
* with the addition of a callback to the user supplied function
* after recod processing completes in the IOC. The arguments
* to the user supplied callback function are declared in
* the structure event_handler_args and include the pointer
* sized user argument supplied when ca_array_put_callback() is called.
*
* type R data type from db_access.h
* count R array element count
* chan R channel identifier
* pValue R new channel value copied from this location
* pFunc R pointer to call-back function
* pArg R copy of this pointer passed to pFunc
*/
int ca_array_put_callback
(
chtype type,
unsigned long count,
chid chanId,
const void * pValue,
caEventCallBackFunc * pFunc,
void * pArg
);
/************************************************************************/
/* Read a value from a channel */
/************************************************************************/
/*
* ca_array_get()
*
* type R data type from db_access.h
* count R array element count
* chan R channel identifier
* pValue W channel value copied to this location
*/
int ca_array_get
(
long type,
unsigned long count,
chid chanId,
void * pValue
);
/************************************************************************/
/* Read a value from a channel and run a callback when the value */
/* returns */
/* */
/* */
/************************************************************************/
/*
* ca_array_get_callback()
*
* type R data type from db_access.h
* count R array element count
* chan R channel identifier
* pFunc R pointer to call-back function
* pArg R copy of this pointer passed to pFunc
*/
int ca_array_get_callback
(
chtype type,
unsigned long count,
chid chanId,
caEventCallBackFunc * pFunc,
void * pArg
);
/************************************************************************/
/* Specify a function to be executed whenever significant changes */
/* occur to a channel. */
/* NOTES: */
/* 1) Evid may be omited by passing a NULL pointer */
/* */
/* 2) An array count of zero specifies the native db count */
/* */
/************************************************************************/
/*
* ca_create_subscription ()
*
* type R data type from db_access.h
* count R array element count
* chan R channel identifier
* mask R event mask - one of {DBE_VALUE, DBE_ALARM, DBE_LOG}
* pFunc R pointer to call-back function
* pArg R copy of this pointer passed to pFunc
* pEventID W event id written at specified address
*/
int ca_create_subscription
(
chtype type,
unsigned long count,
chid chanId,
long mask,
caEventCallBackFunc * pFunc,
void * pArg,
evid * pEventID
);
/************************************************************************/
/* Remove a function from a list of those specified to run */
/* whenever significant changes occur to a channel */
/* */
/************************************************************************/
/*
* ca_clear_subscription()
*
* eventID R event id
*/
int ca_clear_subscription
(
evid eventId
);
chid ca_evid_to_chid ( evid id );
/************************************************************************/
/* */
/* Requested data is not necessarily stable prior to */
/* return from called subroutine. Call ca_pend_io() */
/* to guarantee that requested data is stable. Call the routine */
/* ca_flush_io() to force all outstanding requests to be */
/* sent out over the network. Significant increases in */
/* performance have been measured when batching several remote */
/* requests together into one message. Additional */
/* improvements can be obtained by performing local processing */
/* in parallel with outstanding remote processing. */
/* */
/* FLOW OF TYPICAL APPLICATION */
/* */
/* search() ! Obtain Channel ids */
/* . ! " */
/* . ! " */
/* pend_io ! wait for channels to connect */
/* */
/* get() ! several requests for remote info */
/* get() ! " */
/* add_event() ! " */
/* get() ! " */
/* . */
/* . */
/* . */
/* flush_io() ! send get requests */
/* ! optional parallel processing */
/* . ! " */
/* . ! " */
/* pend_io() ! wait for replies from get requests */
/* . ! access to requested data */
/* . ! " */
/* pend_event() ! wait for requested events */
/* */
/************************************************************************/
/************************************************************************/
/* These routines wait for channel subscription events and call the */
/* functions specified with add_event when events occur. If the */
/* timeout is specified as 0 an infinite timeout is assumed. */
/* ca_flush_io() is called by this routine. If ca_pend_io () */
/* is called when no IO is outstanding then it will return immediately */
/* without processing. */
/************************************************************************/
/*
* ca_pend_event()
*
* timeOut R wait for this delay in seconds
*/
int ca_pend_event(ca_real timeOut);
/*
* ca_pend_io()
*
* timeOut R wait for this delay in seconds but return early
* if all get requests (or search requests with null
* connection handler pointer have completed)
*/
int ca_pend_io(ca_real timeOut);
/* calls ca_pend_io() if early is true otherwise ca_pend_event() is called */
int ca_pend (ca_real timeout, int early);
/*
* ca_test_io()
*
* returns TRUE when get requests (or search requests with null
* connection handler pointer) are outstanding
*/
int ca_test_io (void);
/************************************************************************/
/* Send out all outstanding messages in the send queue */
/************************************************************************/
/*
* ca_flush_io()
*/
int ca_flush_io();
/*
* ca_host_name_function()
*
* channel R channel identifier
*
* !!!! this function is _not_ thread safe !!!!
*/
const char * ca_host_name (chid channel);
/* thread safe version */
unsigned ca_get_host_name ( chid pChan,
char *pBuf, unsigned bufLength );
/*
* ca_replace_printf_handler ()
*
* for apps that want to change where ca formatted
* text output goes
*
* use two ifdef's for trad C compatibility
*
* ca_printf_func R pointer to new function called when
* CA prints an error message
*/
/*
typedef int caPrintfFunc (const char *pformat, va_list args);
int ca_replace_printf_handler (
caPrintfFunc *ca_printf_func
);
*/
/*
* CA synch groups
*
* This facility will allow the programmer to create
* any number of synchronization groups. The programmer might then
* interleave IO requests within any of the groups. Once The
* IO operations are initiated then the programmer is free to
* block for IO completion within any one of the groups as needed.
*/
/*
* ca_sg_create()
*
* create a sync group
*
* pgid W pointer to sync group id that will be written
*/
int ca_sg_create (CA_SYNC_GID * pgid);
/*
* ca_sg_delete()
*
* delete a sync group
*
* gid R sync group id
*/
int ca_sg_delete (const CA_SYNC_GID gid);
/*
* ca_sg_block()
*
* block for IO performed within a sync group to complete
*
* gid R sync group id
* timeout R wait for this duration prior to timing out
* and returning ECA_TIMEOUT
*/
int ca_sg_block (const CA_SYNC_GID gid, ca_real timeout);
/*
* ca_sg_test()
*
* test for sync group IO operations in progress
*
* gid R sync group id
*
* returns one of ECA_BADSYNCGRP, ECA_IOINPROGRESS, ECA_IODONE
*/
int ca_sg_test (const CA_SYNC_GID gid);
/*
* ca_sg_reset
*
* gid R sync group id
*/
int ca_sg_reset(const CA_SYNC_GID gid);
/*
* ca_sg_array_get()
*
* initiate a get within a sync group
* (essentially a ca_array_get() with a sync group specified)
*
* gid R sync group id
* type R data type from db_access.h
* count R array element count
* chan R channel identifier
* pValue W channel value copied to this location
*/
int ca_sg_array_get
(
const CA_SYNC_GID gid,
chtype type,
unsigned long count,
chid chan,
void *pValue
);
/*
* ca_sg_array_put()
*
* initiate a put within a sync group
* (essentially a ca_array_put() with a sync group specified)
*
* gid R sync group id
* type R data type from db_access.h
* count R array element count
* chan R channel identifier
* pValue R new channel value copied from this location
*/
int ca_sg_array_put
(
const CA_SYNC_GID gid,
chtype type,
unsigned long count,
chid chan,
const void *pValue
);
/*
* ca_sg_stat()
*
* print status of a sync group
*
* gid R sync group id
*/
int ca_sg_stat (CA_SYNC_GID gid);
/*
* used when an auxillary thread needs to join a CA client context started
* by another thread
*/
struct ca_client_context * ca_current_context ();
int ca_attach_context ( struct ca_client_context * context );
int ca_client_status ( unsigned level );
int ca_context_status ( struct ca_client_context *, unsigned level );
const char * ca_message(long ca_status);
/*
* ca_version()
*
* returns the CA version string
*/
const char * ca_version (void);
""")
# alarm.h
ffi.cdef("""
#define NO_ALARM 0
/* ALARM SEVERITIES - must match menuAlarmSevr.dbd */
typedef enum {
epicsSevNone = NO_ALARM,
epicsSevMinor,
epicsSevMajor,
epicsSevInvalid,
ALARM_NSEV
} epicsAlarmSeverity;
/* ALARM STATUS - must match menuAlarmStat.dbd */
typedef enum {
epicsAlarmNone = NO_ALARM,
epicsAlarmRead,
epicsAlarmWrite,
epicsAlarmHiHi,
epicsAlarmHigh,
epicsAlarmLoLo,
epicsAlarmLow,
epicsAlarmState,
epicsAlarmCos,
epicsAlarmComm,
epicsAlarmTimeout,
epicsAlarmHwLimit,
epicsAlarmCalc,
epicsAlarmScan,
epicsAlarmLink,
epicsAlarmSoft,
epicsAlarmBadSub,
epicsAlarmUDF,
epicsAlarmDisable,
epicsAlarmSimm,
epicsAlarmReadAccess,
epicsAlarmWriteAccess,
ALARM_NSTATUS
} epicsAlarmCondition;
""")
# caeventmask.h
ffi.cdef("""
#define DBE_VALUE 1
#define DBE_ARCHIVE 2
#define DBE_LOG 2
#define DBE_ALARM 4
#define DBE_PROPERTY 8
""")
# epicsTypes.h
ffi.cdef("""
typedef int8_t epicsInt8;
typedef uint8_t epicsUInt8;
typedef int16_t epicsInt16;
typedef uint16_t epicsUInt16;
typedef epicsUInt16 epicsEnum16;
typedef int32_t epicsInt32;
typedef uint32_t epicsUInt32;
typedef int64_t epicsInt64;
typedef uint64_t epicsUInt64;
typedef float epicsFloat32;
typedef double epicsFloat64;
typedef epicsInt32 epicsStatus;
typedef struct {
unsigned length;
char *pString;
}epicsString;
/*
* !! Dont use this - it may vanish in the future !!
*
* Provided only for backwards compatibility with
* db_access.h
*
*/
typedef char epicsOldString[40];
""")
# epicsTime.h
ffi.cdef("""
/* epics time stamp for C interface*/
typedef struct epicsTimeStamp {
epicsUInt32 secPastEpoch; /* seconds since 0000 Jan 1, 1990 */
epicsUInt32 nsec; /* nanoseconds within second */
} epicsTimeStamp;
""")
# db_access.h
ffi.cdef("""
/*
* architecture independent types
*
* (so far this is sufficient for all archs we have ported to)
*/
typedef epicsOldString dbr_string_t;
typedef epicsUInt8 dbr_char_t;
typedef epicsInt16 dbr_short_t;
typedef epicsUInt16 dbr_ushort_t;
typedef epicsInt16 dbr_int_t;
typedef epicsUInt16 dbr_enum_t;
typedef epicsInt32 dbr_long_t;
typedef epicsUInt32 dbr_ulong_t;
typedef epicsFloat32 dbr_float_t;
typedef epicsFloat64 dbr_double_t;
typedef epicsUInt16 dbr_put_ackt_t;
typedef epicsUInt16 dbr_put_acks_t;
typedef epicsOldString dbr_stsack_string_t;
typedef epicsOldString dbr_class_name_t;
/* VALUES WITH STATUS STRUCTURES */
/* structure for a string status field */
struct dbr_sts_string {
dbr_short_t status; /* status of value */
dbr_short_t severity; /* severity of alarm */
dbr_string_t value; /* current value */
};
/* structure for a string status and ack field */
struct dbr_stsack_string{
dbr_ushort_t status; /* status of value */
dbr_ushort_t severity; /* severity of alarm */
dbr_ushort_t ackt; /* ack transient? */
dbr_ushort_t acks; /* ack severity */
dbr_string_t value; /* current value */
};
/* structure for an short status field */
struct dbr_sts_int{
dbr_short_t status; /* status of value */
dbr_short_t severity; /* severity of alarm */
dbr_short_t value; /* current value */
};
struct dbr_sts_short{
dbr_short_t status; /* status of value */
dbr_short_t severity; /* severity of alarm */
dbr_short_t value; /* current value */
};
/* structure for a float status field */
struct dbr_sts_float{
dbr_short_t status; /* status of value */
dbr_short_t severity; /* severity of alarm */
dbr_float_t value; /* current value */
};
/* structure for a enum status field */
struct dbr_sts_enum{
dbr_short_t status; /* status of value */
dbr_short_t severity; /* severity of alarm */
dbr_enum_t value; /* current value */
};
/* structure for a char status field */
struct dbr_sts_char{
dbr_short_t status; /* status of value */
dbr_short_t severity; /* severity of alarm */
dbr_char_t RISC_pad; /* RISC alignment */
dbr_char_t value; /* current value */
};
/* structure for a long status field */
struct dbr_sts_long{
dbr_short_t status; /* status of value */
dbr_short_t severity; /* severity of alarm */
dbr_long_t value; /* current value */
};
/* structure for a double status field */
struct dbr_sts_double{
dbr_short_t status; /* status of value */
dbr_short_t severity; /* severity of alarm */
dbr_long_t RISC_pad; /* RISC alignment */
dbr_double_t value; /* current value */
};
/* VALUES WITH STATUS AND TIME STRUCTURES */
/* structure for a string time field */
struct dbr_time_string{
dbr_short_t status; /* status of value */
dbr_short_t severity; /* severity of alarm */
epicsTimeStamp stamp; /* time stamp */
dbr_string_t value; /* current value */
};
/* structure for an short time field */
struct dbr_time_int{
dbr_short_t status; /* status of value */
dbr_short_t severity; /* severity of alarm */
epicsTimeStamp stamp; /* time stamp */
dbr_short_t RISC_pad; /* RISC alignment */
dbr_short_t value; /* current value */
};
struct dbr_time_short{
dbr_short_t status; /* status of value */
dbr_short_t severity; /* severity of alarm */
epicsTimeStamp stamp; /* time stamp */
dbr_short_t RISC_pad; /* RISC alignment */
dbr_short_t value; /* current value */
};
/* structure for a float time field */
struct dbr_time_float{
dbr_short_t status; /* status of value */
dbr_short_t severity; /* severity of alarm */
epicsTimeStamp stamp; /* time stamp */
dbr_float_t value; /* current value */
};
/* structure for a enum time field */
struct dbr_time_enum{
dbr_short_t status; /* status of value */
dbr_short_t severity; /* severity of alarm */
epicsTimeStamp stamp; /* time stamp */
dbr_short_t RISC_pad; /* RISC alignment */
dbr_enum_t value; /* current value */
};
/* structure for a char time field */
struct dbr_time_char{
dbr_short_t status; /* status of value */
dbr_short_t severity; /* severity of alarm */
epicsTimeStamp stamp; /* time stamp */
dbr_short_t RISC_pad0; /* RISC alignment */
dbr_char_t RISC_pad1; /* RISC alignment */
dbr_char_t value; /* current value */
};
/* structure for a long time field */
struct dbr_time_long{
dbr_short_t status; /* status of value */
dbr_short_t severity; /* severity of alarm */
epicsTimeStamp stamp; /* time stamp */
dbr_long_t value; /* current value */
};
/* structure for a double time field */
struct dbr_time_double{
dbr_short_t status; /* status of value */
dbr_short_t severity; /* severity of alarm */
epicsTimeStamp stamp; /* time stamp */
dbr_long_t RISC_pad; /* RISC alignment */
dbr_double_t value; /* current value */
};
/* VALUES WITH STATUS AND GRAPHIC STRUCTURES */
/* structure for a graphic string */
/* not implemented; use struct_dbr_sts_string */
/* structure for a graphic short field */
struct dbr_gr_int{
dbr_short_t status; /* status of value */
dbr_short_t severity; /* severity of alarm */
char units[8]; /* units of value */
dbr_short_t upper_disp_limit; /* upper limit of graph */
dbr_short_t lower_disp_limit; /* lower limit of graph */
dbr_short_t upper_alarm_limit;
dbr_short_t upper_warning_limit;
dbr_short_t lower_warning_limit;
dbr_short_t lower_alarm_limit;
dbr_short_t value; /* current value */
};
struct dbr_gr_short{
dbr_short_t status; /* status of value */
dbr_short_t severity; /* severity of alarm */
char units[8]; /* units of value */
dbr_short_t upper_disp_limit; /* upper limit of graph */
dbr_short_t lower_disp_limit; /* lower limit of graph */
dbr_short_t upper_alarm_limit;
dbr_short_t upper_warning_limit;
dbr_short_t lower_warning_limit;
dbr_short_t lower_alarm_limit;
dbr_short_t value; /* current value */
};
/* structure for a graphic floating point field */
struct dbr_gr_float{
dbr_short_t status; /* status of value */
dbr_short_t severity; /* severity of alarm */
dbr_short_t precision; /* number of decimal places */
dbr_short_t RISC_pad0; /* RISC alignment */
char units[8]; /* units of value */
dbr_float_t upper_disp_limit; /* upper limit of graph */
dbr_float_t lower_disp_limit; /* lower limit of graph */
dbr_float_t upper_alarm_limit;
dbr_float_t upper_warning_limit;
dbr_float_t lower_warning_limit;
dbr_float_t lower_alarm_limit;
dbr_float_t value; /* current value */
};
/* structure for a graphic enumeration field */
struct dbr_gr_enum{
dbr_short_t status; /* status of value */
dbr_short_t severity; /* severity of alarm */
dbr_short_t no_str; /* number of strings */
char strs[16][26];
/* state strings */
dbr_enum_t value; /* current value */
};
/* structure for a graphic char field */
struct dbr_gr_char{
dbr_short_t status; /* status of value */
dbr_short_t severity; /* severity of alarm */
char units[8]; /* units of value */
dbr_char_t upper_disp_limit; /* upper limit of graph */
dbr_char_t lower_disp_limit; /* lower limit of graph */
dbr_char_t upper_alarm_limit;
dbr_char_t upper_warning_limit;
dbr_char_t lower_warning_limit;
dbr_char_t lower_alarm_limit;
dbr_char_t RISC_pad; /* RISC alignment */
dbr_char_t value; /* current value */
};
/* structure for a graphic long field */
struct dbr_gr_long{
dbr_short_t status; /* status of value */
dbr_short_t severity; /* severity of alarm */
char units[8]; /* units of value */
dbr_long_t upper_disp_limit; /* upper limit of graph */
dbr_long_t lower_disp_limit; /* lower limit of graph */
dbr_long_t upper_alarm_limit;
dbr_long_t upper_warning_limit;
dbr_long_t lower_warning_limit;
dbr_long_t lower_alarm_limit;
dbr_long_t value; /* current value */
};
/* structure for a graphic double field */
struct dbr_gr_double{
dbr_short_t status; /* status of value */
dbr_short_t severity; /* severity of alarm */
dbr_short_t precision; /* number of decimal places */
dbr_short_t RISC_pad0; /* RISC alignment */
char units[8]; /* units of value */
dbr_double_t upper_disp_limit; /* upper limit of graph */
dbr_double_t lower_disp_limit; /* lower limit of graph */
dbr_double_t upper_alarm_limit;
dbr_double_t upper_warning_limit;
dbr_double_t lower_warning_limit;
dbr_double_t lower_alarm_limit;
dbr_double_t value; /* current value */
};
/* VALUES WITH STATUS, GRAPHIC and CONTROL STRUCTURES */
/* structure for a control string */
/* not implemented; use struct_dbr_sts_string */
/* structure for a control integer */
struct dbr_ctrl_int{
dbr_short_t status; /* status of value */
dbr_short_t severity; /* severity of alarm */
char units[8]; /* units of value */
dbr_short_t upper_disp_limit; /* upper limit of graph */
dbr_short_t lower_disp_limit; /* lower limit of graph */
dbr_short_t upper_alarm_limit;
dbr_short_t upper_warning_limit;
dbr_short_t lower_warning_limit;
dbr_short_t lower_alarm_limit;
dbr_short_t upper_ctrl_limit; /* upper control limit */
dbr_short_t lower_ctrl_limit; /* lower control limit */
dbr_short_t value; /* current value */
};
struct dbr_ctrl_short{
dbr_short_t status; /* status of value */
dbr_short_t severity; /* severity of alarm */
char units[8]; /* units of value */
dbr_short_t upper_disp_limit; /* upper limit of graph */
dbr_short_t lower_disp_limit; /* lower limit of graph */
dbr_short_t upper_alarm_limit;
dbr_short_t upper_warning_limit;
dbr_short_t lower_warning_limit;
dbr_short_t lower_alarm_limit;
dbr_short_t upper_ctrl_limit; /* upper control limit */
dbr_short_t lower_ctrl_limit; /* lower control limit */
dbr_short_t value; /* current value */
};
/* structure for a control floating point field */
struct dbr_ctrl_float{
dbr_short_t status; /* status of value */
dbr_short_t severity; /* severity of alarm */
dbr_short_t precision; /* number of decimal places */
dbr_short_t RISC_pad; /* RISC alignment */
char units[8]; /* units of value */
dbr_float_t upper_disp_limit; /* upper limit of graph */
dbr_float_t lower_disp_limit; /* lower limit of graph */
dbr_float_t upper_alarm_limit;
dbr_float_t upper_warning_limit;
dbr_float_t lower_warning_limit;
dbr_float_t lower_alarm_limit;
dbr_float_t upper_ctrl_limit; /* upper control limit */
dbr_float_t lower_ctrl_limit; /* lower control limit */
dbr_float_t value; /* current value */
};
/* structure for a control enumeration field */
struct dbr_ctrl_enum{
dbr_short_t status; /* status of value */
dbr_short_t severity; /* severity of alarm */
dbr_short_t no_str; /* number of strings */
char strs[16][26];
/* state strings */
dbr_enum_t value; /* current value */
};
/* structure for a control char field */
struct dbr_ctrl_char{
dbr_short_t status; /* status of value */
dbr_short_t severity; /* severity of alarm */
char units[8]; /* units of value */
dbr_char_t upper_disp_limit; /* upper limit of graph */
dbr_char_t lower_disp_limit; /* lower limit of graph */
dbr_char_t upper_alarm_limit;
dbr_char_t upper_warning_limit;
dbr_char_t lower_warning_limit;
dbr_char_t lower_alarm_limit;
dbr_char_t upper_ctrl_limit; /* upper control limit */
dbr_char_t lower_ctrl_limit; /* lower control limit */
dbr_char_t RISC_pad; /* RISC alignment */
dbr_char_t value; /* current value */
};
/* structure for a control long field */
struct dbr_ctrl_long{
dbr_short_t status; /* status of value */
dbr_short_t severity; /* severity of alarm */
char units[8]; /* units of value */
dbr_long_t upper_disp_limit; /* upper limit of graph */
dbr_long_t lower_disp_limit; /* lower limit of graph */
dbr_long_t upper_alarm_limit;
dbr_long_t upper_warning_limit;
dbr_long_t lower_warning_limit;
dbr_long_t lower_alarm_limit;
dbr_long_t upper_ctrl_limit; /* upper control limit */
dbr_long_t lower_ctrl_limit; /* lower control limit */
dbr_long_t value; /* current value */
};
/* structure for a control double field */
typedef struct dbr_ctrl_double{
dbr_short_t status; /* status of value */
dbr_short_t severity; /* severity of alarm */
dbr_short_t precision; /* number of decimal places */
dbr_short_t RISC_pad0; /* RISC alignment */
char units[8]; /* units of value */
dbr_double_t upper_disp_limit; /* upper limit of graph */
dbr_double_t lower_disp_limit; /* lower limit of graph */
dbr_double_t upper_alarm_limit;
dbr_double_t upper_warning_limit;
dbr_double_t lower_warning_limit;
dbr_double_t lower_alarm_limit;
dbr_double_t upper_ctrl_limit; /* upper control limit */
dbr_double_t lower_ctrl_limit; /* lower control limit */
dbr_double_t value; /* current value */
};
/* union for each fetch buffers */
union db_access_val{
dbr_string_t strval; /* string max size */
dbr_short_t shrtval; /* short */
dbr_short_t intval; /* short */
dbr_float_t fltval; /* IEEE Float */
dbr_enum_t enmval; /* item number */
dbr_char_t charval; /* character */
dbr_long_t longval; /* long */
dbr_double_t doubleval; /* double */
struct dbr_sts_string sstrval; /* string field with status */
struct dbr_sts_short sshrtval; /* short field with status */
struct dbr_sts_float sfltval; /* float field with status */
struct dbr_sts_enum senmval; /* item number with status */
struct dbr_sts_char schrval; /* char field with status */
struct dbr_sts_long slngval; /* long field with status */
struct dbr_sts_double sdblval; /* double field with time */
struct dbr_time_string tstrval; /* string field with time */
struct dbr_time_short tshrtval; /* short field with time */
struct dbr_time_float tfltval; /* float field with time */
struct dbr_time_enum tenmval; /* item number with time */
struct dbr_time_char tchrval; /* char field with time */
struct dbr_time_long tlngval; /* long field with time */
struct dbr_time_double tdblval; /* double field with time */
struct dbr_sts_string gstrval; /* graphic string info */
struct dbr_gr_short gshrtval; /* graphic short info */
struct dbr_gr_float gfltval; /* graphic float info */
struct dbr_gr_enum genmval; /* graphic item info */
struct dbr_gr_char gchrval; /* graphic char info */
struct dbr_gr_long glngval; /* graphic long info */
struct dbr_gr_double gdblval; /* graphic double info */
struct dbr_sts_string cstrval; /* control string info */
struct dbr_ctrl_short cshrtval; /* control short info */
struct dbr_ctrl_float cfltval; /* control float info */
struct dbr_ctrl_enum cenmval; /* control item info */
struct dbr_ctrl_char cchrval; /* control char info */
struct dbr_ctrl_long clngval; /* control long info */
struct dbr_ctrl_double cdblval; /* control double info */
dbr_put_ackt_t putackt; /* item number */
dbr_put_acks_t putacks; /* item number */
struct dbr_sts_string sastrval; /* string field with status */
dbr_string_t classname; /* string max size */
};
/* size for each type - array indexed by the DBR_ type code */
const unsigned short dbr_size[];
/* size for each type's value - array indexed by the DBR_ type code */
const unsigned short dbr_value_size[];
const unsigned short dbr_value_offset[];
const char *dbf_text[];
const short dbf_text_dim;
const char *dbf_text_invalid;
const char *dbr_text[];
const short dbr_text_dim;
const char *dbr_text_invalid;
""")
def get_libca():
    """Locate the EPICS Channel Access shared library for this platform.

    Returns:
        tuple: ``(directory, library_name, dlopen_flags)`` where
        ``directory`` is where the library should be loaded from and
        ``dlopen_flags`` is passed through to ``ffi.dlopen``.

    Raises:
        OSError: if the operating system is not Darwin, Linux or Windows.
    """
    package_dir = os.path.dirname(os.path.abspath(__file__))
    osname = platform.system()
    is64bit = sys.maxsize > 2**32
    flags = 0
    if osname == 'Darwin':
        libca_dir = 'lib'
        libca_name = 'libca.dylib'
        host_arch = 'darwin-x86'
    elif osname == 'Linux':
        libca_dir = 'lib'
        libca_name = 'libca.so'
        # NOTE(review): RTLD_NODELETE keeps the library resident after
        # dlclose; presumably this avoids teardown crashes in CA threads
        # -- confirm before changing.
        flags = ffi.RTLD_NODELETE
        host_arch = 'linux-x86_64' if is64bit else 'linux-x86'
    elif osname == 'Windows':
        libca_dir = 'bin'
        libca_name = 'ca.dll'
        host_arch = 'windows-x64' if is64bit else 'win32-x86'
    else:
        raise OSError('Unsupported Operation System %s' % osname)
    # system defined epics installation precedes package bounded library
    epics_base = os.environ.get('EPICS_BASE', '')
    if os.path.exists(epics_base):
        # os.path.exists('') is False, so an unset EPICS_BASE falls through.
        host_arch = os.environ.get('EPICS_HOST_ARCH', host_arch)
        return os.path.join(epics_base, libca_dir, host_arch), libca_name, flags
    return os.path.join(package_dir, 'lib', host_arch), libca_name, flags
# Resolve the Channel Access shared library location once, at import time.
libca_path, libca_name, libca_flags = get_libca()
# save and set current dir to ca library's path
# (presumably so the dynamic loader can resolve sibling EPICS libraries,
# e.g. Com.dll on Windows, relative to the library directory -- TODO confirm)
old_cwd = os.getcwd()
if os.path.exists(libca_path):
    os.chdir(libca_path)
else:
    # Directory missing: fall back to the system default library search path.
    libca_path = ''
# load ca library
libca = ffi.dlopen(os.path.join(libca_path, libca_name), libca_flags)
# restore current dir
os.chdir(old_cwd)
| StarcoderdataPython |
1810891 | # Forked from https://github.com/IanLewis/django-ses/tree/bounce_notifications
from django.conf import settings
from django.http import HttpResponse, HttpResponseBadRequest
from django.views.decorators.csrf import csrf_exempt
from raven.contrib.django.raven_compat.models import client
from canvas import json
from canvas.util import logger
from canvas.view_guards import require_POST
from drawquest.apps.bounces import models
from drawquest.apps.bounces.verification import verify_bounce_message
@require_POST
@csrf_exempt
def handle_notification(request):
    """Handle bounced emails via an SNS webhook.

    Parses the SNS JSON payload, optionally verifies its signature, then
    dispatches to ``models.handle_<Type>`` (e.g. ``handle_Notification``).

    Returns:
        HttpResponse (200) on success or when the message is unverified;
        HttpResponseBadRequest (400) for malformed JSON or unknown types.
    """
    try:
        notification = json.loads(request.body)
    except ValueError:
        # Malformed JSON: report to Sentry, then reject the request.
        client.captureException()
        return HttpResponseBadRequest()
    if (settings.AWS_SES_VERIFY_BOUNCE_SIGNATURES and
            not verify_bounce_message(notification)):
        # Don't send any info back when the notification is not verified.
        logger.info('Received unverified notification: Type: %s',
            notification.get('Type'),
            extra={'notification': notification},
        )
        return HttpResponse()
    # 'Type' comes straight from the (attacker-controllable) request body.
    # Only dispatch to handlers that actually exist; the previous bare
    # getattr() raised AttributeError for unknown types, producing a 500
    # that made SNS re-deliver the message indefinitely.
    handler = getattr(models, 'handle_{}'.format(notification.get('Type')), None)
    if handler is None:
        logger.info('Received notification of unknown type: %s',
            notification.get('Type'),
            extra={'notification': notification},
        )
        return HttpResponseBadRequest()
    handler(notification)
    # AWS will consider anything other than 200 to be an error response and
    # resend the SNS request. We don't need that so we return 200 here.
    return HttpResponse()
| StarcoderdataPython |
202686 | # SPDX-License-Identifier: Apache-2.0
import unittest
import numpy as np
from numpy.testing import assert_almost_equal
import onnx
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.cluster import KMeans
from sklearn.pipeline import make_pipeline
from onnxruntime import InferenceSession
from skl2onnx import convert_sklearn, to_onnx, wrap_as_onnx_mixin
from skl2onnx.common.data_types import FloatTensorType
from skl2onnx.algebra.onnx_ops import OnnxSub, OnnxDiv, OnnxClip, OnnxClip_6
from skl2onnx.algebra.onnx_operator_mixin import OnnxOperatorMixin
from test_utils import dump_data_and_model, TARGET_OPSET
class CustomOpTransformer(BaseEstimator, TransformerMixin,
                          OnnxOperatorMixin):
    """Standard-scaler-like transformer that can also express itself as
    ONNX operators (subtract the column mean, divide by the column std)."""

    def __init__(self, op_version=TARGET_OPSET):
        BaseEstimator.__init__(self)
        TransformerMixin.__init__(self)
        self.op_version = op_version

    def fit(self, X, y=None):
        # Learn column-wise statistics from the training data.
        self.W_ = np.mean(X, axis=0)
        self.S_ = np.std(X, axis=0)
        return self

    def transform(self, X):
        centered = X - self.W_
        return centered / self.S_

    def onnx_shape_calculator(self):
        def propagate(operator):
            # The output type simply mirrors the input type.
            operator.outputs[0].type = operator.inputs[0].type
        return propagate

    def to_onnx_operator(self, inputs=None, outputs=('Y', ),
                         target_opset=None, **kwargs):
        if inputs is None:
            raise RuntimeError("inputs should contain one name")
        source = self.get_inputs(inputs, 0)
        mean = self.W_.astype(np.float32)
        scale = self.S_.astype(np.float32)
        centered = OnnxSub(source, mean, op_version=self.op_version)
        return OnnxDiv(centered, scale,
                       output_names=outputs, op_version=self.op_version)
class TestOnnxOperatorMixinSyntax(unittest.TestCase):
    """Exercises the different conversion entry points (convert_sklearn,
    to_onnx, wrap_as_onnx_mixin) on a KMeans model alone and inside a
    pipeline containing a custom OnnxOperatorMixin transformer."""

    def _check_opset(self, onx):
        """When testing against opset 11, assert the serialized model
        declares opset 11 and ir_version 6.

        Factored out of the individual tests, which previously carried
        six identical copies of this check.
        """
        if TARGET_OPSET == 11:
            sonx = str(onx)
            if "version: 11" not in sonx or "ir_version: 6" not in sonx:
                raise AssertionError("Issue with TARGET_OPSET: {}\n{}".format(
                    TARGET_OPSET, sonx))

    def test_way1_convert_sklearn(self):
        X = np.arange(20).reshape(10, 2)
        tr = KMeans(n_clusters=2)
        tr.fit(X)
        onx = convert_sklearn(
            tr, initial_types=[('X', FloatTensorType((None, X.shape[1])))],
            target_opset=TARGET_OPSET)
        self._check_opset(onx)
        dump_data_and_model(
            X.astype(np.float32), tr, onx,
            basename="MixinWay1ConvertSklearn")

    def test_way2_to_onnx(self):
        X = np.arange(20).reshape(10, 2)
        tr = KMeans(n_clusters=2)
        tr.fit(X)
        onx = to_onnx(tr, X.astype(np.float32),
                      target_opset=TARGET_OPSET)
        self._check_opset(onx)
        dump_data_and_model(
            X.astype(np.float32), tr, onx,
            basename="MixinWay2ToOnnx")

    def test_way3_mixin(self):
        X = np.arange(20).reshape(10, 2)
        tr = KMeans(n_clusters=2)
        tr.fit(X)
        try:
            tr_mixin = wrap_as_onnx_mixin(tr, target_opset=TARGET_OPSET)
        except KeyError as e:
            # Some configurations raise while registering gaussian
            # processes; that is acceptable and ends the test.
            assert ("SklearnGaussianProcessRegressor" in str(e) or
                    "SklearnGaussianProcessClassifier" in str(e))
            return
        try:
            onx = tr_mixin.to_onnx()
        except RuntimeError as e:
            # Conversion without initial types must explain what to do.
            assert "Method enumerate_initial_types" in str(e)
        onx = tr_mixin.to_onnx(X.astype(np.float32))
        dump_data_and_model(
            X.astype(np.float32), tr, onx,
            basename="MixinWay3OnnxMixin")

    def test_way4_mixin_fit(self):
        X = np.arange(20).reshape(10, 2)
        try:
            tr = wrap_as_onnx_mixin(KMeans(n_clusters=2),
                                    target_opset=TARGET_OPSET)
        except KeyError as e:
            assert ("SklearnGaussianProcessRegressor" in str(e) or
                    "SklearnGaussianProcessClassifier" in str(e))
            return
        tr.fit(X)
        onx = tr.to_onnx(X.astype(np.float32))
        self._check_opset(onx)
        dump_data_and_model(
            X.astype(np.float32), tr, onx,
            basename="MixinWay4OnnxMixin2")

    def test_pipe_way1_convert_sklearn(self):
        X = np.arange(20).reshape(10, 2)
        tr = make_pipeline(
            CustomOpTransformer(op_version=TARGET_OPSET),
            KMeans(n_clusters=2))
        tr.fit(X)
        onx = convert_sklearn(
            tr, initial_types=[('X', FloatTensorType((None, X.shape[1])))],
            target_opset=TARGET_OPSET)
        self._check_opset(onx)
        dump_data_and_model(
            X.astype(np.float32), tr, onx,
            basename="MixinPipeWay1ConvertSklearn")

    def test_pipe_way2_to_onnx(self):
        X = np.arange(20).reshape(10, 2)
        tr = make_pipeline(
            CustomOpTransformer(op_version=TARGET_OPSET),
            KMeans(n_clusters=2))
        tr.fit(X)
        onx = to_onnx(tr, X.astype(np.float32), target_opset=TARGET_OPSET)
        self._check_opset(onx)
        dump_data_and_model(
            X.astype(np.float32), tr, onx,
            basename="MixinPipeWay2ToOnnx")

    def test_pipe_way3_mixin(self):
        X = np.arange(20).reshape(10, 2)
        tr = make_pipeline(
            CustomOpTransformer(op_version=TARGET_OPSET),
            KMeans(n_clusters=2))
        tr.fit(X)
        try:
            tr_mixin = wrap_as_onnx_mixin(tr, target_opset=TARGET_OPSET)
        except KeyError as e:
            assert ("SklearnGaussianProcessRegressor" in str(e) or
                    "SklearnGaussianProcessClassifier" in str(e))
            return
        try:
            onx = tr_mixin.to_onnx()
        except RuntimeError as e:
            assert "Method enumerate_initial_types" in str(e)
        onx = tr_mixin.to_onnx(X.astype(np.float32))
        self._check_opset(onx)
        dump_data_and_model(
            X.astype(np.float32), tr, onx,
            basename="MixinPipeWay3OnnxMixin")

    def test_pipe_way4_mixin_fit(self):
        X = np.arange(20).reshape(10, 2)
        try:
            tr = wrap_as_onnx_mixin(
                make_pipeline(CustomOpTransformer(), KMeans(n_clusters=2)),
                target_opset=TARGET_OPSET)
        except KeyError as e:
            assert ("SklearnGaussianProcessRegressor" in str(e) or
                    "SklearnGaussianProcessClassifier" in str(e))
            return
        tr.fit(X)
        onx = tr.to_onnx(X.astype(np.float32))
        self._check_opset(onx)
        dump_data_and_model(
            X.astype(np.float32), tr, onx,
            basename="MixinPipeWay4OnnxMixin2")

    def common_test_onnxt_runtime_unary(self, onnx_cl, np_fct,
                                        op_version=None, debug=False):
        """Convert a unary ONNX operator built by *onnx_cl* into a model,
        run it with onnxruntime and compare against the numpy reference
        *np_fct* (6 decimals)."""
        onx = onnx_cl('X', output_names=['Y'])
        X = np.array([[1, 2], [3, -4]], dtype=np.float64)
        model_def = onx.to_onnx(
            {'X': X.astype(np.float32)}, target_opset=op_version)
        if debug:
            print(model_def)
        try:
            oinf = InferenceSession(model_def.SerializeToString())
        except RuntimeError as e:
            if ("Could not find an implementation for the node "
                    "Cl_Clip:Clip(11)" in str(e)):
                # Not yet implemented in onnxruntime
                return
            # Bare raise preserves the original traceback (was `raise e`).
            raise
        X = X.astype(np.float32)
        try:
            got = oinf.run(None, {'X': X})[0]
        except Exception as e:
            raise AssertionError(
                "Cannot run model due to %r\n%r\n%s" % (
                    e, onx, str(model_def))) from e
        assert_almost_equal(np_fct(X), got, decimal=6)

    @unittest.skipIf(onnx.defs.onnx_opset_version() < 10, "irrelevant")
    def test_onnx_clip_10(self):
        """Clip operator: compare opset-6 and opset-10 flavours, with
        explicit min/max, max only, and both bounds set."""
        with self.subTest(name="OnnxClip_6[1e-5, 1e5]"):
            self.common_test_onnxt_runtime_unary(
                lambda x, output_names=None: OnnxClip_6(
                    x, min=1e-5, max=1e5, output_names=output_names),
                lambda x: np.clip(x, 1e-5, 1e5),
                op_version=10)
        with self.subTest(name="OnnxClip-10[1e-5, 1e5]"):
            self.common_test_onnxt_runtime_unary(
                lambda x, output_names=None: OnnxClip(
                    x, min=1e-5, max=1e5, output_names=output_names,
                    op_version=10),
                lambda x: np.clip(x, 1e-5, 1e5),
                op_version=10)
        with self.subTest(name="OnnxClip-10[-1e5, 1e-5]"):
            self.common_test_onnxt_runtime_unary(
                lambda x, output_names=None: OnnxClip(
                    x, max=1e-5, output_names=output_names,
                    op_version=10),
                lambda x: np.clip(x, -1e5, 1e-5),
                op_version=10)
        with self.subTest(name="OnnxClip-10[0.1, 2.1]"):
            self.common_test_onnxt_runtime_unary(
                lambda x, output_names=None: OnnxClip(
                    x, min=0.1, max=2.1,
                    output_names=output_names,
                    op_version=10),
                lambda x: np.clip(x, 0.1, 2.1),
                op_version=10)
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
73855 | #####################################################
# Title: HTML parse- and analyser
# Author: <NAME> (<EMAIL>)
# Licence: GPLv2
#####################################################
#!/usr/bin/python
import sys
import sqlite3
import datetime
import timeit
import math
import re
import pandas as pd
import numpy as np
from time import time, sleep
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score, cross_validate, train_test_split
#from sklearn.naive_bayes import *
from sklearn.metrics import roc_auc_score, balanced_accuracy_score, precision_recall_curve, classification_report, precision_recall_fscore_support, roc_curve, average_precision_score, auc, confusion_matrix
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.feature_selection import SelectKBest, chi2, VarianceThreshold
from sklearn.tree import DecisionTreeClassifier, export_text, export_graphviz
from sklearn.linear_model import LogisticRegression
from sklearn import svm
from sklearn import tree
from mglearn import make_blobs
import matplotlib.pyplot as plt
import graphviz
'''
OPEN UP DATABASE AND FETCH DATA
'''
def connect_to_database(action, training_db, urls, unknown_samples, sha256):
    """Open the training database, assemble per-sample feature strings
    (registry keys, DNS queries, DLLs, API calls) and class labels,
    train/evaluate classifiers, and optionally extract unique .onion
    URLs from the strings of samples predicted as Tor malware.

    Parameters
    ----------
    action : bool
        Only ``False`` triggers the analysis (CLI flag convention).
    training_db : str
        Path to the sqlite3 database holding the labelled samples.
    urls : bool
        When True, print the unique .onion URLs found in predicted
        samples.
    unknown_samples, sha256
        Unused here; kept to preserve the public call signature.
    """
    if action != False:
        return
    # None (not "") so the finally-clause can tell whether a connection
    # was ever opened; the previous "" sentinel made close() crash.
    training_db_connection = None
    try:
        # Connect to training set database
        training_db_connection = sqlite3.connect(str(training_db))
        training_db_cursor = training_db_connection.cursor()
        # Every (sha256, registry key path) pair, grouped by sample.
        training_db_cursor.execute("SELECT sha256, path FROM reg_keys;")
        reg_key_pairs = training_db_cursor.fetchall()

        reg_keys_dict = {}            # labelled samples: split features
        reg_keys_combined = {}        # labelled samples: one text blob
        unknown_samples_dict = {}     # label == 2: split features
        unknown_samples_combined = {}  # label == 2: one text blob
        cur_sha = ""
        counter = 0
        counter_length = len(reg_key_pairs)
        print("Fetching data from database. Processing.")
        for pair in reg_key_pairs:
            counter += 1
            # Progress: a dot every 100 rows plus percentage milestones.
            if counter % 100 == 0:
                sys.stdout.write(".")
                sys.stdout.flush()
            for fraction in (0.1, 0.2, 0.5, 0.7, 0.8, 0.9, 0.95):
                if counter == math.ceil(fraction * counter_length):
                    print("{:.0%}".format(fraction))
            if cur_sha != pair[0]:
                # New sample: start a fresh accumulator.
                # NOTE(review): the first registry key of each sample is
                # dropped here (it is never appended) -- behaviour kept
                # as-is; confirm whether that is intentional.
                cur_sha = pair[0]
                reg_keys_list = []
            else:
                reg_keys_list.append(pair[1])
            # Parameterized queries: never interpolate sha256 values
            # into SQL text (injection-safe and handles quoting).
            training_db_cursor.execute(
                "SELECT dns FROM network WHERE sha256=?;", (cur_sha,))
            dns_list = training_db_cursor.fetchall()
            training_db_cursor.execute(
                "SELECT name,tor_related FROM api_calls WHERE sha256=?;",
                (cur_sha,))
            api_list = training_db_cursor.fetchall()
            training_db_cursor.execute(
                "SELECT name FROM dlls WHERE sha256=?;", (cur_sha,))
            dll_list = training_db_cursor.fetchall()
            training_db_cursor.execute(
                "SELECT tor_related FROM label WHERE sha256=?;", (cur_sha,))
            class_label = training_db_cursor.fetchall()
            # Flatten the fetched rows into plain strings.
            api_string = "".join(str(api_list))
            reg_keys_string = "".join(str(reg_keys_list))
            dns_string = "".join(str(dns_list))
            dll_string = "".join(str(dll_list))
            combined = (reg_keys_string + " " + dns_string + " " +
                        dll_string + " " + api_string)
            # Labels 0/1 are correctly classified samples; 2 marks
            # prediction candidates.  (The unknown branch previously
            # omitted one separator space -- made consistent here.)
            if class_label:
                if 0 in class_label[0]:
                    tor_related = int(0)
                    reg_keys_dict[cur_sha] = [reg_keys_string, dns_string,
                                              dll_string, api_string,
                                              tor_related]
                    reg_keys_combined[cur_sha] = [combined, tor_related]
                if 1 in class_label[0]:
                    tor_related = int(1)
                    reg_keys_dict[cur_sha] = [reg_keys_string, dns_string,
                                              dll_string, api_string,
                                              tor_related]
                    reg_keys_combined[cur_sha] = [combined, tor_related]
                if 2 in class_label[0]:
                    tor_related = int(2)
                    unknown_samples_dict[cur_sha] = [reg_keys_string,
                                                     dns_string, dll_string,
                                                     api_string, tor_related]
                    unknown_samples_combined[cur_sha] = [combined,
                                                         tor_related]
        # Construct data frames (one row per sample) from the feature
        # dictionaries, for both the training set and the unknowns.
        training_df2 = pd.DataFrame(reg_keys_dict).T
        training_df3 = pd.DataFrame(reg_keys_combined).T
        unknown_df2 = pd.DataFrame(unknown_samples_dict).T
        unknown_df3 = pd.DataFrame(unknown_samples_combined).T
        predictions_SHA256_list = build_classifiers(
            training_df2, training_df3, unknown_df2, unknown_df3)
        # If URLs flag enabled, go fetch URLs
        if urls == True:
            unique_onion_urls = []
            print("|-- Tor Malware\n", predictions_SHA256_list)
            for prediction_SHA256 in predictions_SHA256_list:
                training_db_cursor.execute(
                    "SELECT strings FROM strings WHERE sha256=?;",
                    (prediction_SHA256,))
                predicted_strings = training_db_cursor.fetchall()
                # Find .onion URLs among the extracted strings.
                for onion_url in predicted_strings:
                    for string in onion_url:
                        tmp_list = re.findall(
                            r"(?<=\.)([^.]+)(?:\.(?:onion|[^.]+(?:$|\n)))",
                            string)
                        for i in tmp_list:
                            if i not in unique_onion_urls:
                                unique_onion_urls.append(i)
            print("|--- Onion URLs \n", unique_onion_urls)
        training_db_connection.commit()
    except sqlite3.Error as err:
        print("Sqlite error:", err)
    finally:
        # Only close a connection that was actually opened.
        if training_db_connection is not None:
            training_db_connection.close()
"""
BUILD CLASSIFICATION MODELS
"""
def build_classifiers(df2, df3, unknown_df2, unknown_df3):
    """Train and evaluate five classifiers on bag-of-words features.

    Builds a CountVectorizer vocabulary from the combined feature text,
    splits 80/20, then fits and cross-validates multinomial naive Bayes,
    SVM, logistic regression, random forest and decision tree models,
    printing classification reports along the way.

    Parameters:
        df2: DataFrame of labelled samples; column 4 holds the 0/1 label.
        df3: DataFrame whose column 0 is the combined feature text.
        unknown_df2, unknown_df3: same layout for the unlabeled samples.

    Returns:
        List of index values (SHA256 hashes) where the decision-tree
        prediction matches the held-out true label (see
        verify_predictions).
    """
    # Create bag of words for label:
    vect = CountVectorizer(lowercase=False)
    vect.fit_transform(df3[0])
    X = vect.transform(df3[0])
    # If there are unknown samples, make predictions on them.
    X_unknown = vect.transform(unknown_df3[0])
    # unknown_samples_SHA256 = df3[0].index
    #X = pd.DataFrame(X_cand, columns=vect.get_feature_names())
    # Target/class labels
    y = df2[4]
    y = y.astype('int')
    # Feature selection
    # NOTE(review): the selector's output is discarded -- X is used
    # unreduced below; confirm whether the threshold filter was meant
    # to be applied.
    selector = VarianceThreshold(threshold=12)
    selector.fit_transform(X)
    # 80 / 20 split training and testing data. Shuffle just in case.
    X_train, X_test, y_train, y_test = train_test_split(X, y, shuffle=True, test_size=0.2)
    y_train = y_train.astype('int')
    y_test = y_test.astype('int')
    # Naive Bayes: fit, predict, cross-validate and collect ROC data.
    mnb = MultinomialNB()
    nb_clf = mnb.fit(X_train.toarray(), y_train.to_numpy())
    mnb_prediction = nb_clf.predict(X_test.toarray())
    mnb_proba = nb_clf.predict_proba(X_test)[:, 1]
    mnb_cross_validation_scores = cross_validate(nb_clf, X_test.toarray(), y_test.to_numpy(), cv=5, scoring=["accuracy", "f1", "recall", "precision", "roc_auc"], n_jobs=-1, return_train_score=True)
    mnb_cross_validation_score = cross_val_score(nb_clf, X_test.toarray(), y_test.to_numpy(), cv=5, scoring="accuracy")
    mnb_roc_auc_avg = roc_auc_score(y_test, mnb_prediction)
    mnb_balanced_accuracy = balanced_accuracy_score(y_test, mnb_prediction)
    mnb_precision, mnb_recall, mnb_threshold = precision_recall_curve(y_test, nb_clf.predict(X_test.toarray()))
    mnb_fpr = dict()
    mnb_tpr = dict()
    mnb_roc_auc = dict()
    mnb_fpr[0], mnb_tpr[0], _ = roc_curve(y_test, mnb_proba)
    mnb_roc_auc[0] = auc(mnb_fpr[0], mnb_tpr[0])
    # Compute micro-average ROC curve and ROC area
    mnb_fpr["micro"], mnb_tpr["micro"], _ = roc_curve(y_test.ravel(), mnb_proba.ravel())
    mnb_roc_auc["micro"] = auc(mnb_fpr["micro"], mnb_tpr["micro"])
    print("\n | ---- MNB cross validation score: ", mnb_cross_validation_score.mean())
    print(classification_report(y_test, mnb_prediction))
    # Support Vector Machine: same protocol as above.
    clf = svm.SVC(C=2, cache_size=9000, probability=True).fit(X_train, y_train)
    svm_proba = clf.predict_proba(X_test)[:, 1]
    svm_prediction = clf.predict(X_test)
    svm_unknown_sample_predicition = clf.predict(X_unknown)
    svm_y_score = clf.decision_function(X_test)
    svm_roc_auc_avg = roc_auc_score(y_test, svm_prediction)
    svm_cross_validation_scores = cross_validate(clf, X_test, y_test, cv=5, scoring=["accuracy", "balanced_accuracy","precision","f1","recall","roc_auc"], return_train_score=True)
    svm_cross_validation_score = cross_val_score(clf, X_test, y_test, cv=5, scoring="accuracy")
    svm_precision, svm_recall, svm_threshold = precision_recall_curve(y_test, clf.decision_function(X_test))
    svm_close_zero = np.argmin(np.abs(svm_threshold))
    svm_fpr = dict()
    svm_tpr = dict()
    svm_roc_auc = dict()
    #svm_fpr[0], svm_tpr[0], _ = roc_curve(y_test, svm_prediction)
    svm_fpr[0], svm_tpr[0], _ = roc_curve(y_test, svm_proba)
    #svm_fpr[1], svm_tpr[1], _ = roc_curve(y_test[:,1], svm_y_score[:, 1])
    svm_roc_auc[0] = auc(svm_fpr[0], svm_tpr[0])
    # Compute micro-average ROC curve and ROC area
    svm_fpr["micro"], svm_tpr["micro"], _ = roc_curve(y_test.ravel(), svm_proba.ravel())
    svm_roc_auc["micro"] = auc(svm_fpr["micro"], svm_tpr["micro"])
    print("\n\n|---- SVM 10-fold cross validation accuracy score:{}".format(np.mean(svm_cross_validation_score)))
    # Logistic regression classifier
    logreg = LogisticRegression(max_iter=4000).fit(X_train, y_train)
    lr_prediction = logreg.predict(X_test)
    lr_unknown_predictions = logreg.predict(X_unknown)
    lr_proba = logreg.predict_proba(X_test)[:, 1]
    lr_decision_function = logreg.decision_function(X_test)
    lr_cross_validation_scores = cross_validate(logreg, X_test, y_test, cv=5 , scoring=["accuracy", "balanced_accuracy", "precision", "f1", "recall","roc_auc"], n_jobs=-1, return_train_score=True)
    lr_cross_validation_score = cross_val_score(logreg, X_test, y_test, cv=5 , scoring="accuracy")
    # NOTE(review): lr_roc_auc is assigned a float here and immediately
    # overwritten by the dict below -- the scalar value is lost.
    lr_roc_auc = roc_auc_score(y_test, lr_prediction)
    lr_fpr = dict()
    lr_tpr = dict()
    lr_roc_auc = dict()
    lr_fpr[0], lr_tpr[0], _ = roc_curve(y_test, lr_proba)
    lr_roc_auc[0] = auc(lr_fpr[0], lr_tpr[0])
    lr_fpr["micro"], lr_tpr["micro"], _ = roc_curve(y_test.ravel(), lr_proba.ravel())
    lr_roc_auc["micro"] = auc(lr_fpr["micro"], lr_tpr["micro"])
    average_precision = average_precision_score(y_test, lr_decision_function)
    precision, recall, threshold = precision_recall_curve(y_test, lr_decision_function)
    precision1, recall1, f1, supp = precision_recall_fscore_support(y_test, lr_prediction, average="weighted", zero_division=1)
    print("\n\n|---- LR 10-fold cross validation accuracy score:{}".format(np.mean(lr_cross_validation_score)))
    print(classification_report(y_test, lr_prediction, zero_division=1))
    # Random forest classifier
    rf_clf = RandomForestClassifier(max_depth=2, random_state=0)
    rf_clf.fit(X_train, y_train)
    rf_prediction = rf_clf.predict(X_test)
    rf_unknown_prediction = rf_clf.predict(X_unknown)
    rf_proba = rf_clf.predict_proba(X_test)[:, 1]
    rf_fpr = dict()
    rf_tpr = dict()
    rf_roc_auc = dict()
    rf_fpr[0], rf_tpr[0], _ = roc_curve(y_test, rf_prediction)
    rf_roc_auc[0] = auc(rf_fpr[0], rf_tpr[0])
    rf_fpr["micro"], rf_tpr["micro"], _ = roc_curve(y_test.ravel(), rf_prediction.ravel())
    rf_roc_auc["micro"] = auc(rf_fpr["micro"], rf_tpr["micro"])
    rf_precision, rf_recall, rf_threshold = precision_recall_curve(y_test, rf_prediction)
    rf_cross_validation_score = cross_val_score(rf_clf, X_test, y_test, cv=5 , scoring="accuracy")
    print("\n\n|---- RF 10-fold cross validation accuracy score: {}", rf_cross_validation_score.mean())
    print(classification_report(y_test,rf_prediction))
    # Decision tree classifier -- the model whose predictions are
    # ultimately returned (via verify_predictions below).
    dt_clf = DecisionTreeClassifier()
    dt_clf.fit(X_train, y_train)
    dt_prediction = dt_clf.predict(X_test)
    dt_unknown_prediction = dt_clf.predict(X_unknown)
    dt_proba = dt_clf.predict_proba(X_test)[:, 1]
    dt_fpr = dict()
    dt_tpr = dict()
    dt_roc_auc = dict()
    dt_fpr[0], dt_tpr[0], _ = roc_curve(y_test, dt_prediction)
    dt_roc_auc[0] = auc(dt_fpr[0], dt_tpr[0])
    dt_fpr["micro"], dt_tpr["micro"], _ = roc_curve(y_test.ravel(), dt_prediction.ravel())
    dt_roc_auc["micro"] = auc(dt_fpr["micro"], dt_tpr["micro"])
    dt_precision, dt_recall, dt_threshold = precision_recall_curve(y_test, dt_prediction)
    dt_cross_validation_score = cross_val_score(dt_clf, X_test, y_test, cv=5 , scoring="accuracy")
    print("\n\n|---- DT 10-fold cross validation accuracy score:{} ", dt_cross_validation_score.mean())
    print("\nDT score: ", dt_clf.score(X_test, y_test), "\nDT classification report\n\n", classification_report(y_test, dt_prediction), export_text(dt_clf, show_weights=True))
    print("DT y_predictions: ", dt_prediction, "y_test: ", y_test)
    # Verify predictions with the true labels
    verified_predictions_SHA256_list = verify_predictions(dt_prediction, y_test)
    # Unseen samples predictions
    """
    # Draw AuC RoC
    roc_plt = plt
    roc_plt.figure()
    lw = 2
    roc_plt.plot(svm_fpr[0], svm_tpr[0], color='red', lw=lw, label='Support vector machine ROC curve (area = %0.2f)' % svm_roc_auc[0])
    roc_plt.plot(lr_fpr[0], lr_tpr[0], color='yellow', lw=lw, label='Logistic regression ROC curve (area = %0.2f)' % lr_roc_auc[0])
    roc_plt.plot(mnb_fpr[0], mnb_tpr[0], color='green', lw=lw, label='Multinomial naive Bayes ROC curve (area = %0.2f)' % mnb_roc_auc[0])
    roc_plt.plot(rf_fpr[0], rf_tpr[0], color='blue', lw=lw, label='Random Forest ROC curve (area = %0.2f)' % rf_roc_auc[0])
    roc_plt.plot(dt_fpr[0], dt_tpr[0], color='purple', lw=lw, label='Decision tree ROC curve (area = %0.2f)' % dt_roc_auc[0])
    roc_plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
    roc_plt.xlim([0.0, 1.0])
    roc_plt.ylim([0.0, 1.05])
    roc_plt.xlabel('False Positive Rate')
    roc_plt.ylabel('True Positive Rate')
    roc_plt.title('Receiver operating characteristic.')
    roc_plt.legend(loc="lower right")
    roc_plt.grid(True)
    #fig_file = str(datetime.datetime.now() + ".png"
    roc_plt.savefig("roc.tiff", format="tiff")
    # Plot precision and recall graph
    plt.plot(precision, recall, label="Logistic regression")
    plt.plot(svm_precision, svm_recall, label="Support vector machine")
    plt.plot(mnb_precision, mnb_recall, label="Multinomial naive Bayes")
    plt.plot(rf_precision, rf_recall, label="Random forest")
    plt.plot(dt_precision, dt_recall, label="Decision tree")
    plt.xlabel("Precision")
    plt.ylabel("Recall")
    plt.legend(loc="best")
    fig2_file = str(datetime.datetime.now()) + ".tiff"
    plt.savefig(fig2_file, format="tiff")
    """
    return verified_predictions_SHA256_list
def verify_predictions(X_predictions_list, y_true):
    """Compare predicted labels against true labels element-wise.

    Parameters
    ----------
    X_predictions_list : sequence of int-like
        Predicted class labels, aligned positionally with ``y_true``.
    y_true : pandas.Series
        True labels indexed by sample identifier (SHA256 hash).

    Returns
    -------
    list
        Index values (SHA256 hashes) whose prediction matches the
        corresponding true label.
    """
    verified_predictions_SHA256_list = []
    # Bug fix: the original read X_predictions_list[0] once (before the
    # loop) and compared that single value against every true label.
    # Pair each prediction with its true label instead.
    for X_prediction, (y_index, y_value) in zip(X_predictions_list,
                                                y_true.items()):
        if int(X_prediction) == y_value:
            print("|--- Prediction matches the true label on file with SHA256: ", y_index)
            verified_predictions_SHA256_list.append(y_index)
    return verified_predictions_SHA256_list
# Script entry point.
# NOTE(review): ``docopt`` is never imported and ``main`` is not defined
# anywhere in this file, so executing the script raises NameError.  The
# CLI wiring appears to have been copied from another module ("retomos")
# and needs to be restored -- confirm the intended entry point.
if __name__ == "__main__":
    arguments = docopt(__doc__, version='retomos 0.1')
    main(arguments)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.