id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
1890761 | <reponame>haribo0915/Spring-Cloud-in-Python
# -*- coding: utf-8 -*-
# standard library
from abc import ABC, abstractmethod
from typing import Optional
__author__ = "Waterball (<EMAIL>)"
__license__ = "Apache 2.0"
__all__ = ["PathElement"]
class PathElement(ABC):
    """A node in a doubly-linked chain of path-pattern elements.

    Each element knows its position within the overall pattern and the
    separator character, and is linked to its previous/next neighbor.
    """

    def __init__(self, pos: int, separator):
        # Position of this element within the overall pattern.
        self.pos = pos
        # Separator character (e.g. "/") used by the pattern.
        self.separator = separator
        # Doubly-linked-list neighbors; None at either end of the chain.
        self.next: Optional[PathElement] = None
        self.prev: Optional[PathElement] = None

    @abstractmethod
    def matches(self, path_index: int, matching_context) -> bool:
        """Return True if this element matches the path at path_index."""
        # Bug fix: the original "raise NotImplemented" raised a TypeError
        # (NotImplemented is a constant, not an exception class);
        # NotImplementedError is the correct exception.
        raise NotImplementedError

    @property
    @abstractmethod
    def text(self) -> str:
        """The literal text this element represents."""
        raise NotImplementedError

    def has_no_next_element(self) -> bool:
        """Return True if this is the last element of the chain."""
        return self.next is None
| StarcoderdataPython |
3345208 | <reponame>MeganBeckett/great_expectations<gh_stars>0
import copy
import itertools
from abc import ABC, abstractmethod
from dataclasses import asdict, dataclass, make_dataclass
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import great_expectations.exceptions as ge_exceptions
from great_expectations.core.batch import Batch, BatchRequest, RuntimeBatchRequest
from great_expectations.core.util import convert_to_json_serializable
from great_expectations.rule_based_profiler.helpers.util import (
build_metric_domain_kwargs,
)
from great_expectations.rule_based_profiler.helpers.util import (
get_batch_ids as get_batch_ids_from_batch_list_or_batch_request,
)
from great_expectations.rule_based_profiler.helpers.util import (
get_parameter_value_and_validate_return_type,
)
from great_expectations.rule_based_profiler.helpers.util import (
get_validator as get_validator_using_batch_list_or_batch_request,
)
from great_expectations.rule_based_profiler.types import (
Attributes,
Builder,
Domain,
ParameterContainer,
build_parameter_container,
)
from great_expectations.types import SerializableDictDot
from great_expectations.validator.metric_configuration import MetricConfiguration
# TODO: <Alex>These are placeholder types, until a formal metric computation state class is made available.</Alex>
# A single metric measurement: scalar, list, or numpy array.
MetricValue = Union[Any, List[Any], np.ndarray]
# A collection of metric measurements (e.g., one per Batch of data).
MetricValues = Union[MetricValue, np.ndarray]
# Free-form metadata describing how the metrics were computed.
MetricComputationDetails = Dict[str, Any]
# Lightweight result container: "metric_values" holds the data samples,
# "details" holds the computation metadata.
MetricComputationResult = make_dataclass(
    "MetricComputationResult", ["metric_values", "details"]
)
@dataclass
class AttributedResolvedMetrics(SerializableDictDot):
    """
    This class facilitates computing multiple metrics as one operation.

    In order to gather results pertaining to diverse MetricConfiguration directives, computed metrics are augmented
    with uniquely identifiable attribution object so that receivers can filter them from overall resolved metrics.
    """

    # Accumulated metric measurements (one entry per resolved MetricConfiguration).
    metric_values: MetricValues
    # The "metric_value_kwargs" used to compute the metrics; identifies this group of results.
    metric_attributes: Attributes

    def add_resolved_metric(self, value: Any) -> None:
        """Append one resolved metric value to the accumulator."""
        # Lazily initialize so a None-initialized instance still works.
        if self.metric_values is None:
            self.metric_values = []

        self.metric_values.append(value)

    @property
    def id(self) -> str:
        # Deterministic identifier derived from the metric attributes.
        return self.metric_attributes.to_id()

    def to_dict(self) -> dict:
        """Return this object as a plain dictionary."""
        return asdict(self)

    def to_json_dict(self) -> dict:
        """Return this object as a JSON-serializable dictionary."""
        return convert_to_json_serializable(data=self.to_dict())
class ParameterBuilder(Builder, ABC):
    """
    A ParameterBuilder implementation provides support for building Expectation Configuration Parameters suitable for
    use in other ParameterBuilders or in ConfigurationBuilders as part of profiling.

    A ParameterBuilder is configured as part of a ProfilerRule. Its primary interface is the `build_parameters` method.

    As part of a ProfilerRule, the following configuration will create a new parameter for each domain returned by the
    domain_builder, with an associated id.

    ```
    parameter_builders:
      - name: my_parameter_builder
        class_name: MetricMultiBatchParameterBuilder
        metric_name: column.mean
    ```
    """

    def __init__(
        self,
        name: str,
        json_serialize: Union[str, bool] = True,
        batch_list: Optional[List[Batch]] = None,
        batch_request: Optional[
            Union[str, BatchRequest, RuntimeBatchRequest, dict]
        ] = None,
        data_context: Optional["DataContext"] = None,  # noqa: F821
    ):
        """
        The ParameterBuilder will build parameters for the active domain from the rule.

        Args:
            name: the name of this parameter builder -- this is user-specified parameter name (from configuration);
            it is not the fully-qualified parameter name; a fully-qualified parameter name must start with "$parameter."
            and may contain one or more subsequent parts (e.g., "$parameter.<my_param_from_config>.<metric_name>").
            json_serialize: If True (default), convert computed value to JSON prior to saving results.
            batch_list: explicitly passed Batch objects for parameter computation (take precedence over batch_request).
            batch_request: specified in ParameterBuilder configuration to get Batch objects for parameter computation.
            data_context: DataContext
        """
        super().__init__(
            batch_list=batch_list,
            batch_request=batch_request,
            data_context=data_context,
        )

        self._name = name
        self._json_serialize = json_serialize

    def build_parameters(
        self,
        parameter_container: ParameterContainer,
        domain: Domain,
        variables: Optional[ParameterContainer] = None,
        parameters: Optional[Dict[str, ParameterContainer]] = None,
        parameter_computation_impl: Optional[Callable] = None,
        json_serialize: Optional[bool] = None,
    ) -> None:
        """
        Public entry point: computes the parameter value (via "parameter_computation_impl" or the default
        "_build_parameters" implementation), optionally converts it to JSON, and stores it -- together with its
        computation details -- into "parameter_container" under this builder's fully-qualified parameter name.

        Args:
            parameter_container: destination container for the computed parameter value.
            domain: Domain object scoping "$variable"/"$parameter"-style references in configuration and runtime.
            variables: Part of the "rule state" available for "$variable"-style references.
            parameters: Part of the "rule state" available for "$parameter"-style references.
            parameter_computation_impl: optional override of the parameter computation routine.
            json_serialize: optional override of the instance-level "json_serialize" directive.
        """
        computed_parameter_value: Any
        parameter_computation_details: dict

        # Default to this builder's own computation method if no override is supplied.
        if parameter_computation_impl is None:
            parameter_computation_impl = self._build_parameters

        (
            computed_parameter_value,
            parameter_computation_details,
        ) = parameter_computation_impl(
            parameter_container=parameter_container,
            domain=domain,
            variables=variables,
            parameters=parameters,
        )

        if json_serialize is None:
            # Obtain json_serialize directive from "rule state" (i.e., variables and parameters); from instance variable otherwise.
            json_serialize = get_parameter_value_and_validate_return_type(
                domain=domain,
                parameter_reference=self.json_serialize,
                expected_return_type=bool,
                variables=variables,
                parameters=parameters,
            )

        parameter_values: Dict[str, Any] = {
            self.fully_qualified_parameter_name: {
                "value": convert_to_json_serializable(data=computed_parameter_value)
                if json_serialize
                else computed_parameter_value,
                "details": parameter_computation_details,
            },
        }

        build_parameter_container(
            parameter_container=parameter_container, parameter_values=parameter_values
        )

    @property
    @abstractmethod
    def fully_qualified_parameter_name(self) -> str:
        """Fully-qualified name under which the computed parameter is stored (starts with "$parameter.")."""
        pass

    @property
    def name(self) -> str:
        """User-specified (configuration) name of this parameter builder."""
        return self._name

    @property
    def json_serialize(self) -> Union[str, bool]:
        """Directive (bool or "$variable"-style reference) controlling JSON conversion of the computed value."""
        return self._json_serialize

    @abstractmethod
    def _build_parameters(
        self,
        parameter_container: ParameterContainer,
        domain: Domain,
        variables: Optional[ParameterContainer] = None,
        parameters: Optional[Dict[str, ParameterContainer]] = None,
    ) -> Tuple[Any, dict]:
        """
        Builds ParameterContainer object that holds ParameterNode objects with attribute name-value pairs and optional
        details.

        return: Tuple containing computed_parameter_value and parameter_computation_details metadata.
        """
        pass

    def get_validator(
        self,
        domain: Optional[Domain] = None,
        variables: Optional[ParameterContainer] = None,
        parameters: Optional[Dict[str, ParameterContainer]] = None,
    ) -> Optional["Validator"]:  # noqa: F821
        # Validator is obtained from this builder's batch_list/batch_request (batch_list takes precedence).
        return get_validator_using_batch_list_or_batch_request(
            purpose="parameter_builder",
            data_context=self.data_context,
            batch_list=self.batch_list,
            batch_request=self.batch_request,
            domain=domain,
            variables=variables,
            parameters=parameters,
        )

    def get_batch_ids(
        self,
        domain: Optional[Domain] = None,
        variables: Optional[ParameterContainer] = None,
        parameters: Optional[Dict[str, ParameterContainer]] = None,
    ) -> Optional[List[str]]:
        # Batch identifiers come from this builder's batch_list/batch_request (batch_list takes precedence).
        return get_batch_ids_from_batch_list_or_batch_request(
            data_context=self.data_context,
            batch_list=self.batch_list,
            batch_request=self.batch_request,
            domain=domain,
            variables=variables,
            parameters=parameters,
        )

    def get_metrics(
        self,
        metric_name: str,
        metric_domain_kwargs: Optional[
            Union[Union[str, dict], List[Union[str, dict]]]
        ] = None,
        metric_value_kwargs: Optional[
            Union[Union[str, dict], List[Union[str, dict]]]
        ] = None,
        enforce_numeric_metric: Union[str, bool] = False,
        replace_nan_with_zero: Union[str, bool] = False,
        domain: Optional[Domain] = None,
        variables: Optional[ParameterContainer] = None,
        parameters: Optional[Dict[str, ParameterContainer]] = None,
    ) -> MetricComputationResult:
        """
        General multi-batch metric computation facility.

        Computes specified metric (can be multi-dimensional, numeric, non-numeric, or mixed) and conditions (or
        "sanitizes") result according to two criteria: enforcing metric output to be numeric and handling NaN values.
        :param metric_name: Name of metric of interest, being computed.
        :param metric_domain_kwargs: Metric Domain Kwargs is an essential parameter of the MetricConfiguration object.
        :param metric_value_kwargs: Metric Value Kwargs is an essential parameter of the MetricConfiguration object.
        :param enforce_numeric_metric: Flag controlling whether or not metric output must be numerically-valued.
        :param replace_nan_with_zero: Directive controlling how NaN metric values, if encountered, should be handled.
        :param domain: Domain object scoping "$variable"/"$parameter"-style references in configuration and runtime.
        :param variables: Part of the "rule state" available for "$variable"-style references.
        :param parameters: Part of the "rule state" available for "$parameter"-style references.
        :return: MetricComputationResult object, containing both: data samples in the format "N x R^m", where "N" (most
        significant dimension) is the number of measurements (e.g., one per Batch of data), while "R^m" is the
        multi-dimensional metric, whose values are being estimated, and details (to be used for metadata purposes).
        """
        batch_ids: Optional[List[str]] = self.get_batch_ids(
            domain=domain,
            variables=variables,
            parameters=parameters,
        )
        if not batch_ids:
            raise ge_exceptions.ProfilerExecutionError(
                message=f"Utilizing a {self.__class__.__name__} requires a non-empty list of batch identifiers."
            )

        """
        Compute metrics, corresponding to multiple "MetricConfiguration" directives, together, rather than individually.

        As a strategy, since "metric_domain_kwargs" changes depending on "batch_id", "metric_value_kwargs" serves as
        identifying entity (through "AttributedResolvedMetrics") for accessing resolved metrics (computation results).

        All "MetricConfiguration" directives are generated by combining each metric_value_kwargs" with
        "metric_domain_kwargs" for all "batch_ids" (where every "metric_domain_kwargs" represents separate "batch_id").
        Then, all "MetricConfiguration" objects, collected into list as container, are resolved simultaneously.
        """

        # First: Gather "metric_domain_kwargs" (corresponding to "batch_ids").

        domain_kwargs: dict = build_metric_domain_kwargs(
            batch_id=None,
            metric_domain_kwargs=metric_domain_kwargs,
            domain=domain,
            variables=variables,
            parameters=parameters,
        )

        batch_id: str

        metric_domain_kwargs = [
            copy.deepcopy(
                build_metric_domain_kwargs(
                    batch_id=batch_id,
                    metric_domain_kwargs=copy.deepcopy(domain_kwargs),
                    domain=domain,
                    variables=variables,
                    parameters=parameters,
                )
            )
            for batch_id in batch_ids
        ]

        # Second: Gather "metric_value_kwargs" (caller may require same metric computed for multiple arguments).

        if not isinstance(metric_value_kwargs, list):
            metric_value_kwargs = [metric_value_kwargs]

        value_kwargs_cursor: dict
        metric_value_kwargs = [
            # Obtain value kwargs from "rule state" (i.e., variables and parameters); from instance variable otherwise.
            get_parameter_value_and_validate_return_type(
                domain=domain,
                parameter_reference=value_kwargs_cursor,
                expected_return_type=None,
                variables=variables,
                parameters=parameters,
            )
            for value_kwargs_cursor in metric_value_kwargs
        ]

        # Third: Generate "MetricConfiguration" directives for all "metric_domain_kwargs" / "metric_value_kwargs" pairs.

        domain_kwargs_cursor: dict
        kwargs_combinations: List[List[dict]] = [
            [domain_kwargs_cursor, value_kwargs_cursor]
            for value_kwargs_cursor in metric_value_kwargs
            for domain_kwargs_cursor in metric_domain_kwargs
        ]

        # NOTE: annotation corrected -- the original "List[dict, dict]" is not a valid typing construct;
        # each cursor is a two-element list of dicts ([domain_kwargs, value_kwargs]).
        kwargs_pair_cursor: List[dict]
        metrics_to_resolve: List[MetricConfiguration] = [
            MetricConfiguration(
                metric_name=metric_name,
                metric_domain_kwargs=kwargs_pair_cursor[0],
                metric_value_kwargs=kwargs_pair_cursor[1],
                metric_dependencies=None,
            )
            for kwargs_pair_cursor in kwargs_combinations
        ]

        # Fourth: Resolve all metrics in one operation simultaneously.

        # The Validator object used for metric calculation purposes.
        validator: "Validator" = self.get_validator(  # noqa: F821
            domain=domain,
            variables=variables,
            parameters=parameters,
        )

        resolved_metrics: Dict[Tuple[str, str, str], Any] = validator.compute_metrics(
            metric_configurations=metrics_to_resolve
        )

        # Fifth: Map resolved metrics to their attributes for identification and recovery by receiver.

        metric_configuration: MetricConfiguration
        attributed_resolved_metrics_map: Dict[str, AttributedResolvedMetrics] = {}
        for metric_configuration in metrics_to_resolve:
            attributed_resolved_metrics: AttributedResolvedMetrics = (
                attributed_resolved_metrics_map.get(
                    metric_configuration.metric_value_kwargs_id
                )
            )
            if attributed_resolved_metrics is None:
                attributed_resolved_metrics = AttributedResolvedMetrics(
                    metric_attributes=metric_configuration.metric_value_kwargs,
                    metric_values=[],
                )
                attributed_resolved_metrics_map[
                    metric_configuration.metric_value_kwargs_id
                ] = attributed_resolved_metrics

            # NOTE: annotation corrected -- the resolved value is the metric result (Any),
            # not the (metric_name, domain_id, value_id) key tuple.
            resolved_metric_value: Optional[Any] = resolved_metrics.get(
                metric_configuration.id
            )
            if resolved_metric_value is None:
                raise ge_exceptions.ProfilerExecutionError(
                    f"{metric_configuration.id[0]} was not found in the resolved Metrics for ParameterBuilder."
                )

            attributed_resolved_metrics.add_resolved_metric(value=resolved_metric_value)

        metric_attributes_id: str
        metric_values: AttributedResolvedMetrics

        # Sixth: Leverage Numpy Array capabilities for subsequent operations on results of computed/resolved metrics.
        attributed_resolved_metrics_map = {
            metric_attributes_id: AttributedResolvedMetrics(
                metric_attributes=metric_values.metric_attributes,
                metric_values=np.array(metric_values.metric_values),
            )
            for metric_attributes_id, metric_values in attributed_resolved_metrics_map.items()
        }

        # Seventh: Convert scalar metric values to vectors to enable uniformity of processing in subsequent operations.
        idx: int
        for (
            metric_attributes_id,
            metric_values,
        ) in attributed_resolved_metrics_map.items():
            if metric_values.metric_values.ndim == 1:
                metric_values.metric_values = [
                    [metric_values.metric_values[idx]] for idx in range(len(batch_ids))
                ]
                metric_values.metric_values = np.array(metric_values.metric_values)
                attributed_resolved_metrics_map[metric_attributes_id] = metric_values

        # Eighth: Apply numeric/hygiene directives (e.g., "enforce_numeric_metric", "replace_nan_with_zero") to results.
        for (
            metric_attributes_id,
            metric_values,
        ) in attributed_resolved_metrics_map.items():
            self._sanitize_metric_computation(
                metric_name=metric_name,
                metric_values=metric_values.metric_values,
                enforce_numeric_metric=enforce_numeric_metric,
                replace_nan_with_zero=replace_nan_with_zero,
                domain=domain,
                variables=variables,
                parameters=parameters,
            )

        # Ninth: Compose and return result to receiver (apply simplifications to cases of single "metric_value_kwargs").
        return MetricComputationResult(
            list(attributed_resolved_metrics_map.values()),
            details={
                "metric_configuration": {
                    "metric_name": metric_name,
                    "domain_kwargs": domain_kwargs,
                    "metric_value_kwargs": metric_value_kwargs[0]
                    if len(metric_value_kwargs) == 1
                    else metric_value_kwargs,
                    "metric_dependencies": None,
                },
                "num_batches": len(batch_ids),
            },
        )

    def _sanitize_metric_computation(
        self,
        metric_name: str,
        metric_values: np.ndarray,
        enforce_numeric_metric: Union[str, bool] = False,
        replace_nan_with_zero: Union[str, bool] = False,
        domain: Optional[Domain] = None,
        variables: Optional[ParameterContainer] = None,
        parameters: Optional[Dict[str, ParameterContainer]] = None,
    ) -> np.ndarray:
        """
        This method conditions (or "sanitizes") data samples in the format "N x R^m", where "N" (most significant
        dimension) is the number of measurements (e.g., one per Batch of data), while "R^m" is the multi-dimensional
        metric, whose values are being estimated. The "conditioning" operations are:
        1. If "enforce_numeric_metric" flag is set, raise an error if a non-numeric value is found in sample vectors.
        2. Further, if a NaN is encountered in a sample vectors and "replace_nan_with_zero" is True, then replace those
        NaN values with the 0.0 floating point number; if "replace_nan_with_zero" is False, then raise an error.
        """
        # Obtain enforce_numeric_metric from "rule state" (i.e., variables and parameters); from instance variable otherwise.
        enforce_numeric_metric = get_parameter_value_and_validate_return_type(
            domain=domain,
            parameter_reference=enforce_numeric_metric,
            expected_return_type=bool,
            variables=variables,
            parameters=parameters,
        )

        # Obtain replace_nan_with_zero from "rule state" (i.e., variables and parameters); from instance variable otherwise.
        replace_nan_with_zero = get_parameter_value_and_validate_return_type(
            domain=domain,
            parameter_reference=replace_nan_with_zero,
            expected_return_type=bool,
            variables=variables,
            parameters=parameters,
        )

        # Outer-most dimension is data samples (e.g., one per Batch); the rest are dimensions of the actual metric.
        metric_value_shape: tuple = metric_values.shape[1:]

        # Generate all permutations of indexes for accessing every element of the multi-dimensional metric.
        metric_value_shape_idx: int
        axes: List[np.ndarray] = [
            np.indices(dimensions=(metric_value_shape_idx,))[0]
            for metric_value_shape_idx in metric_value_shape
        ]
        metric_value_indices: List[tuple] = list(itertools.product(*tuple(axes)))

        # Generate all permutations of indexes for accessing estimates of every element of the multi-dimensional metric.
        # Prefixing multi-dimensional index with "(slice(None, None, None),)" is equivalent to "[:,]" access.
        metric_value_idx: tuple
        metric_value_vector_indices: List[tuple] = [
            (slice(None, None, None),) + metric_value_idx
            for metric_value_idx in metric_value_indices
        ]

        # Traverse indices of sample vectors corresponding to every element of multi-dimensional metric.
        metric_value_vector: np.ndarray
        for metric_value_idx in metric_value_vector_indices:
            # Obtain "N"-element-long vector of samples for each element of multi-dimensional metric.
            metric_value_vector = metric_values[metric_value_idx]
            if enforce_numeric_metric:
                if not np.issubdtype(metric_value_vector.dtype, np.number):
                    raise ge_exceptions.ProfilerExecutionError(
                        message=f"""Applicability of {self.__class__.__name__} is restricted to numeric-valued metrics \
(value of type "{str(metric_value_vector.dtype)}" was computed).
"""
                    )

                if np.any(np.isnan(metric_value_vector)):
                    if not replace_nan_with_zero:
                        raise ValueError(
                            f"""Computation of metric "{metric_name}" resulted in NaN ("not a number") value.
"""
                        )

                    # In-place replacement of NaN samples with 0.0.
                    np.nan_to_num(metric_value_vector, copy=False, nan=0.0)

        return metric_values
| StarcoderdataPython |
"""
:mod:`zsl.utils.email_helper`
-----------------------------
"""
from __future__ import unicode_literals
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import smtplib
from zsl import Config, Injected, inject
@inject(config=Config)
def send_email(sender, receivers, subject, text=None, html=None, charset='utf-8', config=Injected):
    """Sends an email.

    :param sender: Sender as string or None for default got from config.
    :param receivers: String or array of recipients.
    :param subject: Subject.
    :param text: Plain text message.
    :param html: Html message.
    :param charset: Charset.
    :param config: Current configuration
    :raises ValueError: if neither ``text`` nor ``html`` is given.
    """
    smtp_config = config['SMTP']

    # Receivers must be a list.
    if not isinstance(receivers, (list, tuple)):
        receivers = [receivers]

    # Create the message parts (plain-text and/or HTML alternatives).
    msgs = []
    if text is not None:
        msgs.append(MIMEText(text, 'plain', charset))
    if html is not None:
        msgs.append(MIMEText(html, 'html', charset))

    if not msgs:
        # ValueError is more specific than the original bare Exception and is
        # still caught by any caller handling Exception (backward compatible).
        raise ValueError("No message is given.")

    # A single part is sent as-is; multiple parts are wrapped in a multipart message.
    if len(msgs) == 1:
        msg = msgs[0]
    else:
        msg = MIMEMultipart()
        for m in msgs:
            msg.attach(m)

    # Default sender comes from the configuration.
    if sender is None:
        sender = smtp_config['SENDER']

    # Fill in the envelope headers.
    msg['Subject'] = subject
    msg['From'] = sender
    msg['To'] = ", ".join(receivers)

    # Send; ensure the SMTP connection is closed even if sendmail raises
    # (the original leaked the connection on error).
    smtp_server = smtplib.SMTP(**(smtp_config['SERVER']))
    try:
        smtp_server.sendmail(sender, receivers, msg.as_string())
    finally:
        smtp_server.quit()
| StarcoderdataPython |
from django_etuovi.utils.testing import check_dataclass_typing
from connections.etuovi.etuovi_mapper import map_apartment_to_item
from connections.tests.factories import ApartmentFactory, ApartmentMinimalFactory
def test__apartment__to_item_mapping_types():
    """A fully populated apartment maps to an Etuovi item whose fields are correctly typed."""
    mapped_item = map_apartment_to_item(ApartmentFactory())
    check_dataclass_typing(mapped_item)
def test__apartment_minimal__to_item_mapping_types():
    """A minimally populated apartment maps to an Etuovi item whose fields are correctly typed."""
    mapped_item = map_apartment_to_item(ApartmentMinimalFactory())
    check_dataclass_typing(mapped_item)
def test_elastic_to_etuovi_missing_apartment__project_holding_type():
    """Mapping must reject an apartment whose project_holding_type is missing."""
    elastic_apartment = ApartmentMinimalFactory(project_holding_type=None)
    try:
        map_apartment_to_item(elastic_apartment)
    except ValueError as e:
        # The error message must point at the missing field.
        assert "project_holding_type" in str(e)
    else:
        raise Exception("Missing project_holding_type should have thrown a ValueError")
def test_elastic_to_etuovi_missing_apartment__project_building_type():
    """Mapping must reject an apartment whose project_building_type is missing."""
    elastic_apartment = ApartmentMinimalFactory(project_building_type=None)
    try:
        map_apartment_to_item(elastic_apartment)
    except ValueError as e:
        # The error message must point at the missing field.
        assert "project_building_type" in str(e)
    else:
        raise Exception("Missing project_building_type should have thrown a ValueError")
| StarcoderdataPython |
# -*- coding: utf-8 -*-
# @Author: 何睿
# @Create Date: 2019-04-09 12:37:36
# @Last Modified by: 何睿
# @Last Modified time: 2019-04-09 15:57:00
from collections import Counter
class Solution:
    def topKFrequent(self, nums: [int], k: int) -> [int]:
        """Return the k most frequent elements of nums.

        Bucket-sort approach: O(n) time and space, where n == len(nums).
        """
        if not nums:
            # Guard: the original crashed on empty input (max() of an empty sequence).
            return []
        # Map each number to how many times it appears.
        frequency = Counter(nums)
        # bucket[t] collects every number that occurs exactly t times.
        bucket = {}
        for num, times in frequency.items():
            bucket.setdefault(times, []).append(num)
        result = []
        # Walk from the highest occurring frequency downwards, collecting numbers.
        for times in range(max(frequency.values()), 0, -1):
            if times in bucket:
                result.extend(bucket[times])
                # ">=" (the original used "==") so a tie that pushes past k still terminates.
                if len(result) >= k:
                    break
        # Trim in case the last frequency bucket overshot k.
        return result[:k]
4908601 | <reponame>emersonnobre/python-basics<filename>print.py
from datetime import datetime

register_log_file = None
try:
    # "r+" requires the file to already exist; the with-block guarantees the
    # handle is closed even if the write fails (the original only closed it
    # on the success path).
    with open("data/register_log.txt", "r+") as register_log_file:
        register = str(datetime.today()) + " || " + "Type the register >> 0"
        print(register, file=register_log_file)
except FileNotFoundError:
    print("The file was not found")

# Custom line terminator instead of the default newline.
print("Some content", end="@@\n")

# Printing a list without a for loop: argument unpacking (nice trick).
l = [2, 4, 2, 4, 3]
print(*l)

# Formatting output with the modulo (%) operator.
print("First value: %2d and seconde value: %5.2f" % (43, 06.4433))

# Formatting output with str.format.
print("First value: {0} and second value: {1:5.1f}".format(12, 0.432))

# Printing the contents of a dictionary via keyword unpacking.
data = dict(name="Brenda", verb="love")
print("I {verb} {name}".format(**data))

# Centering / left- / right-justifying a message with a fill character.
center_string = "I love geeksforgeeks"
print(center_string.center(40, '#'))
print(center_string.ljust(40, '-'))
print(center_string.rjust(40, '-'))
#!/usr/bin/env python
"""
Generate a file of X Mb with text, where X is fetched from the
command line.
"""
def generate(Mb, filename='tmp.dat'):
    """Write roughly ``Mb`` megabytes of synthetic text lines to ``filename``."""
    line = 'here is some line with a number %09d and no useful text\n'
    line_len = len(line) - 4 + 9            # '%09d' (4 chars) expands to 9 digits
    nlines = int(Mb*1000000/line_len)       # no of lines to generate Mb megabytes
    # Fixed: the original used the Python-2-only print statement and xrange,
    # and never closed the file on error; also fixed the "generting" typo.
    print('generating %d lines in %s' % (nlines, filename))
    with open(filename, 'w') as f:
        for i in range(nlines):
            f.write(line % i)
if __name__ == '__main__':
    import sys
    # First CLI argument: target file size in megabytes.
    generate(float(sys.argv[1]))
| StarcoderdataPython |
def moeda(p = 0, moeda = 'R$'):
    """Format a price with the currency symbol, using a comma as the decimal separator."""
    texto = f'{moeda}{p:.2f}'
    return texto.replace('.', ',')
def metade(p = 0, formato=False):
    """Return half of p; formatted as currency when formato is True."""
    resultado = p / 2
    if formato is False:
        return resultado
    return moeda(resultado)
def dobro(p = 0, formato=False):
    """Return double of p; formatted as currency when formato is True."""
    resultado = p * 2
    if formato is False:
        return resultado
    return moeda(resultado)
def aumentar(p = 0, taxa = 0, formato=False):
    """Increase p by taxa percent; formatted as currency when formato is True."""
    resultado = p * (1+taxa/100)
    if formato is False:
        return resultado
    return moeda(resultado)
def diminuir(p = 0, taxa = 0, formato=False):
    """Decrease p by taxa percent; formatted as currency when formato is True."""
    resultado = p - (p * taxa/100)
    if formato is False:
        return resultado
    return moeda(resultado)
#
# Copyright (C) 2013 Webvirtmgr.
#
import libvirt
import threading
import socket
from vrtManager import util
from libvirt import libvirtError
from vrtManager.rwlock import ReadWriteLock
# Connection type identifiers (stored per wvmConnection instance).
CONN_SOCKET = 4  # local UNIX socket (qemu:///system)
CONN_TLS = 3     # qemu+tls
CONN_SSH = 2     # qemu+ssh
CONN_TCP = 1     # qemu+tcp
# Default ports probed by wvmConnectionManager.host_is_up().
TLS_PORT = 16514
SSH_PORT = 22
TCP_PORT = 16509
# Default keepalive tuning (interval in seconds between pings, and number
# of missed pings before a connection is considered broken).
LIBVIRT_KEEPALIVE_INTERVAL = 5
LIBVIRT_KEEPALIVE_COUNT = 5
class wvmEventLoop(threading.Thread):
    """Daemon thread running libvirt's default event loop implementation."""

    def __init__(self, group=None, target=None, name=None, args=(), kwargs=None):
        # register the default event implementation
        # of libvirt, as we do not have an existing
        # event loop.
        libvirt.virEventRegisterDefaultImpl()

        if name is None:
            name = 'libvirt event loop'

        # Bug fix: the original used a mutable default argument (kwargs={}),
        # which is shared across all instances; create a fresh dict instead.
        if kwargs is None:
            kwargs = {}

        super(wvmEventLoop, self).__init__(group, target, name, args, kwargs)

        # we run this thread in daemon mode, so it does
        # not block shutdown of the server
        self.daemon = True

    def run(self):
        while True:
            # if this method will fail it raises libvirtError
            # we do not catch the exception here so it will show up
            # in the logs. Not sure when this call will ever fail
            libvirt.virEventRunDefaultImpl()
class wvmConnection(object):
    """
    class representing a single connection stored in the Connection Manager
    # to-do: may also need some locking to ensure to not connect simultaniously in 2 threads
    """

    def __init__(self, host, login, passwd, conn):
        """
        Sets all class attributes and tries to open the connection
        """
        # connection lock is used to lock all changes to the connection state attributes
        # (connection and last_error)
        self.connection_state_lock = threading.Lock()
        self.connection = None
        self.last_error = None

        # credentials
        self.host = host
        self.login = login
        # bug fix: the password was never stored (corrupted assignment in the original)
        self.passwd = passwd
        self.type = conn

        # connect
        self.connect()

    def connect(self):
        """(Re-)establish the libvirt connection according to self.type."""
        self.connection_state_lock.acquire()
        try:
            # recheck if we have a connection (it may have been
            if not self.connected:
                if self.type == CONN_TCP:
                    self.__connect_tcp()
                elif self.type == CONN_SSH:
                    self.__connect_ssh()
                elif self.type == CONN_TLS:
                    self.__connect_tls()
                elif self.type == CONN_SOCKET:
                    self.__connect_socket()
                else:
                    raise ValueError('"{type}" is not a valid connection type'.format(type=self.type))

                if self.connected:
                    # do some preprocessing of the connection:
                    #     * set keep alive interval
                    #     * set connection close/fail handler
                    try:
                        self.connection.setKeepAlive(connection_manager.keepalive_interval, connection_manager.keepalive_count)
                        try:
                            self.connection.registerCloseCallback(self.__connection_close_callback, None)
                        except Exception:
                            # Temporary fix for libvirt > libvirt-0.10.2-41
                            pass
                    except libvirtError as e:
                        # hypervisor driver does not seem to support persistent connections
                        self.last_error = str(e)
        finally:
            self.connection_state_lock.release()

    @property
    def connected(self):
        """True while an alive libvirt connection is held."""
        try:
            return self.connection is not None and self.connection.isAlive()
        except libvirtError:
            # isAlive failed for some reason
            return False

    def __libvirt_auth_credentials_callback(self, credentials, user_data):
        # Supplies the stored login/password to libvirt's auth machinery.
        for credential in credentials:
            if credential[0] == libvirt.VIR_CRED_AUTHNAME:
                credential[4] = self.login
                if len(credential[4]) == 0:
                    credential[4] = credential[3]
            elif credential[0] == libvirt.VIR_CRED_PASSPHRASE:
                credential[4] = self.passwd
            else:
                return -1
        return 0

    def __connection_close_callback(self, connection, reason, opaque=None):
        # Translates libvirt close reasons into a human-readable last_error.
        self.connection_state_lock.acquire()
        try:
            # on server shutdown libvirt module gets freed before the close callbacks are called
            # so we just check here if it is still present
            if libvirt is not None:
                if (reason == libvirt.VIR_CONNECT_CLOSE_REASON_ERROR):
                    self.last_error = 'connection closed: Misc I/O error'
                elif (reason == libvirt.VIR_CONNECT_CLOSE_REASON_EOF):
                    self.last_error = 'connection closed: End-of-file from server'
                elif (reason == libvirt.VIR_CONNECT_CLOSE_REASON_KEEPALIVE):
                    self.last_error = 'connection closed: Keepalive timer triggered'
                elif (reason == libvirt.VIR_CONNECT_CLOSE_REASON_CLIENT):
                    self.last_error = 'connection closed: Client requested it'
                else:
                    self.last_error = 'connection closed: Unknown error'

            # prevent other threads from using the connection (in the future)
            self.connection = None
        finally:
            self.connection_state_lock.release()

    def __connect_tcp(self):
        flags = [libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_PASSPHRASE]
        auth = [flags, self.__libvirt_auth_credentials_callback, None]
        uri = 'qemu+tcp://%s/system' % self.host

        try:
            self.connection = libvirt.openAuth(uri, auth, 0)
            self.last_error = None
        except libvirtError as e:
            self.last_error = 'Connection Failed: ' + str(e)
            self.connection = None

    def __connect_ssh(self, param=None):
        # param toggles TTY allocation in the ssh transport URI.
        if param:
            uri = 'qemu+ssh://%s@%s/system' % (self.login, self.host)
        else:
            uri = 'qemu+ssh://%s@%s/system?no_tty=1' % (self.login, self.host)

        try:
            self.connection = libvirt.open(uri)
            self.last_error = None
        except libvirtError as e:
            self.last_error = 'Connection Failed: ' + str(e) + ' --- ' + repr(libvirt.virGetLastError())
            self.connection = None

    def __connect_tls(self):
        flags = [libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_PASSPHRASE]
        auth = [flags, self.__libvirt_auth_credentials_callback, None]
        uri = 'qemu+tls://%s@%s/system' % (self.login, self.host)

        try:
            self.connection = libvirt.openAuth(uri, auth, 0)
            self.last_error = None
        except libvirtError as e:
            self.last_error = 'Connection Failed: ' + str(e)
            self.connection = None

    def __connect_socket(self):
        uri = 'qemu:///system'

        try:
            self.connection = libvirt.open(uri)
            self.last_error = None
        except libvirtError as e:
            self.last_error = 'Connection Failed: ' + str(e)
            self.connection = None

    def close(self):
        """
        closes the connection (if it is active)
        """
        self.connection_state_lock.acquire()
        try:
            if self.connected:
                try:
                    # to-do: handle errors?
                    self.connection.close()
                except libvirtError:
                    pass

            self.connection = None
            self.last_error = None
        finally:
            self.connection_state_lock.release()

    def __del__(self):
        if self.connection is not None:
            # unregister callback (as it is no longer valid if this instance gets deleted)
            try:
                self.connection.unregisterCloseCallback()
            except Exception:
                pass

    def __unicode__(self):
        if self.type == CONN_TCP:
            type_str = u'tcp'
        elif self.type == CONN_SSH:
            type_str = u'ssh'
        elif self.type == CONN_TLS:
            type_str = u'tls'
        else:
            type_str = u'invalid_type'

        return u'qemu+{type}://{user}@{host}/system'.format(type=type_str, user=self.login, host=self.host)

    def __repr__(self):
        # Bug fix: the original called str(self, 'utf-8'), which raises
        # TypeError on Python 3 (that form requires a bytes-like object).
        return '<wvmConnection {connection_str}>'.format(connection_str=self.__unicode__())
class wvmConnectionManager(object):
    """
    Maintains a shared pool of wvmConnection objects keyed by hostname so
    connections can be reused across threads and requests.
    """

    def __init__(self, keepalive_interval=5, keepalive_count=5):
        self.keepalive_interval = keepalive_interval
        self.keepalive_count = keepalive_count

        # connection dict
        # maps hostnames to a list of connection objects for this hostname
        # atm it is possible to create more than one connection per hostname
        # with different logins or auth methods
        # connections are shared between all threads, see:
        # http://wiki.libvirt.org/page/FAQ#Is_libvirt_thread_safe.3F
        self._connections = dict()
        self._connections_lock = ReadWriteLock()

        # start event loop to handle keepalive requests and other events
        self._event_loop = wvmEventLoop()
        self._event_loop.start()

    def _search_connection(self, host, login, passwd, conn):
        """
        search the connection dict for a connection with the given credentials
        if it does not exist return None
        """
        self._connections_lock.acquireRead()
        try:
            if host in self._connections:
                connections = self._connections[host]
                for connection in connections:
                    if connection.login == login and connection.passwd == passwd and connection.type == conn:
                        return connection
        finally:
            self._connections_lock.release()
        return None

    def get_connection(self, host, login, passwd, conn):
        """
        returns a connection object (as returned by the libvirt.open* methods) for the given host and credentials
        raises libvirtError if (re)connecting fails
        """
        connection = self._search_connection(host, login, passwd, conn)

        if connection is None:
            self._connections_lock.acquireWrite()
            try:
                # we have to search for the connection again after acquiring the write lock
                # as the thread previously holding the write lock may have already added our connection
                connection = self._search_connection(host, login, passwd, conn)
                if connection is None:
                    # create a new connection if a matching connection does not already exist
                    connection = wvmConnection(host, login, passwd, conn)

                    # add new connection to connection dict
                    if host in self._connections:
                        self._connections[host].append(connection)
                    else:
                        self._connections[host] = [connection]
            finally:
                self._connections_lock.release()
        elif not connection.connected:
            # try to (re-)connect if connection is closed
            connection.connect()

        if connection.connected:
            # return libvirt connection object
            return connection.connection
        else:
            # raise libvirt error
            raise libvirtError(connection.last_error)

    def host_is_up(self, conn_type, hostname):
        """
        returns True if the given host is up and we are able to establish
        a connection using the given credentials.

        NOTE(review): on failure the caught exception object is returned
        instead of False. Exception instances are truthy, so callers must
        compare the result against True explicitly; preserved for backward
        compatibility with existing callers that display the error.
        """
        try:
            socket_host = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            socket_host.settimeout(1)
            try:
                if conn_type == CONN_SSH:
                    if ':' in hostname:
                        # allow "host:port" to override the default SSH port
                        LIBVIRT_HOST, PORT = (hostname).split(":")
                        PORT = int(PORT)
                    else:
                        PORT = SSH_PORT
                        LIBVIRT_HOST = hostname
                    socket_host.connect((LIBVIRT_HOST, PORT))
                if conn_type == CONN_TCP:
                    socket_host.connect((hostname, TCP_PORT))
                if conn_type == CONN_TLS:
                    socket_host.connect((hostname, TLS_PORT))
            finally:
                # Bugfix: the socket previously leaked whenever connect()
                # raised; close it on every path.
                socket_host.close()
            return True
        except Exception as err:
            return err
connection_manager = wvmConnectionManager(LIBVIRT_KEEPALIVE_INTERVAL, LIBVIRT_KEEPALIVE_COUNT)
class wvmConnect(object):
    """
    Convenience wrapper exposing common libvirt queries on a connection that
    is obtained from (and shared via) the module-level connection_manager.
    """

    def __init__(self, host, login, passwd, conn):
        self.login = login
        self.host = host
        # Bugfix: this assignment had been mangled to the literal placeholder
        # "<PASSWORD>" (credential-scrubbing residue, a syntax error);
        # restored to store the passed-in password.
        self.passwd = passwd
        self.conn = conn

        # get connection from connection manager
        self.wvm = connection_manager.get_connection(host, login, passwd, conn)

    def get_cap_xml(self):
        """Return xml capabilities"""
        return self.wvm.getCapabilities()

    def is_kvm_supported(self):
        """Return KVM capabilities."""
        return util.is_kvm_available(self.get_cap_xml())

    def get_storages(self):
        """Return names of all storage pools, both active and defined."""
        storages = []
        for pool in self.wvm.listStoragePools():
            storages.append(pool)
        for pool in self.wvm.listDefinedStoragePools():
            storages.append(pool)
        return storages

    def get_networks(self):
        """Return names of all virtual networks, both active and defined."""
        virtnet = []
        for net in self.wvm.listNetworks():
            virtnet.append(net)
        for net in self.wvm.listDefinedNetworks():
            virtnet.append(net)
        return virtnet

    def get_ifaces(self):
        """Return names of all host interfaces, both active and defined."""
        interface = []
        for inface in self.wvm.listInterfaces():
            interface.append(inface)
        for inface in self.wvm.listDefinedInterfaces():
            interface.append(inface)
        return interface

    def get_iface(self, name):
        """Return the host interface object with the given name."""
        return self.wvm.interfaceLookupByName(name)

    def get_secrets(self):
        """Return the list of libvirt secret UUIDs."""
        return self.wvm.listSecrets()

    def get_secret(self, uuid):
        """Return the secret object for the given UUID string."""
        return self.wvm.secretLookupByUUIDString(uuid)

    def get_storage(self, name):
        """Return the storage pool object with the given name."""
        return self.wvm.storagePoolLookupByName(name)

    def get_volume_by_path(self, path):
        """Return the storage volume object located at the given path."""
        return self.wvm.storageVolLookupByPath(path)

    def get_network(self, net):
        """Return the virtual network object with the given name."""
        return self.wvm.networkLookupByName(net)

    def get_instance(self, name):
        """Return the domain (VM) object with the given name."""
        return self.wvm.lookupByName(name)

    def get_instances(self):
        """Return names of all domains: running ones (by ID) plus defined ones."""
        instances = []
        for inst_id in self.wvm.listDomainsID():
            dom = self.wvm.lookupByID(int(inst_id))
            instances.append(dom.name())
        for name in self.wvm.listDefinedDomains():
            instances.append(name)
        return instances

    def get_snapshots(self):
        """Return names of all domains that have at least one snapshot."""
        instance = []
        for snap_id in self.wvm.listDomainsID():
            dom = self.wvm.lookupByID(int(snap_id))
            if dom.snapshotNum(0) != 0:
                instance.append(dom.name())
        for name in self.wvm.listDefinedDomains():
            dom = self.wvm.lookupByName(name)
            if dom.snapshotNum(0) != 0:
                instance.append(dom.name())
        return instance

    def get_net_device(self):
        """Return host network device names found via node-device enumeration."""
        netdevice = []
        for dev in self.wvm.listAllDevices(0):
            xml = dev.XMLDesc(0)
            dev_type = util.get_xml_path(xml, '/device/capability/@type')
            if dev_type == 'net':
                netdevice.append(util.get_xml_path(xml, '/device/capability/interface'))
        return netdevice

    def get_host_instances(self):
        """
        Return {domain_name: (state, vcpu, memory_bytes, memory_usage_percent)}
        for every domain known to the host.
        """
        vname = {}
        # getInfo()[1] is host memory in MiB; convert to bytes.
        memory = self.wvm.getInfo()[1] * 1048576
        for name in self.get_instances():
            dom = self.get_instance(name)
            mem = util.get_xml_path(dom.XMLDesc(0), "/domain/currentMemory")
            mem = int(mem) * 1024
            mem_usage = (mem * 100) / memory
            # Prefer the 'current' vcpu attribute when present.
            cur_vcpu = util.get_xml_path(dom.XMLDesc(0), "/domain/vcpu/@current")
            if cur_vcpu:
                vcpu = cur_vcpu
            else:
                vcpu = util.get_xml_path(dom.XMLDesc(0), "/domain/vcpu")
            vname[dom.name()] = (dom.info()[0], vcpu, mem, mem_usage)
        return vname

    def close(self):
        """Close connection"""
        # to-do: do not close connection ;)
        # self.wvm.close()
        pass
1816859 | """
输入整数数组 arr ,找出其中最小的 k 个数。
例如,输入4、5、1、6、2、7、3、8这8个数字,
则最小的4个数字是1、2、3、4。
限制:
0 <= k <= arr.length <= 10000
0 <= arr[i] <= 10000
"""
from typing import List
from random import randint
class Solution:
    def getLeastNumbers(self, arr: List[int], k: int) -> List[int]:
        """
        Return the k smallest values of arr, in ascending order.

        O(n log n) via sorting. Improvement over the original: sorted()
        works on a copy, so the caller's list is no longer mutated as a
        side effect; the returned value is identical.
        """
        return sorted(arr)[:k]
class Solution2:
    """
    Quickselect, same idea as quicksort: pick a pivot and partition so that
    everything left of it is <= pivot and everything right is larger. Only
    recurse into the side that contains the k-th boundary, giving O(n)
    expected time. NOTE: after selection the first k elements of arr are
    the k smallest values, but in no particular order.
    """
    def partition(self, nums, l, r):
        # Lomuto partition: nums[r] is the pivot; returns its final index.
        pivot = nums[r]
        i = l - 1
        for j in range(l, r):
            if nums[j] <= pivot:
                i += 1
                nums[i], nums[j] = nums[j], nums[i]
        nums[i + 1], nums[r] = nums[r], nums[i + 1]
        return i + 1
    def randomized_partition(self, nums, l, r):
        # Swap a random element into the pivot slot to avoid the O(n^2)
        # worst case on adversarial (e.g. already sorted) input.
        i = randint(l, r)
        nums[r], nums[i] = nums[i], nums[r]
        return self.partition(nums, l, r)
    def randomized_selected(self, arr, l, r, k):
        # Recursively partition until exactly k elements sit in arr[l:l+k].
        pos = self.randomized_partition(arr, l, r)
        num = pos - l + 1  # how many elements (incl. pivot) ended up on the left
        if k < num:
            self.randomized_selected(arr, l, pos - 1, k)
        elif k > num:
            self.randomized_selected(arr, pos + 1, r, k - num)
    def getLeastNumbers(self, arr: List[int], k: int) -> List[int]:
        """Return the k smallest values of arr (order unspecified); arr is
        rearranged in place as a side effect."""
        if k == 0:
            return list()
        # randomized in-place selection of the k smallest elements
        self.randomized_selected(arr, 0, len(arr) - 1, k)
        return arr[:k]
if __name__ == "__main__":
    # Bugfix: quickselect only guarantees WHICH k elements are returned, not
    # their order, so the original exact-order asserts could fail randomly.
    # Sort the result before comparing.
    assert sorted(Solution2().getLeastNumbers(arr=[3, 2, 1], k=2)) == [1, 2]
    assert sorted(Solution2().getLeastNumbers(arr=[0, 1, 2, 1], k=1)) == [0]
# --- file boundary (extraction separator removed) ---
# Repo: Caleb68864/GTM_Link_Sender — file: __main__.py (restored from extraction residue)
import pandas
import os
import wx
import FrmMain
import webbrowser
from difflib import get_close_matches
class MyFrame(wx.Frame):
    """
    Main window of the GoToMeeting link sender. Lets the operator pick a
    remote computer and user profile, then drops a self-deleting batch file
    on that user's desktop which opens the meeting-join URL.
    """

    def createLinkFile(self, meetingid, usersdir, user):
        """Write IT_Help.bat to the target user's desktop: it opens the
        GoToMeeting join URL for *meetingid* and then deletes itself."""
        filepath = "{}\\{}\Desktop\\IT_Help.bat".format(usersdir, user)
        lines = ['START "" "https://www1.gotomeeting.com/join/{}"\n'.format(meetingid), 'del {}\n'.format(filepath)]
        file = open(filepath, 'w')
        for line in lines:
            file.write(line)
        file.close()
        print("Link Created: {} for Meeting #: {}".format(filepath, meetingid))

    def getComputers(self):
        """Load the list of known computers from computers.csv as a DataFrame."""
        df1 = pandas.read_csv("computers.csv")
        # print(df1.to_dict())
        return df1

    def getUsersDir(self):
        """Return the \\\\<computer>\\C$\\Users UNC path for the computer named
        in the text field, or None (implicitly) if the field is empty or the
        share is unreachable."""
        computer = self.txtComputer.GetValue()
        if computer != "":
            usersDir = "\\\\{}\\C$\\Users".format(computer)
            if os.path.exists(usersDir):
                return usersDir
            else:
                print("That Computer Does Not Exist")
        else:
            print("Computer Field Empty")

    def getUsers(self, usersDir):
        """List profile directory names under *usersDir*; returns [] on any
        failure (including usersDir being None from getUsersDir)."""
        # print(usersDir)
        try:
            users = [d for d in os.listdir(usersDir) if os.path.isdir(os.path.join(usersDir, d))]
            return users
        except AttributeError:
            return []
        except OSError:
            return []
        except TypeError:
            # usersDir is None when getUsersDir() found nothing
            return []

    def getUser(self, users):
        """Console-based fallback picker: prints the users and reads an index
        from stdin. Currently unused (see commented call in createLink)."""
        if len(users) > 0:
            for user in users:
                print("{} | {}".format(users.index(user), user))
            selection = int(input("Select User #: "))
            user = users[selection]
            return user
        else:
            return ""

    def createLink(self, meetingid):
        """Create the batch file for the user selected in the combo box."""
        try:
            usersdir = self.getUsersDir()
            #user = self.getUser(self.getUsers(usersdir))
            user = self.cboUsername.GetStringSelection()
            #meetingid = input("Enter Meeting ID: ")
            self.createLinkFile(meetingid, usersdir, user)
        except AttributeError as e:
            print(e)

    def btnOpenGTMClick(self, instance):
        """Open the GoToMeeting site in the local default browser."""
        try:
            webbrowser.open("https://www.gotomeeting.com", new=0, autoraise=True)
        except AttributeError as e:
            print(e)

    def btnClearClick(self, instance):
        """Reset all input fields and the user combo box."""
        self.txtComputer.SetValue("")
        self.txtMeetingNum.SetValue("")
        self.cboUsername.Clear()

    def btnPopulateClick(self, instance):
        """Fill the user combo box with profiles found on the target computer."""
        try:
            usersdir = self.getUsersDir()
            users = self.getUsers(usersdir)
            print(users)
            self.cboUsername.Clear()
            self.cboUsername.Append(users)
        except AttributeError as e:
            print(e)

    def btnCopyClick(self, instance):
        """Create the link file using the meeting number from the text field."""
        self.createLink(self.txtMeetingNum.GetValue())

    def __init__(self, parent):
        # FrmMain builds the widget layout (txtComputer, cboUsername, ...).
        FrmMain.FrmMain.__init__(self, parent)
        self.Show(True)
# Script entry point: create the wx application and the main window, then
# block in the GUI event loop until the window is closed.
app = wx.App(False)
frame = MyFrame(None)
app.MainLoop()
# --- file boundary (extraction separator removed) ---
# Repo: TheNicGard/DungeonStar (restored from extraction residue)
class Tile:
    """A single map tile: tracks whether it blocks movement and/or sight."""

    def __init__(self, blocked, block_sight=None, window=None):
        self.blocked = blocked
        # By default a tile that blocks movement also blocks sight.
        self.block_sight = blocked if block_sight is None else block_sight
        self.window = window
        # Tiles start unexplored until the player sees them.
        self.explored = False
# --- file boundary (extraction separator removed) ---
1613209 | # Copyright 2019 <NAME> and <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
################# LIBRARIES ###############################
import warnings
warnings.filterwarnings("ignore")
import numpy as np, os, sys, pandas as pd, csv, copy
import torch, torch.nn as nn, matplotlib.pyplot as plt, random
from torch.utils.data import Dataset
from PIL import Image
from torchvision import transforms
from tqdm import tqdm
import pretrainedmodels.utils as utils
import auxiliaries as aux
"""============================================================================"""
################ FUNCTION TO RETURN ALL DATALOADERS NECESSARY ####################
def give_dataloaders(dataset, opt):
    """
    Args:
        dataset: string, name of dataset for which the dataloaders should be returned.
        opt: argparse.Namespace, contains all training-specific parameters.
    Returns:
        dataloaders: dict of dataloaders for training, testing and evaluation on training.
    """
    # Name -> builder dispatch (selection key is opt.dataset, as before).
    dataset_builders = {
        'cub200': give_CUB200_datasets,
        'cars196': give_CARS196_datasets,
        'online_products': give_OnlineProducts_datasets,
        'in-shop': give_InShop_datasets,
        'vehicle_id': give_VehicleID_datasets,
    }
    if opt.dataset not in dataset_builders:
        raise Exception('No Dataset >{}< available!'.format(dataset))
    datasets = dataset_builders[opt.dataset](opt)

    # Wrap each dataset in a DataLoader: validation sets keep their order and
    # the incomplete last batch; training sets shuffle and drop the remainder.
    dataloaders = {}
    for key, sub_dataset in datasets.items():
        is_val = sub_dataset.is_validation
        dataloaders[key] = torch.utils.data.DataLoader(
            sub_dataset, batch_size=opt.bs, num_workers=opt.kernels,
            shuffle=not is_val, pin_memory=True, drop_last=not is_val)
    return dataloaders
"""============================================================================"""
################# FUNCTIONS TO RETURN TRAIN/VAL PYTORCH DATASETS FOR CUB200, CARS196, STANFORD ONLINE PRODUCTS, IN-SHOP CLOTHES, PKU VEHICLE-ID ####################################
def give_CUB200_datasets(opt):
    """
    This function generates a training, testing and evaluation dataloader for Metric Learning on the CUB-200-2011 dataset.
    For Metric Learning, the dataset classes are sorted by name, and the first half used for training while the last half is used for testing.
    So no random shuffling of classes.

    Args:
        opt: argparse.Namespace, contains all traininig-specific parameters.
    Returns:
        dict of PyTorch datasets for training, testing and evaluation.
    """
    image_sourcepath = opt.source_path+'/images'
    #Find available data classes: folders named '<idx>.<classname>'; the '._' filter drops macOS metadata entries.
    image_classes = sorted([x for x in os.listdir(image_sourcepath) if '._' not in x], key=lambda x: int(x.split('.')[0]))
    #Make a index-to-labelname conversion dict.
    conversion = {int(x.split('.')[0]):x.split('.')[-1] for x in image_classes}
    #Generate a list of tuples (class_label, image_path)
    image_list = {int(key.split('.')[0]):sorted([image_sourcepath+'/'+key+'/'+x for x in os.listdir(image_sourcepath+'/'+key) if '._' not in x]) for key in image_classes}
    image_list = [[(key,img_path) for img_path in image_list[key]] for key in image_list.keys()]
    image_list = [x for y in image_list for x in y]

    #Image-dict of shape {class_idx:[list of paths to images belong to this class] ...}
    image_dict = {}
    for key, img_path in image_list:
        #Shift labels to be 0-based (folder indices start at 1).
        key = key-1
        if not key in image_dict.keys():
            image_dict[key] = []
        image_dict[key].append(img_path)

    keys = sorted(list(image_dict.keys()))

    #Following "Deep Metric Learning via Lifted Structured Feature Embedding", we use the first half of classes for training.
    train,test = keys[:len(keys)//2], keys[len(keys)//2:]
    train_image_dict, val_image_dict = {key:image_dict[key] for key in train},{key:image_dict[key] for key in test}

    #'evaluation' reuses the training images but with validation transforms (no augmentation, deterministic order).
    train_dataset = BaseTripletDataset(train_image_dict, opt, samples_per_class=opt.samples_per_class)
    val_dataset = BaseTripletDataset(val_image_dict, opt, is_validation=True)
    eval_dataset = BaseTripletDataset(train_image_dict, opt, is_validation=True)

    train_dataset.conversion = conversion
    val_dataset.conversion = conversion
    eval_dataset.conversion = conversion

    return {'training':train_dataset, 'testing':val_dataset, 'evaluation':eval_dataset}
def give_CARS196_datasets(opt):
    """
    This function generates a training, testing and evaluation dataloader for Metric Learning on the CARS196 dataset.
    For Metric Learning, the dataset classes are sorted by name, and the first half used for training while the last half is used for testing.
    So no random shuffling of classes.

    Args:
        opt: argparse.Namespace, contains all traininig-specific parameters.
    Returns:
        dict of PyTorch datasets for training, testing and evaluation.
    """
    image_sourcepath = opt.source_path+'/images'
    #Find available data classes (one folder per car model).
    image_classes = sorted([x for x in os.listdir(image_sourcepath)])
    #Make a index-to-labelname conversion dict.
    conversion = {i:x for i,x in enumerate(image_classes)}
    #Generate a list of tuples (class_label, image_path); labels here are already 0-based enumeration indices.
    image_list = {i:sorted([image_sourcepath+'/'+key+'/'+x for x in os.listdir(image_sourcepath+'/'+key)]) for i,key in enumerate(image_classes)}
    image_list = [[(key,img_path) for img_path in image_list[key]] for key in image_list.keys()]
    image_list = [x for y in image_list for x in y]

    #Image-dict of shape {class_idx:[list of paths to images belong to this class] ...}
    image_dict = {}
    for key, img_path in image_list:
        #No label shift needed (unlike CUB200): keys are 0-based already.
        key = key
        # key = key-1
        if not key in image_dict.keys():
            image_dict[key] = []
        image_dict[key].append(img_path)

    keys = sorted(list(image_dict.keys()))

    #Following "Deep Metric Learning via Lifted Structured Feature Embedding", we use the first half of classes for training.
    train,test = keys[:len(keys)//2], keys[len(keys)//2:]
    train_image_dict, val_image_dict = {key:image_dict[key] for key in train},{key:image_dict[key] for key in test}

    #'evaluation' reuses the training images but with validation transforms (no augmentation).
    train_dataset = BaseTripletDataset(train_image_dict, opt, samples_per_class=opt.samples_per_class)
    val_dataset = BaseTripletDataset(val_image_dict, opt, is_validation=True)
    eval_dataset = BaseTripletDataset(train_image_dict, opt, is_validation=True)

    train_dataset.conversion = conversion
    val_dataset.conversion = conversion
    eval_dataset.conversion = conversion

    return {'training':train_dataset, 'testing':val_dataset, 'evaluation':eval_dataset}
def give_OnlineProducts_datasets(opt):
    """
    This function generates a training, testing and evaluation dataloader for Metric Learning on the Online-Products dataset.
    For Metric Learning, training and test sets are provided by given text-files, Ebay_train.txt & Ebay_test.txt.
    So no random shuffling of classes.

    Args:
        opt: argparse.Namespace, contains all traininig-specific parameters.
    Returns:
        dict of PyTorch datasets for training, testing and evaluation.
    """
    image_sourcepath = opt.source_path+'/images'
    #Load text-files containing classes and imagepaths (columns include class_id, super_class_id, path).
    training_files = pd.read_table(opt.source_path+'/Info_Files/Ebay_train.txt', header=0, delimiter=' ')
    test_files = pd.read_table(opt.source_path+'/Info_Files/Ebay_test.txt', header=0, delimiter=' ')

    #Generate Conversion dict: class_id -> product-category folder name.
    conversion = {}
    for class_id, path in zip(training_files['class_id'],training_files['path']):
        conversion[class_id] = path.split('/')[0]
    for class_id, path in zip(test_files['class_id'],test_files['path']):
        conversion[class_id] = path.split('/')[0]

    #Generate image_dicts of shape {class_idx:[list of paths to images belong to this class] ...}
    #Labels are shifted by -1 to be 0-based.
    train_image_dict, val_image_dict = {},{}
    for key, img_path in zip(training_files['class_id'],training_files['path']):
        key = key-1
        if not key in train_image_dict.keys():
            train_image_dict[key] = []
        train_image_dict[key].append(image_sourcepath+'/'+img_path)

    for key, img_path in zip(test_files['class_id'],test_files['path']):
        key = key-1
        if not key in val_image_dict.keys():
            val_image_dict[key] = []
        val_image_dict[key].append(image_sourcepath+'/'+img_path)

    ### Uncomment this if super-labels should be used to generate resp.datasets
    # super_conversion = {}
    # for super_class_id, path in zip(training_files['super_class_id'],training_files['path']):
    #     conversion[super_class_id] = path.split('/')[0]
    # for key, img_path in zip(training_files['super_class_id'],training_files['path']):
    #     key = key-1
    #     if not key in super_train_image_dict.keys():
    #         super_train_image_dict[key] = []
    #     super_train_image_dict[key].append(image_sourcepath+'/'+img_path)
    # super_train_dataset = BaseTripletDataset(super_train_image_dict, opt, is_validation=True)
    # super_train_dataset.conversion = super_conversion

    #'evaluation' reuses the training images but with validation transforms (no augmentation).
    train_dataset = BaseTripletDataset(train_image_dict, opt, samples_per_class=opt.samples_per_class)
    val_dataset = BaseTripletDataset(val_image_dict, opt, is_validation=True)
    eval_dataset = BaseTripletDataset(train_image_dict, opt, is_validation=True)

    train_dataset.conversion = conversion
    val_dataset.conversion = conversion
    eval_dataset.conversion = conversion

    return {'training':train_dataset, 'testing':val_dataset, 'evaluation':eval_dataset}
    # return {'training':train_dataset, 'testing':val_dataset, 'evaluation':eval_dataset, 'super_evaluation':super_train_dataset}
def give_InShop_datasets(opt):
    """
    This function generates a training, testing and evaluation dataloader for Metric Learning on the In-Shop Clothes dataset.
    For Metric Learning, training and test sets are provided by one text file, list_eval_partition.txt.
    So no random shuffling of classes.

    Args:
        opt: argparse.Namespace, contains all traininig-specific parameters.
    Returns:
        dict of PyTorch datasets for training, testing (by query and gallery separation) and evaluation.
    """
    #Load train-test-partition text file; rows are (image_path, item_id, split), first data row skipped.
    data_info = np.array(pd.read_table(opt.source_path+'/Eval/list_eval_partition.txt', header=1, delim_whitespace=True))[1:,:]
    #Separate into training dataset and query/gallery dataset for testing.
    train, query, gallery = data_info[data_info[:,2]=='train'][:,:2], data_info[data_info[:,2]=='query'][:,:2], data_info[data_info[:,2]=='gallery'][:,:2]

    #Generate conversions(id verson)
    # use_train_image_num = 10000
    # use_val_image_num = int(use_train_image_num/3)
    # np.random.seed(0)
    # train_idx = np.random.choice(len(train), size=use_train_image_num, replace = False)
    # train = train[train_idx]
    # query_idx = np.random.choice(len(query), size=use_val_image_num, replace = False)
    # query = query[query_idx]
    # gallery_idx = np.random.choice(len(gallery), size=use_val_image_num, replace = False)
    # gallery = gallery[gallery_idx]

    #Generate conversions: map the numeric suffix of each item id (e.g. 'id_00000001') to a dense 0-based label.
    #NOTE: query and gallery share one conversion so their labels stay comparable; train has its own.
    lab_conv = {x:i for i,x in enumerate(np.unique(np.array([int(x.split('_')[-1]) for x in train[:,1]])))}
    train[:,1] = np.array([lab_conv[int(x.split('_')[-1])] for x in train[:,1]])

    lab_conv = {x:i for i,x in enumerate(np.unique(np.array([int(x.split('_')[-1]) for x in np.concatenate([query[:,1], gallery[:,1]])])))}
    query[:,1] = np.array([lab_conv[int(x.split('_')[-1])] for x in query[:,1]])
    gallery[:,1] = np.array([lab_conv[int(x.split('_')[-1])] for x in gallery[:,1]])

    #Generate Image-Dicts for training, query and gallery of shape {class_idx:[list of paths to images belong to this class] ...}
    train_image_dict = {}
    for img_path, key in train:
        if not key in train_image_dict.keys():
            train_image_dict[key] = []
        train_image_dict[key].append(opt.source_path+'/'+img_path)

    query_image_dict = {}
    for img_path, key in query:
        if not key in query_image_dict.keys():
            query_image_dict[key] = []
        query_image_dict[key].append(opt.source_path+'/'+img_path)

    gallery_image_dict = {}
    for img_path, key in gallery:
        if not key in gallery_image_dict.keys():
            gallery_image_dict[key] = []
        gallery_image_dict[key].append(opt.source_path+'/'+img_path)

    ### Uncomment this if super-labels should be used to generate resp.datasets
    # super_train_image_dict, counter, super_assign = {},0,{}
    # for img_path, _ in train:
    #     key = '_'.join(img_path.split('/')[1:3])
    #     if key not in super_assign.keys():
    #         super_assign[key] = counter
    #         counter += 1
    #     key = super_assign[key]
    #
    #     if not key in super_train_image_dict.keys():
    #         super_train_image_dict[key] = []
    #     super_train_image_dict[key].append(opt.source_path+'/'+img_path)
    # super_train_dataset = BaseTripletDataset(super_train_image_dict, opt, is_validation=True)

    #'evaluation' reuses the training images but with validation transforms (no augmentation).
    train_dataset = BaseTripletDataset(train_image_dict, opt, samples_per_class=opt.samples_per_class)
    eval_dataset = BaseTripletDataset(train_image_dict, opt, is_validation=True)
    query_dataset = BaseTripletDataset(query_image_dict, opt, is_validation=True)
    gallery_dataset = BaseTripletDataset(gallery_image_dict, opt, is_validation=True)

    return {'training':train_dataset, 'testing_query':query_dataset, 'evaluation':eval_dataset, 'testing_gallery':gallery_dataset}
    # return {'training':train_dataset, 'testing_query':query_dataset, 'evaluation':eval_dataset, 'testing_gallery':gallery_dataset, 'super_evaluation':super_train_dataset}
def give_VehicleID_datasets(opt):
    """
    This function generates a training, testing and evaluation dataloader for Metric Learning on the PKU Vehicle dataset.
    For Metric Learning, training and (multiple) test sets are provided by separate text files, train_list and test_list_<n_classes_2_test>.txt.
    So no random shuffling of classes.

    Args:
        opt: argparse.Namespace, contains all traininig-specific parameters.
    Returns:
        dict of PyTorch datasets for training, testing and evaluation.
    """
    #Load respective text-files; rows are (image_number, vehicle_id). Test sets of 800/1600/2400 classes.
    train = np.array(pd.read_table(opt.source_path+'/train_test_split/train_list.txt', header=None, delim_whitespace=True))
    small_test = np.array(pd.read_table(opt.source_path+'/train_test_split/test_list_800.txt', header=None, delim_whitespace=True))
    medium_test = np.array(pd.read_table(opt.source_path+'/train_test_split/test_list_1600.txt', header=None, delim_whitespace=True))
    big_test = np.array(pd.read_table(opt.source_path+'/train_test_split/test_list_2400.txt', header=None, delim_whitespace=True))

    #Generate conversions: dense 0-based labels. The three test sets share one
    #conversion so labels remain comparable across them; train has its own.
    lab_conv = {x:i for i,x in enumerate(np.unique(train[:,1]))}
    train[:,1] = np.array([lab_conv[x] for x in train[:,1]])

    lab_conv = {x:i for i,x in enumerate(np.unique(np.concatenate([small_test[:,1], medium_test[:,1], big_test[:,1]])))}
    small_test[:,1] = np.array([lab_conv[x] for x in small_test[:,1]])
    medium_test[:,1] = np.array([lab_conv[x] for x in medium_test[:,1]])
    big_test[:,1] = np.array([lab_conv[x] for x in big_test[:,1]])

    #Generate Image-Dicts for training and different testings of shape {class_idx:[list of paths to images belong to this class] ...}
    #Image files are named by zero-padded 7-digit numbers.
    train_image_dict = {}
    for img_path, key in train:
        if not key in train_image_dict.keys():
            train_image_dict[key] = []
        train_image_dict[key].append(opt.source_path+'/image/{:07d}.jpg'.format(img_path))

    small_test_dict = {}
    for img_path, key in small_test:
        if not key in small_test_dict.keys():
            small_test_dict[key] = []
        small_test_dict[key].append(opt.source_path+'/image/{:07d}.jpg'.format(img_path))

    medium_test_dict = {}
    for img_path, key in medium_test:
        if not key in medium_test_dict.keys():
            medium_test_dict[key] = []
        medium_test_dict[key].append(opt.source_path+'/image/{:07d}.jpg'.format(img_path))

    big_test_dict = {}
    for img_path, key in big_test:
        if not key in big_test_dict.keys():
            big_test_dict[key] = []
        big_test_dict[key].append(opt.source_path+'/image/{:07d}.jpg'.format(img_path))

    #'evaluation' reuses the training images but with validation transforms (no augmentation).
    train_dataset = BaseTripletDataset(train_image_dict, opt, samples_per_class=opt.samples_per_class)
    eval_dataset = BaseTripletDataset(train_image_dict, opt, is_validation=True)
    val_small_dataset = BaseTripletDataset(small_test_dict, opt, is_validation=True)
    val_medium_dataset = BaseTripletDataset(medium_test_dict, opt, is_validation=True)
    val_big_dataset = BaseTripletDataset(big_test_dict, opt, is_validation=True)

    return {'training':train_dataset, 'testing_set1':val_small_dataset, 'testing_set2':val_medium_dataset, \
            'testing_set3':val_big_dataset, 'evaluation':eval_dataset}
################## BASIC PYTORCH DATASET USED FOR ALL DATASETS ##################################
class BaseTripletDataset(Dataset):
    """
    Dataset class to provide (augmented) correctly prepared training samples corresponding to standard DML literature.
    This includes normalizing to ImageNet-standards, and Random & Resized cropping of shapes 224 for ResNet50 and 227 for
    GoogLeNet during Training. During validation, only resizing to 256 or center cropping to 224/227 is performed.
    """
    def __init__(self, image_dict, opt, samples_per_class=8, is_validation=False):
        """
        Dataset Init-Function.

        Args:
            image_dict: dict, Dictionary of shape {class_idx:[list of paths to images belong to this class] ...} providing all the training paths and classes.
            opt: argparse.Namespace, contains all training-specific parameters.
            samples_per_class: Number of samples to draw from one class before moving to the next when filling the batch.
            is_validation: If is true, dataset properties for validation/testing are used instead of ones for training.
        Returns:
            Nothing!
        """
        #Define length of dataset
        self.n_files = np.sum([len(image_dict[key]) for key in image_dict.keys()])

        self.is_validation = is_validation

        self.pars = opt
        self.image_dict = image_dict

        self.avail_classes = sorted(list(self.image_dict.keys()))

        #Convert image dictionary from classname:content to class_idx:content, because the initial indices are not necessarily from 0 - <n_classes>.
        self.image_dict = {i:self.image_dict[key] for i,key in enumerate(self.avail_classes)}
        self.avail_classes = sorted(list(self.image_dict.keys()))

        #Init. properties that are used when filling up batches.
        if not self.is_validation:
            self.samples_per_class = samples_per_class
            #Select current class to sample images from up to <samples_per_class>
            self.current_class = np.random.randint(len(self.avail_classes))
            #Seeding with the same class twice means neither of the last two
            #"visited" slots can immediately re-select the starting class.
            self.classes_visited = [self.current_class, self.current_class]
            self.n_samples_drawn = 0

        #Data augmentation/processing methods.
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])
        transf_list = []
        if not self.is_validation:
            #ResNet50 expects 224x224 crops, GoogLeNet 227x227.
            transf_list.extend([transforms.RandomResizedCrop(size=224) if opt.arch=='resnet50' else transforms.RandomResizedCrop(size=227),
                                transforms.RandomHorizontalFlip(0.5)])
        else:
            transf_list.extend([transforms.Resize(256),
                                transforms.CenterCrop(224) if opt.arch=='resnet50' else transforms.CenterCrop(227)])
        transf_list.extend([transforms.ToTensor(), normalize])
        self.transform = transforms.Compose(transf_list)

        #Convert Image-Dict to list of (image_path, image_class). Allows for easier direct sampling.
        self.image_list = [[(x,key) for x in self.image_dict[key]] for key in self.image_dict.keys()]
        self.image_list = [x for y in self.image_list for x in y]

        #Flag that denotes if dataset is called for the first time.
        self.is_init = True

    def ensure_3dim(self, img):
        """
        Function that ensures that the input img is three-dimensional.

        Args:
            img: PIL.Image, image which is to be checked for three-dimensionality (i.e. if some images are black-and-white in an otherwise coloured dataset).
        Returns:
            Checked PIL.Image img.
        """
        if len(img.size)==2:
            img = img.convert('RGB')
        return img

    def __getitem__(self, idx):
        """
        Args:
            idx: Sample idx for training sample
        Returns:
            tuple of form (sample_class, torch.Tensor() of input image)
        """
        if self.is_init:
            #On the very first access, derive the starting class from the index.
            self.current_class = self.avail_classes[idx%len(self.avail_classes)]
            self.is_init = False
        if not self.is_validation:
            if self.samples_per_class==1:
                #No class-grouping requested: sample directly from the flat list.
                return self.image_list[idx][-1], self.transform(self.ensure_3dim(Image.open(self.image_list[idx][0])))

            if self.n_samples_drawn==self.samples_per_class:
                #Once enough samples per class have been drawn, we choose another class to draw samples from.
                #Note that we ensure with self.classes_visited that no class is chosen if it had been chosen
                #previously or one before that.
                counter = copy.deepcopy(self.avail_classes)
                for prev_class in self.classes_visited:
                    if prev_class in counter: counter.remove(prev_class)
                self.current_class = counter[idx%len(counter)]
                self.classes_visited = self.classes_visited[1:]+[self.current_class]
                self.n_samples_drawn = 0

            class_sample_idx = idx%len(self.image_dict[self.current_class])
            self.n_samples_drawn += 1

            out_img = self.transform(self.ensure_3dim(Image.open(self.image_dict[self.current_class][class_sample_idx])))
            return self.current_class,out_img
        else:
            #Validation: deterministic (path, label) access from the flat list.
            return self.image_list[idx][-1], self.transform(self.ensure_3dim(Image.open(self.image_list[idx][0])))

    def __len__(self):
        return self.n_files
# --- file boundary (extraction separator removed) ---
3282117 | import clr
import time
from System.Reflection import Assembly
dynamoCore = Assembly.Load("DynamoCore")
dynVersion = dynamoCore.GetName().Version.ToString()
# Bugfix: the previous character-indexing (dynVersion[0] / dynVersion[2])
# breaks for two-digit major versions, e.g. "10.1" -> int('.') raises
# ValueError. Split on '.' to read major/minor robustly; the encoding
# scheme (major*10 + minor) is unchanged.
_dynVersionParts = dynVersion.split(".")
dynVersionInt = int(_dynVersionParts[0]) * 10 + int(_dynVersionParts[1])
class WorksharingLog:
    """Parsed representation of a Revit worksharing log file."""

    def __init__(self, version, sessions):
        self.Version = version
        self.Sessions = sessions
        self.SessionCount = len(sessions)
        self.ProcessingTime = None

    def __repr__(self):
        return "WorksharingLog"

    def AllSessionsUseSameBuild(self):
        """Return True when every session was recorded with the same Revit build."""
        return len(set([x.RevitBuild for x in self.Sessions])) == 1

    def GetSessionByID(self, ID):
        """Return the session with the given ID, or None if no such session exists."""
        # Bugfix: previously looked up the free (likely undefined) global name
        # 'sessions' instead of this log's own session list, raising NameError.
        sessionlookup = [x for x in self.Sessions if x.ID == ID]
        if len(sessionlookup) > 0: return sessionlookup[0]
        else: return None
class WorksharingSession:
    """One Revit session reconstructed from a worksharing log."""
    def __init__(self, id):
        self.ID = id
        self.Start = None
        self.End = None
        self.Date = None
        self.Duration = None
        self.User = None
        self.RevitVersion = None
        self.RevitBuild = None
        self.Journal = None
        self.HostAddress = None
        self.HostName = None
        self.ServerAddress = None
        self.ServerName = None
        self.Central = None
        # Chronological list of WorksharingEvent objects for this session.
        self.Events = []
    def __repr__(self):
        return "WorksharingSession"
    def GetLoadDuration(self):
        """Return the time spent opening the model (plus workset configuration,
        when a complete >WSConfig/<WSConfig pair exists), or None if there is
        no complete >Open/<Open pair."""
        if len([x for x in self.Events if x.Text == ">Open"]) == 0: return None
        else:
            openStart = [x.DateTime for x in self.Events if x.Text == ">Open"][0]
            openEnd = [x.DateTime for x in self.Events if x.Text == "<Open"]
            wsconfigStart = [x.DateTime for x in self.Events if x.Text == ">WSConfig"]
            wsconfigEnd = [x.DateTime for x in self.Events if x.Text == "<WSConfig"]
            if len(openEnd) == 0: return None
            elif len(wsconfigStart) > 0 and len(wsconfigEnd) > 0: return (openEnd[0] - openStart) + (wsconfigEnd[0] - wsconfigStart[0])
            else: return openEnd[0] - openStart
    def GetLoadedLinks(self):
        """Return a LoadedLink for every >OpenLink event, completing end time
        and duration when the matching <OpenLink event is found."""
        links = []
        for event in self.Events:
            # The link path is the first double-quoted token of the event text.
            if event.Text.startswith(">OpenLink"): links.append(LoadedLink(event.DateTime, event.Text.split("\"")[1]))
            elif event.Text == "<OpenLink" and len(links) > 0:
                links[-1].LoadEnd = event.DateTime
                links[-1].LoadDuration = event.DateTime - links[-1].LoadStart
        return links
    def GetSessionType(self):
        """Classify the session based on which event markers appear in it."""
        containsOpen = len([x for x in self.Events if x.Text == ">Open"]) > 0
        containsOpenCentral = len([x for x in self.Events if x.Text == ">Open:Central"]) > 0
        containsSTC = len([x for x in self.Events if x.Text == ">STC"]) > 0
        containsWSD = len([x for x in self.Events if x.Text == ">WSD"]) > 0
        containsReconnect = len([x for x in self.Events if x.Text.startswith(".ReconnectInMiddle")]) > 0
        if containsReconnect: return "Reconnected"
        if not containsOpen and containsSTC: return "CreateNewCentral"
        elif containsOpenCentral:
            # NOTE(review): assumes at least two events exist here — verify upstream parsing guarantees this.
            if self.Events[1].Text == ">Open": return "CreateDetached"
            else: return "WorkInCentral"
        elif not containsOpen and containsWSD: return "ChooseWorksets"
        elif containsOpen and not containsOpenCentral: return "CreateLocalCopy"
        else: return "Unknown"
    def GetSyncEvents(self):
        """Return SyncEvent objects for each >STC/<STC span in this session,
        counting reload-latest rounds and flagging aborted lock acquisitions."""
        events = []
        for event in self.Events:
            if event.Text == ">STC": events.append(SyncEvent(event.DateTime))
            elif event.Text == ">STC:RL:Read": events[-1].ReloadLatestCount += 1
            elif event.Text == ".STC:RL:LockRoot RW gaveUp": events[-1].WasAborted = True
            elif event.Text == "<STC" and len(events) > 0:
                events[-1].End = event.DateTime
                events[-1].Duration = event.DateTime - events[-1].Start
        return events
    def HasLoadedLinks(self):
        """Return True if at least one linked file finished loading."""
        return len([x for x in self.Events if x.Text == "<OpenLink"]) > 0
    def WasTerminatedProperly(self):
        """Return True if the session recorded an end timestamp."""
        return self.End != None
class WorksharingEvent:
    # A single timestamped line belonging to one worksharing session.
    def __init__(self, timestamp, text):
        """Store the parsed timestamp and the remaining text of the log line."""
        self.DateTime = timestamp
        self.Text = text
    def __repr__(self):
        return "WorksharingEvent"
class SyncEvent:
    # One synchronize-to-central operation, reconstructed from ">STC"/"<STC"
    # event pairs by WorksharingSession.GetSyncEvents.
    def __init__(self, start):
        self.Start = start
        self.End = None                # set when the matching "<STC" is seen
        self.Duration = None           # End - Start
        self.ReloadLatestCount = 0     # number of ">STC:RL:Read" sub-events
        self.WasAborted = False        # True if the central lock was given up
    def __repr__(self):
        return "SyncEvent"
class LoadedLink:
    # A linked model opened during a session (">OpenLink"/"<OpenLink" pair).
    def __init__(self, start, linkpath):
        self.LoadStart = start
        self.LoadEnd = None            # set when "<OpenLink" is seen
        self.LoadDuration = None       # LoadEnd - LoadStart
        self.FileName = linkpath.split("\\")[-1]  # file name without directories
        self.FullPath = linkpath
    def __repr__(self):
        return "LoadedLink"
def WSLogFromPath(path):
    """Parse a Revit worksharing log (.slog) file into a WorksharingLog.

    Returns the populated WorksharingLog on success; on any error a
    formatted traceback string is returned instead (Dynamo-friendly).
    """
    try:
        processing_started = time.time()
        sessions = []
        version = None
        with open(path, 'r') as slog:
            # In Dynamo 2.1+ the .slog must be decoded from UTF-16LE by hand;
            # older versions read it as plain text.
            if dynVersionInt >= 21: slog = slog.read().decode('utf-16le').split("\n")
            for line in slog:
                line = line.lstrip().rstrip('\n')
                if line.startswith("$"):
                    # Event line: "$sessionid date time text...".
                    contents = line.split()
                    session_lookup = [x for x in sessions if x.ID == contents[0]]
                    if len(session_lookup) == 0:
                        sessions.append(WorksharingSession(contents[0]))
                        current_session = sessions[-1]
                    else: current_session = session_lookup[0]
                    timestamp = time.strptime(contents[1] + " " + contents[2])
                    text = ' '.join(contents[3:])
                    event = WorksharingEvent(timestamp, text)
                    current_session.Events.append(event)
                    if text.startswith(">Session"):
                        sessions[-1].Start = timestamp
                        # NOTE(review): CPython's struct_time has no .Date
                        # attribute — this presumably relies on the
                        # IronPython/.NET runtime inside Dynamo; verify there.
                        sessions[-1].Date = timestamp.Date
                    elif text.startswith(".ReconnectInMiddle"):
                        # Log starts mid-session: use the first event as start.
                        current_session.Start = current_session.Events[0].DateTime
                        current_session.Date = current_session.Events[0].DateTime.Date
                    else: current_session.End = timestamp
                # Metadata lines: strip the 'key="' prefix and trailing quote.
                elif line.startswith("user"): sessions[-1].User = line.split('user="')[-1][:-1]
                elif line.startswith("build"):
                    versioninfo = line.split('build="')[-1].split()
                    sessions[-1].RevitVersion = int(versioninfo[0])
                    sessions[-1].RevitBuild = versioninfo[-1][:-1]
                elif line.startswith("journal"): sessions[-1].Journal = line.split('journal="')[-1][:-1]
                elif line.startswith("host"):
                    hostinfo = line.split('host=')[-1].split()
                    sessions[-1].HostAddress = hostinfo[0]
                    sessions[-1].HostName = hostinfo[-1][1:-1]
                elif line.startswith("server"):
                    serverinfo = line.split('server=')[-1].split()
                    sessions[-1].ServerAddress = serverinfo[0]
                    sessions[-1].ServerName = serverinfo[-1][1:-1]
                elif line.startswith("central"): sessions[-1].Central = line.split('central="')[-1][:-1]
                elif line.startswith("Worksharing"): version = line.split("Version ")[-1].split(",")[0]
        for session in sessions:
            if session.Start and session.End: session.Duration = session.End - session.Start
        WSLog = WorksharingLog(version, sessions)
        WSLog.ProcessingTime = time.time() - processing_started
        return WSLog
    except:
        # Make IronPython's stdlib visible so traceback can be imported,
        # then report the failure as a string instead of raising.
        import sys
        pyt_path = r'C:\Program Files (x86)\IronPython 2.7\Lib'
        sys.path.append(pyt_path)
        import traceback
        return traceback.format_exc()
# Dynamo node entry point: IN[0] is a .slog path or a list of such paths;
# OUT mirrors that shape with WorksharingLog results.
if isinstance(IN[0], list): OUT = [WSLogFromPath(x) for x in IN[0]]
else: OUT = WSLogFromPath(IN[0])
1733591 | <reponame>pthalin/instaclient
from instaclient.client import *
if TYPE_CHECKING:
from instaclient.client.instaclient import InstaClient
from instaclient.client.checker import Checker
class Navigator(Checker):
    """Selenium navigation helpers mixed into InstaClient.

    These methods only steer the browser (load URLs, press buttons,
    dismiss dialogs); they validate pages but do not scrape content.
    """

    # NAVIGATION PROCEDURES
    def _show_nav_bar(self:'InstaClient'):
        # Make the navigation bar usable: go home if needed and dismiss the
        # overlay dialogue plus the "use the app" banner that covers it.
        if self.driver.current_url != ClientUrls.HOME_URL:
            self._nav_home()
            self._dismiss_dialogue()
        self._dismiss_useapp_bar()
    def _nav_home(self:'InstaClient', manual=False):
        """Navigates to IG home page

        Args:
            manual: if True, click the nav-bar home button instead of
                loading the home URL directly.
        """
        if not manual:
            if self.driver.current_url != ClientUrls.HOME_URL:
                self.driver.get(ClientUrls.HOME_URL)
                self._dismiss_dialogue()
        else:
            self._show_nav_bar()
            home_btn = self._find_element(EC.presence_of_element_located((By.XPATH, Paths.HOME_BTN)))
            self._press_button(home_btn)
    def _nav_user(self:'InstaClient', user:str, check_user:bool=True):
        """
        Navigates to a users profile page

        Args:
            user:str: Username of the user to navigate to the profile page of
            check_user:bool: Condition whether to check if a user is valid or not

        Returns:
            True if operation is successful

        Raises:
            InvaildUserError if user does not exist
        """
        if check_user:
            # NOTE(review): return value is unused — presumably
            # is_valid_user raises on an invalid user; confirm in Checker.
            result = self.is_valid_user(user=user)
        if self.driver.current_url != ClientUrls.NAV_USER.format(user):
            self.driver.get(ClientUrls.NAV_USER.format(user))
            self._dismiss_useapp_bar()
    def _nav_user_dm(self:'InstaClient', user:str, check_user:bool=True):
        """
        Open DM page with a specific user

        Args:
            user:str: Username of the user to send the dm to

        Raises:
            InvalidUserError if user does not exist

        Returns:
            True if operation was successful
        """
        try:
            self._nav_user(user, check_user=check_user)
            private = False
            LOGGER.debug('INSTACLIENT: User <{}> is valid and public (or followed)'.format(user))
        except PrivateAccountError:
            private = True
            LOGGER.debug('INSTACLIENT: User <{}> is private'.format(user))
        # TODO NEW VERSION: Opens DM page and creates new DM
        try:
            # LOAD PAGE
            LOGGER.debug('LOADING PAGE')
            self.driver.get(ClientUrls.NEW_DM)
            user_div:WebElement = self._find_element(EC.presence_of_element_located((By.XPATH, Paths.USER_DIV)), wait_time=10)
            LOGGER.debug('Page Loaded')
            # INPUT USERNAME
            input_div:WebElement = self._find_element(EC.presence_of_element_located((By.XPATH, Paths.SEARCH_USER_INPUT)), wait_time=15)
            LOGGER.debug(f'INPUT: {input_div}')
            input_div.send_keys(user)
            LOGGER.debug('Sent Username to Search Field')
            # Give the search suggestions time to populate.
            time.sleep(1)
            # FIND CORRECT USER DIV
            user_div:WebElement = self._find_element(EC.presence_of_element_located((By.XPATH, Paths.USER_DIV)))
            username_div:WebElement = self._find_element(EC.presence_of_element_located((By.XPATH, Paths.USER_DIV_USERNAME)))
            LOGGER.debug('Found user div')
            self._press_button(user_div)
            LOGGER.debug('Selected user div')
            time.sleep(1)
            # NOTE(review): 'next' shadows the builtin; harmless here.
            next = self._find_element(EC.presence_of_element_located((By.XPATH, Paths.NEXT_BUTTON)))
            self._press_button(next)
            LOGGER.debug('Next pressed')
            return True
        except Exception as error:
            LOGGER.error('There was error navigating to the user page: ', exc_info=error)
            raise InstaClientError('There was an error when navigating to <{}>\'s DMs'.format(user))
    def _nav_post(self:'InstaClient', shortcode:str):
        """Navigate to a post page by its shortcode.

        Raises:
            InvalidShortCodeError if no such post page exists.
        """
        url = ClientUrls.POST_URL.format(shortcode)
        # NOTE(review): 'is not' compares object identity, not equality —
        # likely intended '!='; as written the get() always runs.
        if self.driver.current_url is not url:
            self.driver.get(url)
        result = self._is_valid_page(url)
        if not result:
            raise InvalidShortCodeError(shortcode)
        self._dismiss_useapp_bar()
        LOGGER.debug('Got Post\'s Page')
        return True
    def _nav_post_comments(self:'InstaClient', shortcode:str):
        """Navigate to a post's comments page, via button press if possible.

        Raises:
            InvalidShortCodeError if the comments page does not exist.
        """
        url = ClientUrls.COMMENTS_URL.format(shortcode)
        # NOTE(review): 'is not' here too — see _nav_post.
        if self.driver.current_url is not url:
            if self.driver.current_url == ClientUrls.POST_URL.format(shortcode):
                # Press Comment Button
                btn = self._check_existence(EC.presence_of_element_located((By.XPATH, Paths.COMMENT_BTN)))
                if btn:
                    btn = self._find_element(EC.presence_of_element_located((By.XPATH, Paths.COMMENT_BTN)))
                    self._press_button(btn)
                else:
                    pass
            # Fall back to loading the comments URL directly.
            if self.driver.current_url != url:
                self.driver.get(url)
        result = self._is_valid_page(url)
        if not result:
            raise InvalidShortCodeError(shortcode)
        LOGGER.debug('Got Post\'s Comments Page')
        return True
    def _nav_tag(self:'InstaClient', tag:str):
        """Navigates to a search for posts with a specific tag on IG.

        Args:
            tag:str: Tag to search for

        Raises:
            InvaildTagError if the tag page does not exist.
        """
        self.driver.get(ClientUrls.SEARCH_TAGS.format(tag))
        if self._is_valid_page(ClientUrls.SEARCH_TAGS.format(tag)):
            return True
        else:
            raise InvaildTagError(tag)
    def _nav_location(self:'InstaClient', id:str, slug:str):
        """Navigates to the page of the location specified by
        the `id` and `slug`.

        Args:
            id (str): ID of the location to navigate to.
            slug (str): Slug of the location to navigate to.

        Raises:
            InvaildLocationError if the location page does not exist.
        """
        self.driver.get(ClientUrls.LOCATION_PAGE.format(id, slug))
        if self._is_valid_page(ClientUrls.LOCATION_PAGE.format(id, slug)):
            return True
        else:
            raise InvaildLocationError(id, slug)
    def _nav_explore(self:'InstaClient', manual=False):
        """Navigates to the explore page

        Args:
            manual: if True, click the nav-bar explore button instead of
                loading the URL (no return value in that branch).
        """
        if not manual:
            self.driver.get(ClientUrls.EXPLORE_PAGE)
            if self._is_valid_page(ClientUrls.EXPLORE_PAGE):
                return True
            return False
        else:
            self._show_nav_bar()
            explore_btn = self._find_element(EC.presence_of_element_located((By.XPATH, Paths.EXPLORE_BTN)))
            self._press_button(explore_btn)
| StarcoderdataPython |
6569324 | <filename>ARMODServers/Apps/Index/migrations/0005_indexnavbar.py<gh_stars>1-10
# Generated by Django 3.1.4 on 2021-04-20 15:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Index', '0004_auto_20210420_2254'),
]
operations = [
migrations.CreateModel(
name='IndexNavbar',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateTimeField(auto_now_add=True, null=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(auto_now=True, null=True, verbose_name='更新时间')),
('is_delete', models.BooleanField(default=False, verbose_name='删除标记')),
('sort_id', models.IntegerField(db_index=True, default=0, verbose_name='Navbar sort id')),
('navbar_title', models.CharField(default='', max_length=16, verbose_name='Navbar title')),
('navbar_url', models.CharField(blank=True, default='', max_length=256, verbose_name='Navbar url')),
('navbar_icon', models.CharField(blank=True, default='', max_length=32, verbose_name='Navbar icon')),
],
options={
'verbose_name': 'navbar',
'verbose_name_plural': 'navbar',
'db_table': 'armod_index_navbar',
},
),
]
| StarcoderdataPython |
8166101 | <filename>boa3_test/test_sc/native_test/contractmanagement/DeployContract.py
from typing import Any
from boa3.builtin import public
from boa3.builtin.interop.contract import Contract
from boa3.builtin.nativecontract.contractmanagement import ContractManagement
@public
def Main(script: bytes, manifest: bytes, data: Any) -> Contract:
    """Deploy a contract through the native ContractManagement contract.

    Test entry point: forwards the NEF script, manifest and optional data
    straight to ContractManagement.deploy and returns the new Contract.
    """
    return ContractManagement.deploy(script, manifest, data)
| StarcoderdataPython |
12844592 | <gh_stars>10-100
import dash_core_components as dcc
import dash_html_components as html
import dash_table as dt
from openomics_web.utils.str_utils import longest_common_prefix
def DataTableColumnSelect(columns):
    """Build the column-selection controls for an uploaded data table.

    Args:
        columns: column names of the uploaded table.

    Returns:
        A Dash html.Div with a dropdown to pick the gene id/name index
        column and a multi-select dropdown of common column prefixes.
    """
    prefix_options = longest_common_prefix(columns)

    gene_col_dropdown = dcc.Dropdown(
        id='data-table-genes-col-name',
        options=[{'label': name, 'value': name} for name in columns],
        style={
            'width': '100%',
        },
        value=columns[0],
    )
    prefix_dropdown = dcc.Dropdown(
        id='data-table-columns-select',
        options=[{'label': name, 'value': name} for name in prefix_options],
        style={
            'width': '100%',
        },
        multi=True,
    )

    return html.Div([
        html.Div(['Select the gene id/name column to index by:']),
        gene_col_dropdown,
        html.Div(['Select the column prefixes to import:']),
        prefix_dropdown,
    ])
def ExpressionDataTable(df):
    """Build the paged/filterable/sortable DataTable for an expression matrix.

    Args:
        df: DataFrame whose columns become the table columns.

    Returns:
        An html.Div row holding the DataTable plus an empty container that
        callbacks fill with a companion graph.
    """
    return html.Div(
        className="row",
        children=[
            html.Div(
                dt.DataTable(
                    id='expression-datatable',
                    columns=[{"name": i, "id": i} for i in df.columns],
                    # Paging, filtering and sorting are all "custom", i.e.
                    # handled server-side by callbacks elsewhere in the app.
                    page_current=0,
                    page_size=20,
                    page_action='custom',
                    filter_action='custom',
                    filter_query='',
                    sort_action='custom',
                    sort_mode='multi',
                    sort_by=[],
                    style_as_list_view=True,
                    style_cell={
                        'overflow': 'hidden',
                        'textOverflow': 'clip',
                        'whiteSpace': 'normal'
                    },
                    style_data={'width': '30px'},
                    # Zebra striping for odd rows.
                    style_data_conditional=[
                        {'if': {'row_index': 'odd'},
                         'backgroundColor': 'rgb(248, 248, 248)'
                         },
                    ],
                    style_table={"maxHeight": '800px',
                                 'width': '800px',
                                 'marginTop': '5px',
                                 'marginBottom': '10px',
                                 'overflowX': 'scroll'
                                 },
                    style_header={
                        'backgroundColor': 'white',
                        'fontWeight': 'bold'
                    },
                    row_selectable="multi",
                    selected_rows=[],
                    # virtualization=True,
                ),
                style={'height': 750, 'overflowY': 'scroll'},
                className='six columns'
            ),
            # Placeholder populated by the paging/graph callback.
            html.Div(
                id='table-paging-with-graph-container',
                className="five columns"
            )
        ]
    )
# Each inner list groups the textual and symbolic spellings of one Dash
# filter operator; the first entry is the canonical form returned to callers.
operators = [['ge ', '>='],
             ['le ', '<='],
             ['lt ', '<'],
             ['gt ', '>'],
             ['ne ', '!='],
             ['eq ', '='],
             ['contains '],
             ['datestartswith ']]


def split_filter_part(filter_part):
    """Split one Dash ``filter_query`` clause into (column, operator, value).

    Args:
        filter_part: a clause such as ``{price} >= 10`` or
            ``{name} contains "foo"``.

    Returns:
        ``(name, operator, value)`` where ``operator`` is the canonical word
        form (e.g. ``'ge'``) and ``value`` is a float when numeric, otherwise
        a de-quoted string. Returns ``[None, None, None]`` when no known
        operator is present.
    """
    for operator_type in operators:
        for operator in operator_type:
            if operator in filter_part:
                name_part, value_part = filter_part.split(operator, 1)
                name = name_part[name_part.find('{') + 1: name_part.rfind('}')]

                value_part = value_part.strip()
                # Bug fix: a clause with a missing value ("{x} >=") used to
                # raise IndexError on value_part[0]; treat it as empty string.
                if not value_part:
                    return name, operator_type[0].strip(), ''
                v0 = value_part[0]
                if (v0 == value_part[-1] and v0 in ("'", '"', '`')):
                    # Strip surrounding quotes and unescape them inside.
                    value = value_part[1: -1].replace('\\' + v0, v0)
                else:
                    try:
                        value = float(value_part)
                    except ValueError:
                        value = value_part

                # word operators need spaces after them in the filter string,
                # but we don't want these later
                return name, operator_type[0].strip(), value

    return [None] * 3
def expression_data_view():
    """Return the container div with the (dark-themed) expression DataTable.

    Unlike ExpressionDataTable, this table has no columns/data yet; it is a
    shell that callbacks populate.
    """
    return html.Div(id='table-container', children=[dt.DataTable(
        id="data-table",
        row_selectable='multi',
        # sorting=True,
        # filtering=True,
        # Allow long cell values to scroll instead of overflowing.
        css=[{
            "selector": ".dash-cell div.dash-cell-value",
            "rule": "display: inline; "
                    "white-space: inherit; "
                    "overflow: auto; "
                    "text-overflow: inherit;"
        }],
        style_cell={
            "whiteSpace": "no-wrap",
            "overflow": "hidden",
            "textOverflow": "ellipsis",
            "maxWidth": 100,
            'fontWeight': 100,
            'fontSize': '11pt',
            'fontFamily': 'Courier New',
            'backgroundColor': '#1F2132'
        },
        style_header={
            'backgroundColor': '#1F2132',
            'textAlign': 'center'
        },
        style_table={
            "maxHeight": "310px",
            'width': '320px',
            'marginTop': '5px',
            'marginBottom': '10px',
        },
        # n_fixed_rows=1,
        # n_fixed_columns=1
    )])
| StarcoderdataPython |
1773435 | <reponame>ConvertGroupLabs/pairing-functions
# -*- coding: utf-8 -*-
import pytest
from pairing_functions.szudzik import pair, unpair
class TestSzudzikPairing(object):
    # Tests for the n-ary Szudzik pairing function.
    def test_pair(self) -> None:
        assert pair(0, 0) == 0
        assert pair(0, 1) == 1
        assert pair(1, 0) == 2
        assert pair(2, 2) == 8
        assert pair(3, 4) == 19
        assert pair(92, 23) == 8579
    def test_pair_multiple_numbers(self) -> None:
        # More than two arguments are paired by folding pairwise.
        assert pair(1, 2, 3) == 33
        assert pair(3, 4, 5) == 385
        assert pair(1, 2, 3, 4) == 1126
        assert pair(1, 2, 3, 4, 5) == 1269007
    def test_pair_exceptions(self) -> None:
        # Fewer than two arguments or negative inputs must raise.
        with pytest.raises(ValueError):
            assert pair(1)
        with pytest.raises(ValueError):
            assert pair(1, -2)
        # NOTE(review): the two checks below duplicate the two above
        # (pair(1,) is the same call as pair(1)).
        with pytest.raises(ValueError):
            assert pair(1,)
        with pytest.raises(ValueError):
            assert pair(1, -2)
class TestSzudzikUnpair(object):
    # Tests for the inverse function; n selects how many values to recover.
    def test_unpair(self) -> None:
        assert unpair(0) == (0, 0)
        assert unpair(1) == (0, 1)
        assert unpair(2) == (1, 0)
        assert unpair(19) == (3, 4)
        assert unpair(8579) == (92, 23)
        assert unpair(33) == (5, 3)
        assert unpair(33, n=3) == (1, 2, 3)
        assert unpair(385) == (19, 5)
        assert unpair(385, n=3) == (3, 4, 5)
        assert unpair(1126) == (33, 4)
        assert unpair(1126, n=3) == (5, 3, 4)
        assert unpair(1126, n=4) == (1, 2, 3, 4)
    def test_unpair_exceptions(self) -> None:
        # Non-integer or negative codes must raise.
        with pytest.raises(ValueError):
            assert unpair(0.5)
        with pytest.raises(ValueError):
            assert unpair(-1)
class TestSzudzik(object):
    # Round-trip property: unpair(pair(a, b)) == (a, b) and vice versa.
    def test_inverse_property(self) -> None:
        n1, n2 = unpair(pair(1, 2))
        assert n1 == 1 and n2 == 2
        n1, n2 = unpair(33)
        assert pair(n1, n2) == 33
| StarcoderdataPython |
9787366 | from flask import Flask, render_template
import pymongo
import random
app = Flask(__name__)


@app.route('/')
def mostrar_usuario():
    """Render the usuariorandom template with a randomly chosen person.

    Reads all documents from the 'coll' collection of the 'mi-bd' MongoDB
    database and passes one at random to the template as keyword context.
    """
    client = pymongo.MongoClient("mongodb://db:27017/")
    db = client["mi-bd"]
    # Materialise the cursor; random.choice needs a sequence.
    personas = list(db.coll.find())
    # Close the per-request connection instead of leaking it.
    client.close()
    if not personas:
        # Bug fix: random.randint(0, -1) raised ValueError when the
        # collection was empty; answer gracefully instead.
        return "No hay usuarios", 404
    persona = random.choice(personas)
    return render_template('usuariorandom.html', **persona)
8099916 | <filename>openslides_backend/action/actions/poll/mixins.py
from decimal import Decimal
from typing import Any, Dict, List
from ....permissions.permission_helper import has_perm
from ....permissions.permissions import Permission, Permissions
from ....services.datastore.commands import GetManyRequest
from ....services.datastore.interface import DatastoreService
from ....shared.exceptions import MissingPermission
from ....shared.patterns import KEYSEPARATOR, Collection, FullQualifiedId
from ...action import Action
from ..projector_countdown.mixins import CountdownControl
class PollPermissionMixin(Action):
    """Shared permission check for poll actions.

    Resolves the poll's content object and meeting (from the payload when
    creating, from the datastore otherwise) and delegates to
    check_poll_or_option_perms.
    """
    def check_permissions(self, instance: Dict[str, Any]) -> None:
        if "meeting_id" in instance:
            # Create case: the payload already carries the relations.
            content_object_id = instance.get("content_object_id", "")
            meeting_id = instance["meeting_id"]
        else:
            # Update/delete case: look the poll up in the datastore.
            poll = self.datastore.get(
                FullQualifiedId(Collection("poll"), instance["id"]),
                ["content_object_id", "meeting_id"],
            )
            content_object_id = poll.get("content_object_id", "")
            meeting_id = poll["meeting_id"]
        check_poll_or_option_perms(
            self.name, content_object_id, self.datastore, self.user_id, meeting_id
        )
def check_poll_or_option_perms(
    action_name: str,
    content_object_id: str,
    datastore: DatastoreService,
    user_id: int,
    meeting_id: int,
) -> None:
    """Raise MissingPermission unless the user may manage this poll.

    The required permission depends on the collection the poll's content
    object belongs to: motions and assignments have dedicated permissions,
    everything else falls back to the generic poll permission.
    """
    if content_object_id.startswith("motion" + KEYSEPARATOR):
        required: Permission = Permissions.Motion.CAN_MANAGE_POLLS
    elif content_object_id.startswith("assignment" + KEYSEPARATOR):
        required = Permissions.Assignment.CAN_MANAGE
    else:
        required = Permissions.Poll.CAN_MANAGE

    if not has_perm(datastore, user_id, required, meeting_id):
        raise MissingPermission(required)
class StopControl(CountdownControl, Action):
    """Mixin with the shared logic executed when a poll is stopped."""
    def on_stop(self, instance: Dict[str, Any]) -> None:
        """Reset a coupled countdown, compute vote totals and snapshot
        the users entitled to vote at stop time into *instance*."""
        poll = self.datastore.get(
            FullQualifiedId(self.model.collection, instance["id"]),
            ["state", "meeting_id", "voted_ids"],
        )
        # reset countdown given by meeting
        meeting = self.datastore.get(
            FullQualifiedId(Collection("meeting"), poll["meeting_id"]),
            [
                "poll_couple_countdown",
                "poll_countdown_id",
                "users_enable_vote_weight",
            ],
        )
        if meeting.get("poll_couple_countdown") and meeting.get("poll_countdown_id"):
            self.control_countdown(meeting["poll_countdown_id"], "reset")
        # calculate votescast, votesvalid, votesinvalid
        voted_ids = poll.get("voted_ids", [])
        # Decimal arithmetic keeps the canonical 6-decimal-place format.
        instance["votescast"] = str(Decimal("0.000000") + Decimal(len(voted_ids)))
        if not meeting.get("users_enable_vote_weight") or not voted_ids:
            # Without vote weights every cast vote counts exactly once.
            instance["votesvalid"] = instance["votescast"]
        else:
            # Sum the per-meeting vote weights of everyone who voted
            # (missing weight defaults to 1.000000).
            gmr = GetManyRequest(
                Collection("user"), voted_ids, [f"vote_weight_${poll['meeting_id']}"]
            )
            gm_result = self.datastore.get_many([gmr])
            users = gm_result.get(Collection("user"), {}).values()
            instance["votesvalid"] = str(
                sum(
                    Decimal(entry.get(f"vote_weight_${poll['meeting_id']}", "1.000000"))
                    for entry in users
                )
            )
        instance["votesinvalid"] = "0.000000"
        # set entitled users at stop.
        instance["entitled_users_at_stop"] = self.get_entitled_users(poll)
    def get_entitled_users(self, poll: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Return one entry per user entitled to vote in this poll.

        A user is entitled when they belong to a group of the poll's
        meeting and are either present themselves or have delegated their
        vote to someone who is present. Each entry records whether the
        user actually voted and any delegation target.
        """
        entitled_users = []
        entitled_users_ids = set()
        all_voted_users = poll.get("voted_ids", [])
        meeting_id = poll["meeting_id"]
        # get all users from the groups.
        meeting = self.datastore.get(
            FullQualifiedId(Collection("meeting"), meeting_id), ["group_ids"]
        )
        gmr = GetManyRequest(
            Collection("group"), meeting.get("group_ids", []), ["user_ids"]
        )
        gm_result = self.datastore.get_many([gmr])
        groups = gm_result.get(Collection("group"), {}).values()
        for group in groups:
            user_ids = group.get("user_ids", [])
            if not user_ids:
                continue
            gmr = GetManyRequest(
                Collection("user"),
                list(user_ids),
                [
                    "id",
                    "is_present_in_meeting_ids",
                    f"vote_delegated_${meeting_id}_to_id",
                ],
            )
            gm_result = self.datastore.get_many([gmr])
            users = gm_result.get(Collection("user"), {}).values()
            for user in users:
                vote_delegated = {}
                if user.get(f"vote_delegated_${meeting_id}_to_id"):
                    # Fetch the delegate to check their presence below.
                    vote_delegated = self.datastore.get(
                        FullQualifiedId(
                            Collection("user"),
                            user[f"vote_delegated_${meeting_id}_to_id"],
                        ),
                        ["is_present_in_meeting_ids"],
                    )
                if user["id"] in entitled_users_ids:
                    # Already collected via another group.
                    continue
                elif poll["meeting_id"] in user.get(
                    "is_present_in_meeting_ids", []
                ) or (
                    user.get(f"vote_delegated_${meeting_id}_to_id")
                    and poll["meeting_id"]
                    in vote_delegated.get("is_present_in_meeting_ids", [])
                ):
                    entitled_users_ids.add(user["id"])
                    entitled_users.append(
                        {
                            "user_id": user["id"],
                            "voted": user["id"] in all_voted_users,
                            "vote_delegated_to_id": user.get(
                                f"vote_delegated_${meeting_id}_to_id"
                            ),
                        }
                    )
        return entitled_users
| StarcoderdataPython |
9761408 | <filename>lantern/grids/grid_qgrid.py<gh_stars>100-1000
def qgrid_grid(df):
    """Render *df* as an interactive qgrid widget."""
    # Imported lazily so qgrid is only required when this backend is used.
    from qgrid import show_grid

    widget = show_grid(df)
    return widget
| StarcoderdataPython |
4892234 | from sqlalchemy import Boolean, Column, DateTime, Integer, String
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy_utils import ChoiceType, EmailType, PhoneNumberType
from .base import BaseModel
from .lib import OrderedEnum
from .meta import Base
class Role(OrderedEnum):
    # Numeric values define the ordering so role comparisons such as
    # `role >= Role.ADMIN` work; leave gaps for future roles.
    USER = 10
    ADMIN = 20
class User(Base, BaseModel):
    """Application user with contact details, role and account status."""
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True, autoincrement=True)
    first_name = Column(String(100), nullable=False)
    last_name = Column(String(100), nullable=False)
    email = Column(EmailType)
    phone = Column(PhoneNumberType())
    # Stored as the Role enum's integer value.
    role = Column(ChoiceType(Role, impl=Integer()), nullable=False)
    is_enabled = Column(Boolean, nullable=False, default=True)
    last_signed_in_at = Column(DateTime)
    @hybrid_property
    def is_admin(self):
        # True when the user's role is ADMIN or higher (relies on
        # OrderedEnum comparison).
        role = self.role
        return role and role >= Role.ADMIN
    @hybrid_property
    def name(self):
        # Display name: "First Last".
        return f'{self.first_name} {self.last_name}'
| StarcoderdataPython |
3487891 | # Sample code from http://www.redblobgames.com/pathfinding/
# Copyright 2014 <NAME> <<EMAIL>>
#
# Feel free to use this code in your own projects, including commercial projects
# License: Apache v2.0 <http://www.apache.org/licenses/LICENSE-2.0.html>
class SimpleGraph:
    """Adjacency-list graph: maps a node id to the list of its neighbors."""

    def __init__(self):
        # node id -> list of adjacent node ids
        self.edges = {}

    def neighbors(self, id):
        """Return the ids adjacent to *id* (KeyError for unknown nodes)."""
        return self.edges[id]
# Small directed example graph from the article, keyed by node name.
example_graph = SimpleGraph()
example_graph.edges = {
    'A': ['B'],
    'B': ['A', 'C', 'D'],
    'C': ['A'],
    'D': ['E', 'A'],
    'E': ['B']
}
import collections
class Queue:
    """Minimal FIFO queue wrapper around collections.deque."""

    def __init__(self):
        self.elements = collections.deque()

    def empty(self):
        """Return True when no elements are queued."""
        return not self.elements

    def put(self, x):
        """Enqueue *x* at the back."""
        self.elements.append(x)

    def get(self):
        """Dequeue and return the front element."""
        return self.elements.popleft()
# utility functions for dealing with square grids
def from_id_width(id, width):
    """Convert a flat cell index into (x, y) on a grid *width* cells wide."""
    x, y = id % width, id // width
    return (x, y)
def draw_tile(graph, id, style, width):
    """Return the character(s) used to draw cell *id* of *graph*.

    *style* may contain 'number' (cost per cell), 'point_to' (came_from
    map, drawn as arrows), 'start', 'goal' and 'path'; walls come from
    graph.walls. Later checks override earlier ones.
    """
    r = "."
    if 'number' in style and id in style['number']: r = "%d" % style['number'][id]
    if 'point_to' in style and style['point_to'].get(id, None) is not None:
        # Draw an arrow pointing toward the predecessor cell.
        (x1, y1) = id
        (x2, y2) = style['point_to'][id]
        if x2 == x1 + 1: r = "\u2192"
        if x2 == x1 - 1: r = "\u2190"
        if y2 == y1 + 1: r = "\u2193"
        if y2 == y1 - 1: r = "\u2191"
    if 'start' in style and id == style['start']: r = "A"
    if 'goal' in style and id == style['goal']: r = "Z"
    if 'path' in style and id in style['path']: r = "@"
    if id in graph.walls: r = "#" * width
    return r
def draw_grid(graph, width=2, **style):
    """Print *graph* to stdout, one row per line, *width* chars per cell.

    Keyword arguments are forwarded to draw_tile as the style dict.
    """
    for y in range(graph.height):
        for x in range(graph.width):
            print("%%-%ds" % width % draw_tile(graph, (x, y), style, width), end="")
        print()
# data from main article
# Wall cells of the article's 30-wide example grid, given as flat indices
# and converted to (x, y) tuples.
DIAGRAM1_WALLS = [from_id_width(id, width=30) for id in [21,22,51,52,81,82,93,94,111,112,123,124,133,134,141,142,153,154,163,164,171,172,173,174,175,183,184,193,194,201,202,203,204,205,213,214,223,224,243,244,253,254,273,274,283,284,303,304,313,314,333,334,343,344,373,374,403,404,433,434]]
class SquareGrid:
    """Rectangular grid graph with impassable wall cells."""

    def __init__(self, width, height):
        self.width = width
        self.height = height
        self.walls = []  # list of (x, y) cells that cannot be entered

    def in_bounds(self, id):
        """True if (x, y) lies inside the grid rectangle."""
        (x, y) = id
        return 0 <= x < self.width and 0 <= y < self.height

    def passable(self, id):
        """True unless (x, y) is a wall."""
        return id not in self.walls

    def neighbors(self, id):
        """Yield the in-bounds, passable 4-neighbours of (x, y)."""
        (x, y) = id
        candidates = [(x + 1, y), (x, y - 1), (x - 1, y), (x, y + 1)]
        if (x + y) % 2 == 0:
            candidates.reverse()  # aesthetics: keeps drawn paths straighter
        return filter(self.passable, filter(self.in_bounds, candidates))
class GridWithWeights(SquareGrid):
    """SquareGrid whose cells may carry a per-cell movement cost."""
    def __init__(self, width, height):
        super().__init__(width, height)
        self.weights = {}  # (x, y) -> cost of stepping onto that cell
    def cost(self, from_node, to_node):
        """Return the cost of moving onto *to_node* (default 1)."""
        return self.weights.get(to_node, 1)
# The article's 10x10 weighted example: a small wall plus a "forest"
# region where each step costs 5 instead of 1.
diagram4 = GridWithWeights(10, 10)
diagram4.walls = [(1, 7), (1, 8), (2, 7), (2, 8), (3, 7), (3, 8)]
diagram4.weights = {loc: 5 for loc in [(3, 4), (3, 5), (4, 1), (4, 2),
                                       (4, 3), (4, 4), (4, 5), (4, 6),
                                       (4, 7), (4, 8), (5, 1), (5, 2),
                                       (5, 3), (5, 4), (5, 5), (5, 6),
                                       (5, 7), (5, 8), (6, 2), (6, 3),
                                       (6, 4), (6, 5), (6, 6), (6, 7),
                                       (7, 3), (7, 4), (7, 5)]}
import heapq
class PriorityQueue:
    """Min-heap priority queue of (priority, item) pairs."""

    def __init__(self):
        self.elements = []

    def empty(self):
        """Return True when the queue holds no items."""
        return not self.elements

    def put(self, item, priority):
        """Insert *item*; lower *priority* values pop first."""
        heapq.heappush(self.elements, (priority, item))

    def get(self):
        """Pop and return the item with the smallest priority."""
        (_priority, item) = heapq.heappop(self.elements)
        return item
def dijkstra_search(graph, start, goal):
    """Uniform-cost search from *start*, stopping early at *goal*.

    Returns (came_from, cost_so_far): predecessor map and cheapest known
    cost per explored node. Feed came_from to reconstruct_path.
    """
    frontier = PriorityQueue()
    frontier.put(start, 0)
    came_from = {}
    cost_so_far = {}
    came_from[start] = None
    cost_so_far[start] = 0

    while not frontier.empty():
        current = frontier.get()

        if current == goal:
            break  # early exit: cheapest path to goal is settled

        for next in graph.neighbors(current):
            new_cost = cost_so_far[current] + graph.cost(current, next)
            if next not in cost_so_far or new_cost < cost_so_far[next]:
                # Found a first or cheaper route to this neighbor.
                cost_so_far[next] = new_cost
                priority = new_cost
                frontier.put(next, priority)
                came_from[next] = current

    return came_from, cost_so_far
def reconstruct_path(came_from, start, goal):
    """Rebuild the start->goal path from a search's came_from map.

    Args:
        came_from: mapping of each node to its predecessor on the path.
        start: first node of the path.
        goal: last node of the path.

    Returns:
        The list of nodes from *start* to *goal*, inclusive.
    """
    current = goal
    path = [current]
    while current != start:
        current = came_from[current]
        path.append(current)
    # Bug fix: the original appended *start* once more here, so every
    # returned path began [start, start, ...]; the loop above (or the
    # initial list, when start == goal) already includes it.
    path.reverse()
    return path
def heuristic(a, b):
    """Manhattan distance between grid points *a* and *b*."""
    (ax, ay), (bx, by) = a, b
    return abs(ax - bx) + abs(ay - by)
def a_star_search(graph, start, goal):
    """A* search: Dijkstra plus the Manhattan heuristic toward *goal*.

    Returns (came_from, cost_so_far) exactly like dijkstra_search; only
    the frontier priority differs (cost + heuristic).
    """
    frontier = PriorityQueue()
    frontier.put(start, 0)
    came_from = {}
    cost_so_far = {}
    came_from[start] = None
    cost_so_far[start] = 0

    while not frontier.empty():
        current = frontier.get()

        if current == goal:
            break  # early exit once the goal is settled

        for next in graph.neighbors(current):
            new_cost = cost_so_far[current] + graph.cost(current, next)
            if next not in cost_so_far or new_cost < cost_so_far[next]:
                cost_so_far[next] = new_cost
                # Bias expansion toward the goal with the heuristic.
                priority = new_cost + heuristic(goal, next)
                frontier.put(next, priority)
                came_from[next] = current

    return came_from, cost_so_far
def convert(matrix):
    """Build a GridWithWeights from the maze's ASCII representation.

    Args:
        matrix: sequence of equal-length strings; '+', '-' and '|' mark
            wall characters.

    Returns:
        A 41x21 GridWithWeights whose walls list contains every wall cell
        as an (x, y) = (column, row) tuple, matching the search functions.
    """
    g = GridWithWeights(41, 21)
    for row in range(0, len(matrix)):
        for col in range(0, len(matrix[0])):
            # Dead code removed: the original also filled an unused
            # {(row, col): cost} dict that was never returned.
            if matrix[row][col] in ('+', '-', '|'):
                g.walls.append((col, row))
    return g
def genmovements(path):
    """Translate a list of (x, y) waypoints into a WASD key-press string.

    Consecutive duplicate waypoints produce no key; each unit step in x
    emits 'd'/'a' and each unit step in y emits 's'/'w'.
    """
    keys = []
    for (x1, y1), (x2, y2) in zip(path, path[1:]):
        dx, dy = x2 - x1, y2 - y1
        if dx == 1:
            keys.append("d")
        elif dx == -1:
            keys.append("a")
        if dy == 1:
            keys.append("s")
        elif dy == -1:
            keys.append("w")
    return "".join(keys)
import telnetlib
import re
import time
# CTF solver: connects to the maze service, parses each maze screen with
# regexes, runs A* from the player 'O' to the fixed exit (39, 1) and sends
# the resulting WASD moves over telnet. 337 levels are attempted.
HOST = "hackyou-ppc300.ctf.su"
tn=telnetlib.Telnet(HOST,port=11111)
# p3 matches the full maze frame (900 chars between '+' corners);
# p4/p5 grab the level/flag status lines.
p3= re.compile('[+][+ |O\r\n-]{899}[+]',re.IGNORECASE)
p4= re.compile('[\S ]+level[ \S]+',re.IGNORECASE)
p5= re.compile('[\S ]+flag[ \S]+',re.IGNORECASE)
levels=[]
flags=[]
# Initial banner: read up to the first full frame.
dat=tn.read_until(b'+\r\n\r\n')
dat=dat.decode('utf-8')
#print('dat is:')
print(dat)
#dat,split
matrix=p3.findall(dat)
level=p4.findall(dat)
flag=p5.findall(dat)
if level!=[]:
    levels.append(level[len(level)-1])
if flag!=[]:
    flags.append(flag[len(flag)-1])
tn.write('\n'.encode('utf-8'))
for i in range(0,337):
    print('iteration')
    print(i)
    dat=tn.read_until(b'+\r\n\r\n')
    dat=dat.decode('utf-8')
    #print('dat is:')
    #print(dat)
    #dat,split
    matrix=p3.findall(dat)
    level=p4.findall(dat)
    flag=p5.findall(dat)
    if level!=[]:
        levels.append(level[len(level)-1])
    if flag!=[]:
        flags.append(flag[len(flag)-1])
    print(levels[len(levels)-1])
    print(flags[len(flags)-1])
    if len(matrix)==0:
        continue
    matrix=matrix[len(matrix)-1]
    #print(matrix)
    matrix=matrix.split('\r\n')
    newmatrix=convert(matrix)
    # Locate the player marker 'O'; default to the usual start cell.
    startx=1
    starty=19
    # NOTE(review): this inner 'i' shadows the level counter above; safe
    # only because the outer 'i' is not used again after the prints.
    for i in range(0,len(matrix)):
        for j in range(0,len(matrix[0])):
            if matrix[i][j]=='O':
                startx=j
                starty=i
    print('start')
    print(startx)
    print(starty)
    # Exit cell is fixed at the top-right of the maze.
    finishx=39
    finishy=1
    came_from, cost_so_far = a_star_search(newmatrix, (startx, starty), (finishx, finishy))
    #draw_grid(newmatrix, width=1, point_to=came_from, start= (startx, starty), goal=(finishx, finishy))
    #draw_grid(newmatrix, width=3, number=cost_so_far, start= (startx, starty), goal=(finishx, finishy))
    #print(came_from)
    path=reconstruct_path(came_from,start= (startx, starty), goal=(finishx, finishy))
    #print(path)
    movems=genmovements(path)
    #print(movems)
    # Send one key per step, consuming the redrawn frame after each.
    for rep in range(0,len(movems)):
        #print('rep='+ str(rep)+movems[rep])
        tn.write(movems[rep].encode('utf-8'))
        dat=tn.read_until(b'+\r\n\r\n')
        dat=dat.decode('utf-8')
        print('.', end="")
        #print(dat)
        #time.sleep(0.1)
    matrix=[]
    #tn.write('\n'.encode('utf-8'))
print(tn.read_all())
| StarcoderdataPython |
5086201 | <gh_stars>1-10
import optmod
import unittest
import numpy as np
class TestVariableDicts(unittest.TestCase):
    """Tests for optmod.VariableDict construction and defaults."""
    def test_construction_with_tuples(self):
        # Tuple keys are allowed; unknown keys raise KeyError.
        x = optmod.VariableDict([(1,2), ('tt', 4)], name='x')
        self.assertTrue(isinstance(x[(1,2)], optmod.VariableScalar))
        self.assertTrue(isinstance(x[('tt',4)], optmod.VariableScalar))
        self.assertRaises(KeyError, lambda a: x[a], 50)
    def test_construction(self):
        # Each key gets a continuous scalar named "<name>_<key>" with
        # default value 0; the value dict may override per key, and keys
        # absent from the key list (like 'c') are ignored.
        x = optmod.VariableDict(['a', 'b'], name='foo')
        self.assertTrue(isinstance(x, dict))
        self.assertEqual(len(x), 2)
        xa = x['a']
        self.assertTrue(isinstance(xa, optmod.VariableScalar))
        self.assertEqual(xa.get_value(), 0.)
        self.assertTrue(xa.is_continuous())
        self.assertEqual(xa.name, 'foo_a')
        xb = x['b']
        self.assertTrue(isinstance(xb, optmod.VariableScalar))
        self.assertEqual(xb.get_value(), 0.)
        self.assertTrue(xb.is_continuous())
        self.assertEqual(xb.name, 'foo_b')
        x = optmod.VariableDict(['a', 'b'], name='bar', value={'a': 10, 'c': 100})
        self.assertTrue(isinstance(x, dict))
        self.assertEqual(len(x), 2)
        xa = x['a']
        self.assertTrue(isinstance(xa, optmod.VariableScalar))
        self.assertEqual(xa.get_value(), 10.)
        self.assertTrue(xa.is_continuous())
        self.assertEqual(xa.name, 'bar_a')
        xb = x['b']
        self.assertTrue(isinstance(xb, optmod.VariableScalar))
        self.assertEqual(xb.get_value(), 0.)
        self.assertTrue(xb.is_continuous())
        self.assertEqual(xb.name, 'bar_b')
| StarcoderdataPython |
4923056 | <reponame>panghantian-kavout/DeepRL<filename>DeepRL/Agent/DoubleDQNAgent.py
import typing
from copy import deepcopy
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from DeepRL.Agent.AgentAbstract import AgentAbstract
from DeepRL.Env import EnvAbstract
from DeepRL.Replay.ReplayAbstract import ReplayAbstract, ReplayTuple
class DoubleDQNAgent(AgentAbstract):
    """Double DQN: the online network selects the next action, the frozen
    target network evaluates it, decoupling selection from evaluation."""
    def __init__(
            self, _model: nn.Module,
            _env: EnvAbstract,
            _gamma: float, _batch_size: int,
            _epsilon_init: float,
            _epsilon_decay: float,
            _epsilon_underline: float,
            _replay: ReplayAbstract = None,
            _optimizer: optim.Optimizer = None,
            _err_clip: float = None, _grad_clip: float = None
    ):
        """Store the online/target networks, exploration schedule, replay
        buffer and optimizer in the shared agent config."""
        super().__init__(_env)

        self.q_func: nn.Module = _model
        # Target network starts as a frozen copy of the online network.
        self.target_q_func: nn.Module = deepcopy(_model)
        for p in self.target_q_func.parameters():
            p.requires_grad = False

        # set config
        self.config.gamma = _gamma
        self.config.batch_size = _batch_size
        self.config.epsilon = _epsilon_init
        self.config.epsilon_decay = _epsilon_decay
        self.config.epsilon_underline = _epsilon_underline
        self.config.err_clip = _err_clip
        self.config.grad_clip = _grad_clip

        self.replay = _replay

        self.criterion = nn.MSELoss()
        self.optimizer = _optimizer

    def func(
            self, _x_data: np.ndarray, _train: bool = True
    ) -> np.ndarray:
        """Run the online network on a numpy batch and return numpy Q-values."""
        x_var = Variable(
            torch.from_numpy(_x_data).float(),
            volatile=not _train
        )
        return self.q_func(x_var).data.numpy()

    def doTrain(self, _batch_tuples: typing.Sequence[ReplayTuple]):
        """One gradient step on a replay batch using the double-DQN target."""
        # get inputs from batch
        prev_x = self.getPrevInputs(_batch_tuples)
        next_x = self.getNextInputs(_batch_tuples)
        prev_x = Variable(torch.from_numpy(prev_x).float())
        next_x = Variable(
            torch.from_numpy(next_x).float(),
            volatile=True
        )
        # calc current value estimate: mask Q(s, .) down to the taken action.
        prev_output = self.q_func(prev_x)
        prev_action = self.getActionData(
            prev_output.size(), [d.action for d in _batch_tuples]
        )
        prev_output = prev_output * Variable(torch.from_numpy(prev_action))
        prev_output = prev_output.sum(1)
        # calc target value estimate and loss: actions chosen by the online
        # network, values read from the target network (double-DQN).
        next_output = self.q_func(next_x)
        next_action = self.env.getBestActions(
            next_output.data.numpy(),
            [t.next_state for t in _batch_tuples]
        )
        next_output = self.target_q_func(next_x)
        target_data = self.getQTargetData(
            next_output.data.numpy(), next_action, _batch_tuples
        )
        loss = self.criterion(
            prev_output, Variable(torch.from_numpy(target_data))
        )
        # update q func
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
3348587 | <filename>URI.3.py
# Fuel-sale tally (URI 1047-style exercise): read option codes from stdin
# until option 4 (end of day) is entered, counting sales of alcohol (1),
# gasoline (2) and diesel (3), then print the totals.
alcohol = 0
gasoline = 0
diesel = 0
option = 0
while option != 4:
    option = int(input())
    if option == 1:
        alcohol += 1
    elif option == 2:
        gasoline += 1
    elif option == 3:
        diesel += 1
print("MUITO OBRIGADO")
print("Alcool: " + str(alcohol))
print("Gasolina: " + str(gasoline))
print("Diesel: " + str(diesel))
6465397 | # -*- coding: utf-8 -*-
from confiture import Confiture
def test_empty():
    """An empty template accepts both an empty and a simple config."""
    checker = Confiture('tests/yaml/template/empty.yaml')
    checker.check('tests/yaml/config/empty_valid.yaml')
    checker.check('tests/yaml/config/simple_valid.yaml')


def test_simple():
    """The simple template accepts its matching valid config."""
    Confiture('tests/yaml/template/simple.yaml').check(
        'tests/yaml/config/simple_valid.yaml')


def test_nested():
    """The nested template accepts its matching valid config."""
    Confiture('tests/yaml/template/nested.yaml').check(
        'tests/yaml/config/nested_valid.yaml')


def test_travis_configuration():
    """The travis template validates this repository's .travis.yml."""
    Confiture('tests/yaml/template/travis.yaml').check('.travis.yml')
| StarcoderdataPython |
6640949 | <filename>setup.py
import os
import re
from setuptools import find_packages, setup
def read(f):
    """Return the full contents of file *f*, decoded as UTF-8.

    Uses a context manager so the file handle is closed deterministically
    (the original left the handle open for the garbage collector).
    """
    with open(f, 'r', encoding='utf-8') as fh:
        return fh.read()
def get_version(package):
    """
    Return package version as listed in `__version__` in `__init__.py`.

    The init file is read through a context manager so the handle is closed
    deterministically (the original relied on garbage collection).
    """
    with open(os.path.join(package, '__init__.py')) as init_file:
        init_py = init_file.read()
    return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
# Resolve the package version once, from arvestust/__init__.py.
version = get_version('arvestust')

# Package metadata; the PyPI long description is pulled from the README
# at build time.
setup(
    version=version,
    name='django-arvestust',
    url='https://www.github.com/lehvitus/arvestust',
    author='<NAME>',
    author_email='<EMAIL>',
    description='A Django app for file management and record-keeping',
    long_description=read('README.md'),
    long_description_content_type='text/markdown',
)
| StarcoderdataPython |
6467087 | import argparse
import hypothesis
import matplotlib.pyplot as plt
import numpy as np
import torch
from hypothesis.stat import highest_density_level
from util import MarginalizedAgePrior
from util import Prior
from scipy.stats import chi2
from util import load_ratio_estimator
@torch.no_grad()
def main(arguments):
    """Estimate the empirical coverage of the regions produced by a ratio
    estimator.

    For ``arguments.n`` randomly drawn observations, builds either a
    Bayesian highest-density credible region or (with ``--frequentist``)
    a confidence region based on Wilks' theorem at level
    ``arguments.level``, and records whether the true (nominal) parameter
    falls inside.  The per-observation booleans are saved to
    ``arguments.out``.
    """
    # Load the ratio estimator
    ratio_estimator = load_ratio_estimator(arguments.model)
    # Load the densities
    densities = torch.from_numpy(np.load(arguments.data + "/density-contrasts-cut-noised.npy")).float()
    # Check if the non-marginalized model has been specified
    resolution = arguments.resolution
    if "not-marginalized" in arguments.model:
        # Joint (mass, age) model: hypotheses form a 2D grid.
        prior = Prior()
        degrees_of_freedom = 2
        masses = torch.from_numpy(np.load(arguments.data + "/masses.npy")).view(-1, 1).float()
        ages = torch.from_numpy(np.load(arguments.data + "/ages.npy")).view(-1, 1).float()
        nominals = torch.cat([masses, ages], dim=1)
        masses = torch.linspace(prior.low[0], prior.high[0] - 0.01, resolution).view(-1, 1)
        masses = masses.to(hypothesis.accelerator)
        ages = torch.linspace(prior.low[1], prior.high[1] - 0.01, resolution).view(-1, 1)
        ages = ages.to(hypothesis.accelerator)
        grid_masses, grid_ages = torch.meshgrid(masses.view(-1), ages.view(-1))
        inputs = torch.cat([grid_masses.reshape(-1,1), grid_ages.reshape(-1, 1)], dim=1)
    else:
        # Age-marginalized model: hypotheses are a 1D grid over mass only.
        prior = MarginalizedAgePrior()
        degrees_of_freedom = 1
        # Prepare inputs
        nominals = torch.from_numpy(np.load(arguments.data + "/masses.npy")).view(-1, 1).float()
        masses = torch.linspace(prior.low, prior.high - 0.01, resolution).view(-1, 1)
        masses = masses.to(hypothesis.accelerator)
        inputs = masses
    # Prepare the diagnostic
    nominals = nominals.to(hypothesis.accelerator)
    densities = densities.to(hypothesis.accelerator)
    results = []
    indices = np.random.randint(0, len(densities), size=arguments.n)
    for index in indices:
        # Get current density and nominal value
        nominal = nominals[index].view(1, -1)
        density = densities[index].view(1, -1)
        # Prepare the outputs: pair the same observation with every hypothesis.
        outputs = density.repeat(len(inputs), 1)
        # Check if we have to compute Bayesian credible regions
        if not arguments.frequentist:
            # Compute Bayesian credible region
            # Compute the posterior pdf
            log_ratios = ratio_estimator.log_ratio(inputs=inputs, outputs=outputs)
            log_pdf = log_ratios  # Uniform prior
            pdf = log_pdf.exp()
            # Locate the grid point closest to the nominal parameter.
            norms = (inputs - nominal).norm(dim=1).cpu().numpy()
            nominal_index = np.argmin(norms)
            nominal_pdf = pdf[nominal_index].item()
            level = highest_density_level(pdf, arguments.level, bias=arguments.bias)
            # Covered iff the nominal's density is above the HDR threshold.
            if nominal_pdf >= level:
                covered = True
            else:
                covered = False
        else:
            # Compute Frequentist confidence interval based on Wilks' theorem.
            # Compute the maximum theta
            log_ratios = ratio_estimator.log_ratio(inputs=inputs, outputs=outputs)
            max_ratio = log_ratios[log_ratios.argmax()]
            # Likelihood-ratio test statistic, shifted so its minimum is 0.
            test_statistic = -2 * (log_ratios - max_ratio)
            test_statistic -= test_statistic.min()
            x = chi2.isf(1 - arguments.level, df=degrees_of_freedom)
            norms = (inputs - nominal).norm(dim=1).cpu().numpy()
            nominal_index = np.argmin(norms)
            # Covered iff the statistic at the nominal is below the
            # chi-squared critical value.
            if test_statistic[nominal_index].item() <= x:
                covered = True
            else:
                covered = False
        results.append(covered)
    # Save the results of the diagnostic.
    np.save(arguments.out, results)
def parse_arguments():
    """Build and parse the CLI options for the coverage diagnostic.

    Unknown arguments are ignored (``parse_known_args``) so the script can
    be launched by wrappers that append their own flags.

    Returns:
        argparse.Namespace with the recognised options.
    """
    parser = argparse.ArgumentParser("Emperical coverage estimation")
    parser.add_argument("--bias", type=float, default=0.0, help="Bias-term to for high-density-level estimation (default: 0.0)")
    parser.add_argument("--data", type=str, default=None, help="Path of the data directory (default: none).")
    parser.add_argument("--frequentist", action="store_true", help="Flag to compute frequentist confidence intervals instead of Bayesian credible regions (default: false).")
    # BUG FIX: the help text previously claimed the default was 0.997
    # (3 sigma) while the actual default is 0.95 — the help now matches.
    parser.add_argument("--level", type=float, default=0.95, help="Credible level (default: 0.95).")
    parser.add_argument("--model", type=str, default=None, help="Will load all ratio estimators matching this path query (default: none).")
    parser.add_argument("--n", type=int, default=1000, help="Number of times to repeat the experiment (default: 1000).")
    parser.add_argument("--out", type=str, default=None, help="Path of the output file (default: none).")
    parser.add_argument("--resolution", type=int, default=100, help="Resolution for every variable (default: 100).")
    arguments, _ = parser.parse_known_args()
    return arguments
# Script entry point: parse CLI options and run the coverage diagnostic.
if __name__ == "__main__":
    arguments = parse_arguments()
    main(arguments)
| StarcoderdataPython |
# Three demo module-level constants.
a, b, c = 1, 2, 3
3213298 | <reponame>wondadeveloppe26/checklist-seo
from setuptools import setup, find_packages
from readme_renderer.markdown import render
# Read the project README so it can be rendered as the PyPI long description.
long_description = ""
with open('README.md', encoding='utf-8') as file:
    long_description = file.read()

# Package metadata; the Markdown README is rendered with readme_renderer
# before being handed to setuptools.
setup(
    name='checklist-seo',
    version='0.0.7',
    license='MIT',
    author='<NAME>',
    url='https://github.com/itarverne/checklist-seo',
    description='The full checklist to provide tools inside Django in order to write right content',
    long_description=render(long_description),
    packages=find_packages(),
    long_description_content_type="text/markdown",
    platforms='any',
    python_requires='>=3.7',
    install_requires=['Django>=3.1,<3.2', 'nltk>=3.5', 'lxml>=4.5.2'],
    include_package_data=True,
    package_data={'seo': ['static/js/helper.js', 'static/js/seoSidePannel.js', 'static/images/seo_logo.png', 'static/css/seo.css']},
    test_suite='testing.test_api',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Framework :: Django :: 3.1'
    ],
)
| StarcoderdataPython |
4894074 | <filename>sqltask/base/engine.py
import logging
from typing import Any, Dict, Optional
from sqlalchemy.engine import create_engine
from sqlalchemy.engine.url import make_url
from sqlalchemy.schema import MetaData
from sqltask.engine_specs import get_engine_spec
class EngineContext:
    """Bundle a SQLAlchemy engine with its engine spec, resolved
    database/schema and a bound MetaData instance.
    """

    def __init__(self,
                 name: str,
                 url: str,
                 metadata_kwargs: Optional[Dict[str, Any]] = None,
                 ):
        """
        :param name: logical name of the engine (used for logging).
        :param url: SQLAlchemy connection URL.
        :param metadata_kwargs: extra keyword arguments forwarded to
            the MetaData constructor.
        """
        self.name = name
        self.engine = create_engine(url)
        self.engine_spec = get_engine_spec(self.engine.name)
        url_params = self.engine_spec.get_url_params(self.engine.url)
        self.database, self.schema = url_params
        self.metadata_kwargs = metadata_kwargs or {}
        self.metadata = MetaData(
            bind=self.engine,
            schema=url_params.schema,
            **self.metadata_kwargs,
        )
        # Log a human-readable "database/schema" identifier for the engine.
        if url_params.database and url_params.schema:
            url_str = url_params.database + "/" + url_params.schema
        else:
            url_str = url_params.database or "<Undefined>"
        logging.info(f"Created engine `{name}` using "
                     f"`{self.engine_spec.__name__}` on `{url_str}`")

    def create_new(self,
                   database: Optional[str],
                   schema: Optional[str],
                   ) -> "EngineContext":
        """
        Create a new EngineContext based on the current instance, but with a
        different database and/or schema.

        :param database: Database to use. If left unspecified, falls back to the database
        provided by the original engine context
        :param schema: Schema to use. If left unspecified, falls back to the schema
        provided by the original engine context
        :return: a new instance of EngineContext with different url
        """
        url = make_url(str(self.engine.url))
        self.engine_spec.modify_url(url, database=database, schema=schema)
        # BUG FIX: the original passed ``**self.metadata_kwargs`` here, which
        # spread the MetaData options as (unexpected) EngineContext keyword
        # arguments and raised TypeError for any non-empty dict.  They must
        # be forwarded as the ``metadata_kwargs`` parameter instead.
        return EngineContext(self.name, str(url),
                             metadata_kwargs=self.metadata_kwargs)
| StarcoderdataPython |
6497730 | <filename>4_SC_project/students/migrations/0002_auto_20190828_1013.py
# Generated by Django 2.2.1 on 2019-08-28 10:13
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django (see the header comment); creates the new
    # SStudent model (with an advisor FK to the user model) and removes the
    # old Student model.  Auto-generated migrations should not be hand-edited
    # beyond comments.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('students', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='SStudent',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user_name', models.CharField(max_length=50)),
                ('first_name', models.CharField(max_length=50)),
                ('last_name', models.CharField(max_length=50)),
                ('email', models.EmailField(blank=True, max_length=70)),
                ('s_slug', models.CharField(default=1, max_length=100)),
                ('advisor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'Student',
            },
        ),
        migrations.DeleteModel(
            name='Student',
        ),
    ]
| StarcoderdataPython |
4964709 | <gh_stars>1-10
#!/usr/bin/env python3
import click
import os
import sys
import tempfile
from operations.gif_it import GenerateGifIt
# Fallback video to download when the caller supplies no URL.
DEFAULT_VIDEO = 'https://www.youtube.com/watch?v=CGOPPzh8TJ4'
# Host directory mounted into the container for inputs/outputs.
SHARE_DIR = '/usr/src/share'
# Key under which the CLI group stores the requested quality in ctx.obj.
VIDEO_QUALITY = 'context_video_quality'


def _generate_config(overrides=None):
    """Build the working-path/option configuration dict for a gif run.

    :param overrides: optional dict; ``output_path`` and ``video_quality``
        are required keys, while ``input_video``, ``video_save_path`` and
        ``video_url`` are optional.
    :return: dict of paths and options consumed by the gif pipeline.
    """
    # BUG FIX: the original used the mutable default argument
    # ``overrides={}``; normalise a None default instead.
    overrides = {} if overrides is None else overrides
    temp_dir = tempfile.mkdtemp()

    # Input video: a file inside the share dir when provided, otherwise a
    # scratch path for the downloaded video.
    avi_path = os.path.join(temp_dir, 'nosub.avi')
    if 'input_video' in overrides:
        avi_path = os.path.join(SHARE_DIR, overrides['input_video'])

    source_path = overrides.get(
        'video_save_path', os.path.join(temp_dir, 'sourceVideo.mp4'))

    return {
        'avi_path': avi_path,
        'final_path': os.path.join(temp_dir, 'final.avi'),
        'output_path': os.path.join(SHARE_DIR, overrides['output_path']),
        'source_path': source_path,
        'subtitle_path': '/usr/src/share/subtitles.yaml',
        'temp_dir': temp_dir,
        'video_quality': overrides['video_quality'],
        'video_url': overrides.get('video_url', DEFAULT_VIDEO),
    }
# Root command group: stores the requested quality in the click context so
# the subcommands can read it.  (Docstrings are deliberately not added to
# commands that lack them, since click would surface them as --help text.)
@click.group()
@click.option('--video-quality', '-q', default=135, type=int)
@click.pass_context
def cli(ctx, video_quality):
    ctx.ensure_object(dict)
    ctx.obj[VIDEO_QUALITY] = video_quality


@cli.command()
@click.option('--output', '-o', default='/usr/src/share/results.gif', type=str)
@click.option('--input-video', '-i', default='', type=str)
@click.pass_context
def run(ctx, output, input_video):
    """Runs the entire recipe: download, conversion, subtitle, save"""
    # The download step is skipped when a local input video was supplied.
    overrides = {
        'input_video': input_video,
        'output_path': output,
        'video_quality': ctx.obj[VIDEO_QUALITY]
    }
    config = _generate_config(overrides=overrides)
    generator = GenerateGifIt()
    cmd = generator.get_instance({
        'skip_download': len(input_video) > 0
    })
    # Propagate the pipeline's exit code to the shell.
    exit_code = cmd.exec(config)
    sys.exit(exit_code)


@cli.command()
@click.option('--info', is_flag=True)
@click.argument('url')
@click.pass_context
def download(ctx, info, url):
    """Download the desired video"""
    config = _generate_config({
        'output_path': '/usr/src/share/results.gif', # todo: remove this
        'video_quality': ctx.obj[VIDEO_QUALITY],
        'video_save_path': os.path.join(SHARE_DIR, 'video.mp4'),
        'video_url': url
    })
    cmd = None
    generator = GenerateGifIt()
    # --info only lists the available formats instead of downloading.
    if info:
        cmd = generator.get_video_options()
    else:
        cmd = generator.get_downloader()
    exit_code = cmd.exec(config=config)
    sys.exit(exit_code)


if __name__ == '__main__':
    cli(obj={})
| StarcoderdataPython |
6628577 | import time
import cv2
import numpy as np
from display import Display
from extractor import Extractor
# Half-resolution display size for the 720p input clip.
width = 1280//2 #1920//2
height = 720//2 #1080//2

# Display window and feature extractor shared by every frame.
disp = Display(width, height)
fe = Extractor()


def frames_per_motion(img):
    """Resize *img*, extract frame-to-frame keypoint matches, and draw them
    (a circle at the new location, a line back to the previous one) before
    painting the annotated frame to the display.
    """
    img = cv2.resize(img, (width, height))
    matches = fe.extract(img)
    print("%d matches" % (len(matches)))
    for point1, point2 in matches:
        # Round sub-pixel keypoint coordinates to integer pixel positions.
        u1,v1 = map(lambda x: int(round(x)), point1)
        u2,v2 = map(lambda x: int(round(x)), point2)
        cv2.circle(img, (u1,v1), color = (0,255,0), radius = 1, thickness = 2)
        cv2.line(img, (u1,v1), (u2,v2), color = (255,0,0))
    disp.paint(img)
if __name__ == "__main__":
    cap = cv2.VideoCapture("videos/car3.mp4")
    # Process the clip frame by frame until it ends or a read fails.
    while cap.isOpened():
        ret, frame = cap.read()
        if ret == True:
            frames_per_motion(frame)
        else:
            break
| StarcoderdataPython |
4907095 | <filename>examples/inverted_pendulum.py<gh_stars>100-1000
import numpy as np
import gym
from pilco.models import PILCO
from pilco.controllers import RbfController, LinearController
from pilco.rewards import ExponentialReward
import tensorflow as tf
from gpflow import set_trainable
# from tensorflow import logging
# Fixed seed so the random rollouts (and thus the whole demo) are repeatable.
np.random.seed(0)

from utils import rollout, policy

env = gym.make('InvertedPendulum-v2')
# Initial random rollouts to generate a dataset
X,Y, _, _ = rollout(env=env, pilco=None, random=True, timesteps=40, render=True)
for i in range(1,5):
    X_, Y_, _, _ = rollout(env=env, pilco=None, random=True, timesteps=40, render=True)
    X = np.vstack((X, X_))
    Y = np.vstack((Y, Y_))

# State and control dimensionality are inferred from the collected data.
state_dim = Y.shape[1]
control_dim = X.shape[1] - state_dim
controller = RbfController(state_dim=state_dim, control_dim=control_dim, num_basis_functions=10)
# controller = LinearController(state_dim=state_dim, control_dim=control_dim)

pilco = PILCO((X, Y), controller=controller, horizon=40)

# Example of user provided reward function, setting a custom target state
# R = ExponentialReward(state_dim=state_dim, t=np.array([0.1,0,0,0]))
# pilco = PILCO(X, Y, controller=controller, horizon=40, reward=R)

# Alternate model learning and policy optimisation, then collect an
# on-policy rollout and grow the training set with it.
for rollouts in range(3):
    pilco.optimize_models()
    pilco.optimize_policy()
    import pdb; pdb.set_trace()
    X_new, Y_new, _, _ = rollout(env=env, pilco=pilco, timesteps=100, render=True)
    # Update dataset
    X = np.vstack((X, X_new)); Y = np.vstack((Y, Y_new))
    pilco.mgpr.set_data((X, Y))
| StarcoderdataPython |
3438976 | <reponame>ardacoskunses/asitop<gh_stars>0
def parse_thermal_pressure(powermetrics_parse):
    """Extract the thermal-pressure reading from a powermetrics sample."""
    thermal_pressure = powermetrics_parse["thermal_pressure"]
    return thermal_pressure
def parse_bandwidth_metrics(powermetrics_parse):
    """Convert the raw powermetrics bandwidth counters into a GB-keyed dict.

    Unknown counters are ignored; known counters default to 0.  Split
    per-cluster (PCPU0/PCPU1), per-encoder (VENC0/VENC1) and per-JPEG-block
    (JPG0/JPG1) counters are folded into their combined totals, and a single
    "MEDIA DCS" aggregate sums the read+write traffic of the media engines.
    """
    counter_names = [
        "PCPU0 DCS RD", "PCPU0 DCS WR",
        "PCPU1 DCS RD", "PCPU1 DCS WR",
        "PCPU DCS RD", "PCPU DCS WR",
        "ECPU DCS RD", "ECPU DCS WR",
        "GFX DCS RD", "GFX DCS WR",
        "ISP DCS RD", "ISP DCS WR",
        "STRM CODEC DCS RD", "STRM CODEC DCS WR",
        "PRORES DCS RD", "PRORES DCS WR",
        "VDEC DCS RD", "VDEC DCS WR",
        "VENC0 DCS RD", "VENC0 DCS WR",
        "VENC1 DCS RD", "VENC1 DCS WR",
        "VENC DCS RD", "VENC DCS WR",
        "JPG0 DCS RD", "JPG0 DCS WR",
        "JPG1 DCS RD", "JPG1 DCS WR",
        "JPG DCS RD", "JPG DCS WR",
        "DCS RD", "DCS WR"]
    # Start every known counter at zero, then fill in observed values in GB.
    result = dict.fromkeys(counter_names, 0)
    for counter in powermetrics_parse["bandwidth_counters"]:
        if counter["name"] in counter_names:
            result[counter["name"]] = counter["value"]/(1e9)
    # Fold the split P-cluster / JPEG / encoder counters into their totals.
    for total, part_a, part_b in (
            ("PCPU DCS RD", "PCPU0 DCS RD", "PCPU1 DCS RD"),
            ("PCPU DCS WR", "PCPU0 DCS WR", "PCPU1 DCS WR"),
            ("JPG DCS RD", "JPG0 DCS RD", "JPG1 DCS RD"),
            ("JPG DCS WR", "JPG0 DCS WR", "JPG1 DCS WR"),
            ("VENC DCS RD", "VENC0 DCS RD", "VENC1 DCS RD"),
            ("VENC DCS WR", "VENC0 DCS WR", "VENC1 DCS WR")):
        result[total] = result[total] + result[part_a] + result[part_b]
    # Combined read+write traffic of all media engines, accumulated in the
    # same flat RD/WR order as before to keep float results identical.
    media_engines = ("ISP DCS", "STRM CODEC DCS", "PRORES DCS",
                     "VDEC DCS", "VENC DCS", "JPG DCS")
    result["MEDIA DCS"] = sum(
        result[engine + suffix]
        for engine in media_engines
        for suffix in (" RD", " WR"))
    return result
def parse_cpu_metrics(powermetrics_parse):
    """Flatten powermetrics' processor section into cluster and power metrics.

    Emits "<cluster>_freq_Mhz" / "<cluster>_active" entries for every CPU
    cluster, synthesises combined "P-Cluster_*" entries on chips that report
    split P0/P1 performance clusters, and converts the energy counters
    (milliwatts) to watts.
    """
    processor = powermetrics_parse["processor"]
    metrics = {}
    # Per-cluster frequency (MHz) and busy percentage.
    for cluster in processor["clusters"]:
        prefix = cluster["name"]
        metrics[prefix + "_freq_Mhz"] = int(cluster["freq_hz"] / (1e6))
        metrics[prefix + "_active"] = int((1 - cluster["idle_ratio"]) * 100)
    if "P-Cluster_active" not in metrics:
        # Two performance clusters: report the mean of their utilisations...
        metrics["P-Cluster_active"] = int(
            (metrics["P0-Cluster_active"] + metrics["P1-Cluster_active"]) / 2)
    if "P-Cluster_freq_Mhz" not in metrics:
        # ...and the faster of their two frequencies.
        metrics["P-Cluster_freq_Mhz"] = max(
            metrics["P0-Cluster_freq_Mhz"], metrics["P1-Cluster_freq_Mhz"])
    # Energy counters arrive in milliwatts; expose watts.
    for source in ("ane", "dram", "cpu", "gpu", "package"):
        metrics[source + "_W"] = processor[source + "_energy"] / 1000
    return metrics
def parse_gpu_metrics(powermetrics_parse):
    """Return the GPU frequency (MHz) and busy percentage from a sample."""
    gpu = powermetrics_parse["gpu"]
    return {
        "freq_MHz": int(gpu["freq_hz"]),
        "active": int((1 - gpu["idle_ratio"]) * 100),
    }
| StarcoderdataPython |
11229803 | <gh_stars>1-10
from forum.database.models.category import Category
from flask_seeder import Seeder
from slugify import slugify
# Default top-level forum categories seeded into a fresh database.
category_names = ["PHP", "Javascript", "Python", "HTML-CSS"]


class CategorySeeder(Seeder):
    """Seeder that creates a Category row for every default category name."""

    def run(self):
        """Create and persist each default category with a slugified name."""
        for name in category_names:
            Category(name=name, slug=slugify(name)).save()
| StarcoderdataPython |
# https://leetcode.com/problems/kids-with-the-greatest-number-of-candies/submissions/
class Solution:
    """LeetCode 1431: kids with the greatest number of candies."""

    def kidsWithCandies(self, candies: [int], extraCandies: int) -> [bool]:
        """Return, per kid, whether giving that kid *extraCandies* puts them
        at (or above) the current maximum candy count.

        Runs in O(n): one pass for the max, one pass for the comparisons.
        """
        maxCandies = max(candies, default=0)
        # `x >= y` already evaluates to a bool; the original's
        # `True if ... else False` wrapper was redundant.
        return [candy + extraCandies >= maxCandies for candy in candies]
9683997 | <gh_stars>0
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from google.protobuf import empty_pb2
from api import resource_name_converters as rnc
from api.v3 import monorail_servicer
from api.v3.api_proto import feature_objects_pb2
from api.v3.api_proto import hotlists_pb2
from api.v3.api_proto import hotlists_prpc_pb2
from businesslogic import work_env
from framework import exceptions
from features import features_constants
from tracker import tracker_constants
class HotlistsServicer(monorail_servicer.MonorailServicer):
  """Handle API requests related to Hotlist objects.

  Each API request is implemented with a method as defined in the
  .proto file. Each method does any request-specific validation, uses work_env
  to safely operate on business objects, and returns a response proto.
  """

  # NOTE(crbug/monorail/7614): Until the referenced cleanup is complete,
  # all servicer methods that are scoped to a single Project need to call
  # mc.LookupLoggedInUserPerms.
  # Methods in this file do not because hotlists can span projects.

  # pRPC service descriptor consumed by the servicer framework.
  DESCRIPTION = hotlists_prpc_pb2.HotlistsServiceDescription

  @monorail_servicer.PRPCMethod
  def ListHotlistItems(self, mc, request):
    # type: (MonorailContext, ListHotlistItemsRequest) ->
    # ListHotlistItemsResponse
    """pRPC API method that implements ListHotlistItems.

    Raises:
      NoSuchHotlistException if the hotlist is not found.
      PermissionException if the user is not allowed to view the hotlist.
      InputException if the request.page_token is invalid, the request does
        not match the previous request that provided the given page_token, or
        the page_size is a negative value.
    """
    hotlist_id = rnc.IngestHotlistName(request.parent)
    if request.page_size < 0:
      raise exceptions.InputException('`page_size` cannot be negative.')
    # Clamp an unset or oversized page_size to the feature default.
    page_size = request.page_size
    if (not request.page_size or
        request.page_size > features_constants.DEFAULT_RESULTS_PER_PAGE):
      page_size = features_constants.DEFAULT_RESULTS_PER_PAGE
    # TODO(crbug/monorail/7104): take start from request.page_token
    start = 0
    # The API's comma-separated order_by maps to a space-separated sort spec.
    sort_spec = request.order_by.replace(',', ' ')
    with work_env.WorkEnv(mc, self.services) as we:
      list_result = we.ListHotlistItems(
          hotlist_id, page_size, start,
          tracker_constants.ALL_ISSUES_CAN, sort_spec, '')
    # TODO(crbug/monorail/7104): plug in next_page_token when it's been
    # implemented.
    next_page_token = ''
    return hotlists_pb2.ListHotlistItemsResponse(
        items=self.converter.ConvertHotlistItems(hotlist_id, list_result.items),
        next_page_token=next_page_token)

  @monorail_servicer.PRPCMethod
  def RerankHotlistItems(self, mc, request):
    # type: (MonorailContext, RerankHotlistItemsRequest) -> Empty
    """pRPC API method that implements RerankHotlistItems.

    Raises:
      NoSuchHotlistException if the hotlist is not found.
      PermissionException if the user is not allowed to rerank the hotlist.
      InputException if request.target_position is invalid or
        request.hotlist_items is empty or contains invalid items.
      NoSuchIssueException if hotlist item does not exist.
    """
    hotlist_id = rnc.IngestHotlistName(request.name)
    moved_issue_ids = rnc.IngestHotlistItemNames(
        mc.cnxn, request.hotlist_items, self.services)
    with work_env.WorkEnv(mc, self.services) as we:
      we.RerankHotlistItems(
          hotlist_id, moved_issue_ids, request.target_position)
    return empty_pb2.Empty()

  @monorail_servicer.PRPCMethod
  def RemoveHotlistItems(self, mc, request):
    # type: (MonorailContext, RemoveHotlistItemsRequest) -> Empty
    """pPRC API method that implements RemoveHotlistItems.

    Raises:
      NoSuchHotlistException if the hotlist is not found.
      PermissionException if the user is not allowed to edit the hotlist.
      InputException if the items to be removed are not found in the hotlist.
    """
    hotlist_id = rnc.IngestHotlistName(request.parent)
    remove_issue_ids = rnc.IngestIssueNames(
        mc.cnxn, request.issues, self.services)
    with work_env.WorkEnv(mc, self.services) as we:
      we.RemoveHotlistItems(hotlist_id, remove_issue_ids)
    return empty_pb2.Empty()

  @monorail_servicer.PRPCMethod
  def AddHotlistItems(self, mc, request):
    # type: (MonorailContext, AddHotlistItemsRequest) -> Empty
    """pRPC API method that implements AddHotlistItems.

    Raises:
      NoSuchHotlistException if the hotlist is not found.
      PermissionException if the user is not allowed to edit the hotlist.
      InputException if the request.target_position is invalid or the given
        list of issues to add is empty or invalid.
    """
    hotlist_id = rnc.IngestHotlistName(request.parent)
    new_issue_ids = rnc.IngestIssueNames(mc.cnxn, request.issues, self.services)
    with work_env.WorkEnv(mc, self.services) as we:
      we.AddHotlistItems(hotlist_id, new_issue_ids, request.target_position)
    return empty_pb2.Empty()

  @monorail_servicer.PRPCMethod
  def RemoveHotlistEditors(self, mc, request):
    # type: (MonorailContext, RemoveHotlistEditorsRequest) -> Empty
    """pPRC API method that implements RemoveHotlistEditors.

    Raises:
      NoSuchHotlistException if the hotlist is not found.
      PermissionException if the user is not allowed to edit the hotlist.
      InputException if the editors to be removed are not found in the hotlist.
    """
    hotlist_id = rnc.IngestHotlistName(request.name)
    remove_user_ids = rnc.IngestUserNames(
        mc.cnxn, request.editors, self.services)
    with work_env.WorkEnv(mc, self.services) as we:
      we.RemoveHotlistEditors(hotlist_id, remove_user_ids)
    return empty_pb2.Empty()

  @monorail_servicer.PRPCMethod
  def GetHotlist(self, mc, request):
    # type: (MonorailContext, GetHotlistRequest) -> Hotlist
    """pRPC API method that implements GetHotlist.

    Raises:
      InputException if the given name does not have a valid format.
      NoSuchHotlistException if the hotlist is not found.
      PermissionException if the user is not allowed to view the hotlist.
    """
    hotlist_id = rnc.IngestHotlistName(request.name)
    with work_env.WorkEnv(mc, self.services) as we:
      hotlist = we.GetHotlist(hotlist_id)
    return self.converter.ConvertHotlist(hotlist)

  @monorail_servicer.PRPCMethod
  def GatherHotlistsForUser(self, mc, request):
    # type: (MonorailContext, GatherHotlistsForUserRequest)
    # -> GatherHotlistsForUserResponse
    """pRPC API method that implements GatherHotlistsForUser.

    Raises:
      NoSuchUserException if the user is not found.
      InputException if some request parameters are invalid.
    """
    user_id = rnc.IngestUserName(mc.cnxn, request.user, self.services)
    with work_env.WorkEnv(mc, self.services) as we:
      hotlists = we.ListHotlistsByUser(user_id)
    return hotlists_pb2.GatherHotlistsForUserResponse(
        hotlists=self.converter.ConvertHotlists(hotlists))

  @monorail_servicer.PRPCMethod
  def UpdateHotlist(self, mc, request):
    # type: (MonorailContext, UpdateHotlistRequest) -> UpdateHotlistResponse
    """pRPC API method that implements UpdateHotlist.

    Raises:
      NoSuchHotlistException if the hotlist is not found.
      PermissionException if the user is not allowed to make this update.
      InputException if some request parameters are required and missing or
        invalid.
    """
    if not request.update_mask:
      raise exceptions.InputException('No paths given in `update_mask`.')
    if not request.hotlist:
      raise exceptions.InputException('No `hotlist` param given.')
    if not request.update_mask.IsValidForDescriptor(
        feature_objects_pb2.Hotlist.DESCRIPTOR):
      raise exceptions.InputException('Invalid `update_mask` for `hotlist`')
    hotlist_id = rnc.IngestHotlistName(request.hotlist.name)
    # Translate each field-mask path into the corresponding keyword
    # argument expected by work_env.UpdateHotlist; unknown paths are
    # silently ignored.
    update_args = {}
    hotlist = request.hotlist
    for path in request.update_mask.paths:
      if path == 'display_name':
        update_args['hotlist_name'] = hotlist.display_name
      elif path == 'owner':
        owner_id = rnc.IngestUserName(mc.cnxn, hotlist.owner, self.services)
        update_args['owner_id'] = owner_id
      elif path == 'editors':
        add_editor_ids = rnc.IngestUserNames(
            mc.cnxn, hotlist.editors, self.services)
        update_args['add_editor_ids'] = add_editor_ids
      elif path == 'summary':
        update_args['summary'] = hotlist.summary
      elif path == 'description':
        update_args['description'] = hotlist.description
      elif path == 'hotlist_privacy':
        update_args['is_private'] = (
            hotlist.hotlist_privacy == feature_objects_pb2.Hotlist
            .HotlistPrivacy.Value('PRIVATE'))
      elif path == 'default_columns':
        update_args[
            'default_col_spec'] = self.converter.IngestIssuesListColumns(
                hotlist.default_columns)
    with work_env.WorkEnv(mc, self.services) as we:
      we.UpdateHotlist(hotlist_id, **update_args)
      # Re-fetch without cache so the response reflects the update.
      hotlist = we.GetHotlist(hotlist_id, use_cache=False)
    return self.converter.ConvertHotlist(hotlist)

  @monorail_servicer.PRPCMethod
  def DeleteHotlist(self, mc, request):
    # type: (MonorailContext, GetHotlistRequest) -> Empty
    """pRPC API method that implements DeleteHotlist.

    Raises:
      InputException if the given name does not have a valid format.
      NoSuchHotlistException if the hotlist is not found.
      PermissionException if the user is not allowed to delete the hotlist.
    """
    hotlist_id = rnc.IngestHotlistName(request.name)
    with work_env.WorkEnv(mc, self.services) as we:
      we.DeleteHotlist(hotlist_id)
    return empty_pb2.Empty()
| StarcoderdataPython |
134338 | #!/usr/bin/env python3
from bisect import bisect_left
from pathlib import Path
import boto3
class S3Sync:
    """Thin wrapper around the boto3 S3 client for uploading objects to and
    listing objects in a bucket.

    (The original docstring claimed directory syncing, which this class
    does not do.)
    """

    def __init__(self):
        """Initialize with a default-credentialed boto3 S3 client."""
        self.s3 = boto3.client("s3")

    def upload_object(self, source: str, bucket: str, key: str):
        """Upload the local file *source* to ``s3://bucket/key``."""
        self.s3.upload_file(source, Bucket=bucket, Key=key)

    def list_bucket_objects(self, bucket: str) -> list[dict]:
        """
        List all objects for the given bucket.

        :param bucket: Bucket name.
        :return: A [dict] containing the elements in the bucket.
        """
        # An empty bucket's response has no "Contents" key; default to an
        # empty list instead of the original try/except KeyError dance.
        return self.s3.list_objects(Bucket=bucket).get("Contents", [])
if __name__ == "__main__":
    # Demo usage: upload a local PostgreSQL dump to the "rbobackup" bucket.
    sync = S3Sync()
    sync.upload_object("/home/ec2-user/rbo_backup/blog.dump", "rbobackup", "blog.dump")
| StarcoderdataPython |
12861121 | <filename>Datasets/Terrain/us_ned_physio_diversity.py
import ee
from ee_plugin import Map
# Load the CSP ERGo physiographic-diversity image and select its single band.
dataset = ee.Image('CSP/ERGo/1_0/US/physioDiversity')
physiographicDiversity = dataset.select('b1')
# Stretch the 0-1 diversity index across the default palette.
physiographicDiversityVis = {
    'min': 0.0,
    'max': 1.0,
}
# Center the map over the midwestern US and add the layer.
Map.setCenter(-94.625, 39.825, 7)
Map.addLayer(
    physiographicDiversity, physiographicDiversityVis,
    'Physiographic Diversity')
| StarcoderdataPython |
4955143 | ###############################################################################
# _ _ _ #
# | | (_) | | #
# _ __ _ _ | | __ _ _ __ _ __| | #
# | '_ \ | | | | | | / _` | | '__| | | / _` | #
# | |_) | | |_| | | | | (_| | | | | | | (_| | #
# | .__/ \__, | |_| \__,_| |_| |_| \__,_| #
# | | __/ | #
# |_| |___/ #
# #
# Load Arrays of Imaging Data #
# #
# Copyright (c) 2021, <NAME> #
# #
# pylarid is released under the revised (3-clause) BSD license. #
# For details, see LICENSE.txt #
# #
###############################################################################
# test_dset.py: Unit tests for the larid.Dset class
import larid
import numpy
import unittest
# Demonstration dataset attributes
# 3D volume dimensions/dtype used by the tests (single timepoint).
demo3d = {'ni': 224, 'nj': 256, 'nk': 176, 'nt': 1, 'datatype': 'int16'}
# 4D time-series dimensions/dtype used by the tests (240 timepoints).
demo4d = {'ni': 64, 'nj': 64, 'nk': 10, 'nt': 240, 'datatype': 'int16'}
class TestDset(unittest.TestCase):
    """Test basic functionality of the larid.Dset class.

    The long runs of duplicated assertions in the original are consolidated
    into two private helpers; every public test method keeps its name and
    checks exactly the same behavior.
    """

    def _assert_demo_attrs(self, obj, demo, expected_dt):
        """Check obj's array and every public attribute against a demo spec.

        Args:
            obj: larid.Dset instance under test (memory order 'tkji')
            demo: dict with keys 'ni', 'nj', 'nk', 'nt', 'datatype'
            expected_dt: time pixdim the constructor should report
                (0.0 for single-volume datasets, 1.0 otherwise)
        """
        # Array shape and data type
        self.assertEqual(obj.data.shape, (demo['nt'], demo['nk'],
                                          demo['nj'], demo['ni']))
        self.assertEqual(obj.data.dtype, numpy.dtype(demo['datatype']))
        # Dimensions, data type, and memory order
        self.assertEqual(obj.ni, demo['ni'])
        self.assertEqual(obj.nj, demo['nj'])
        self.assertEqual(obj.nk, demo['nk'])
        self.assertEqual(obj.nt, demo['nt'])
        self.assertEqual(obj.datatype, demo['datatype'])
        self.assertEqual(obj.morder, 'tkji')
        # Pixdims and toffset
        self.assertEqual(obj.di, 1.0)
        self.assertEqual(obj.dj, 1.0)
        self.assertEqual(obj.dk, 1.0)
        self.assertEqual(obj.dt, expected_dt)
        self.assertEqual(obj.toffset, 0.0)
        # NIfTI intent information
        self.assertEqual(obj.intent_code, 'NIFTI_INTENT_NONE')
        self.assertEqual(obj.intent_p1, 0.0)
        self.assertEqual(obj.intent_p2, 0.0)
        self.assertEqual(obj.intent_p3, 0.0)
        self.assertIsNone(obj.intent_name)

    def _assert_read_only(self, obj):
        """Every public attribute except morder must reject assignment."""
        read_only = [
            ('data', numpy.zeros((1, 1, 1, 1), obj.data.dtype)),
            ('ni', 1), ('nj', 1), ('nk', 1), ('nt', 1),
            ('datatype', 'uint8'),
            ('di', 3.0), ('dj', 3.0), ('dk', 3.0), ('dt', 3.0),
            ('toffset', 3.0),
            ('intent_code', 'NIFTI_INTENT_NONE'),
            ('intent_p1', 0.0), ('intent_p2', 0.0), ('intent_p3', 0.0),
            ('intent_name', 'test'),
        ]
        for attr, value in read_only:
            with self.assertRaises(AttributeError):
                setattr(obj, attr, value)

    # --- constructor arity: zero through five positional args all fail ---

    def test_init_noarg(self):
        with self.assertRaises(TypeError):
            larid.Dset()

    def test_init_onearg(self):
        with self.assertRaises(TypeError):
            larid.Dset(1)

    def test_init_twoarg(self):
        with self.assertRaises(TypeError):
            larid.Dset(1, 1)

    def test_init_threearg(self):
        with self.assertRaises(TypeError):
            larid.Dset(1, 1, 1)

    def test_init_fourarg(self):
        with self.assertRaises(TypeError):
            larid.Dset(1, 1, 1, 1)

    def test_init_fivearg(self):
        with self.assertRaises(TypeError):
            larid.Dset(1, 1, 1, 1, 'uint8')

    # --- dimension validation: each axis is checked independently ---

    def test_init_dim_str(self):
        x = '1'
        for dim in ((x, 1, 1, 1), (1, x, 1, 1), (1, 1, x, 1), (1, 1, 1, x)):
            with self.assertRaises(TypeError):
                larid.Dset(*dim, datatype='uint8', morder='tkji')

    def test_init_dim_undermin(self):
        x = 0
        for dim in ((x, 1, 1, 1), (1, x, 1, 1), (1, 1, x, 1), (1, 1, 1, x)):
            with self.assertRaisesRegex(
                    ValueError,
                    'Dataset dimensions must be greater than zero'):
                larid.Dset(*dim, datatype='uint8', morder='tkji')

    def test_init_dim_overmax(self):
        # One past the largest signed 64-bit value overflows the C dimension type
        x = 2 ** 63
        for dim in ((x, 1, 1, 1), (1, x, 1, 1), (1, 1, x, 1), (1, 1, 1, x)):
            with self.assertRaises(OverflowError):
                larid.Dset(*dim, datatype='uint8', morder='tkji')

    def test_init_overflow(self):
        # Individually valid dims whose product overflows the total size
        x = 0xF0000000
        with self.assertRaises(ValueError):
            larid.Dset(x, x, x, x, datatype='uint8', morder='tkji')

    # --- datatype / memory-order validation ---

    def test_init_datatype_numeric(self):
        with self.assertRaisesRegex(TypeError, 'Data type must be a string'):
            larid.Dset(1, 1, 1, 1, 1, 'tkji')

    def test_init_datatype_bad(self):
        with self.assertRaisesRegex(ValueError, 'Invalid data type: bad'):
            larid.Dset(1, 1, 1, 1, 'bad', 'tkji')

    def test_init_morder_numeric(self):
        with self.assertRaisesRegex(TypeError,
                                    'Memory order must be a string'):
            larid.Dset(1, 1, 1, 1, 'uint8', 1)

    def test_init_morder_bad(self):
        with self.assertRaisesRegex(ValueError, 'Invalid memory order: bad'):
            larid.Dset(1, 1, 1, 1, 'uint8', 'bad')

    def test_init_good(self):
        """Successful construction, attribute protection, and re-init rules."""
        # Create a Dset object for the 4D demonstration dataset
        obj = larid.Dset(demo4d['ni'], demo4d['nj'], demo4d['nk'],
                         demo4d['nt'], demo4d['datatype'], 'tkji')
        p1 = obj.data.__array_interface__['data'][0]
        self._assert_demo_attrs(obj, demo4d, 1.0)
        # Attempt to set read-only attributes
        self._assert_read_only(obj)
        # Attempt to delete read-write attributes
        with self.assertRaisesRegex(TypeError,
                                    'Cannot delete the morder attribute'):
            delattr(obj, 'morder')
        # Attributes (and the underlying buffer) should be preserved after a
        # failed re-initialization
        with self.assertRaises(ValueError):
            obj.__init__(demo4d['ni'], demo4d['nj'], demo4d['nk'], 0,
                         demo4d['datatype'], 'tkji')
        p2 = obj.data.__array_interface__['data'][0]
        self.assertEqual(p1, p2)
        self._assert_demo_attrs(obj, demo4d, 1.0)
        # Re-initialize the object for the 3D demonstration dataset;
        # attributes should change after successful re-initialization
        obj.__init__(demo3d['ni'], demo3d['nj'], demo3d['nk'], demo3d['nt'],
                     demo3d['datatype'], 'tkji')
        p3 = obj.data.__array_interface__['data'][0]
        self.assertNotEqual(p1, p3)
        self._assert_demo_attrs(obj, demo3d, 0.0)

    def test_copy(self):
        """copy() must deep-copy the voxel data and mirror every attribute."""
        # Start with the demonstration 4D dataset
        obj1 = larid.Dset(demo4d['ni'], demo4d['nj'], demo4d['nk'],
                          demo4d['nt'], demo4d['datatype'], 'tkji')
        p1 = obj1.data.__array_interface__['data'][0]
        # Do the copy
        obj2 = obj1.copy()
        p2 = obj2.data.__array_interface__['data'][0]
        # Voxel data should not be the same array object
        self.assertNotEqual(p1, p2)
        # Voxel data should have the same shape, dtype, and data
        self.assertEqual(obj1.data.shape, obj2.data.shape)
        self.assertEqual(obj1.data.dtype, obj2.data.dtype)
        self.assertTrue(numpy.allclose(obj1.data, obj2.data))
        # Attributes should be the same
        for attr in ('ni', 'nj', 'nk', 'nt', 'datatype', 'morder',
                     'di', 'dj', 'dk', 'dt', 'toffset', 'intent_code',
                     'intent_p1', 'intent_p2', 'intent_p3', 'intent_name'):
            self.assertEqual(getattr(obj1, attr), getattr(obj2, attr))
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
###############################################################################
| StarcoderdataPython |
3463870 | <gh_stars>10-100
import logging
import time
class ChromeLogin():
    """Walks the user through a manual Google sign-in in a browser session.

    Args:
        driver: a Selenium-style webdriver; only ``get`` and ``page_source``
            are used by this class.
    """

    # XPath locators and post-login marker text.  Unused inside this class
    # itself but kept as public class attributes for any external callers.
    emailfield = "//*[@type='email']"
    passfield = "//*[@name='password']"
    passedLogin = "Control, protect, and secure your account, all in one place"

    def __init__(self, driver):
        # BUGFIX: the original called super().__init__(driver).  The implicit
        # base is object, whose __init__ accepts no extra arguments, so every
        # instantiation raised TypeError.
        super().__init__()
        self.driver = driver

    def login(self):
        """Open the Google sign-in page and poll until the account page text
        shows that the user has completed the login manually."""
        print("You need to sign into chrome for captcha to work")
        self.driver.get('https://accounts.google.com/signin/v2')
        while True:
            # Fetch the page once per iteration instead of up to twice.
            page = self.driver.page_source
            if 'Manage your info' in page or 'Control, protect' in page:
                break
            # BUGFIX: the original had time.sleep(1) *after* the loop, so the
            # loop busy-waited at 100% CPU; poll once per second instead.
            time.sleep(1)

    def redirectToStore(self, domain):
        """Navigate to the Shopify product sitemap for *domain* and return
        the domain unchanged (for caller chaining)."""
        shopifyWebsite = "https://" + domain + ".com/sitemap_products_1.xml"
        self.driver.get(shopifyWebsite)
        return domain
| StarcoderdataPython |
134259 | # Copyright 2020 <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import os
import cv2
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.utils.data
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
from tqdm import tqdm
from utils import general, tensor
from models import network_factory
from dataloader import augment
from torchvision import transforms
# Command-line interface for single-shot stereo inference.
parser = argparse.ArgumentParser(description="Reversing the cycle: single shot")
# general
parser.add_argument("--gpu_ids", type=str, help="id(s) of the gpu to use", default="0")
parser.add_argument("--ckpt", help="path to checkpoint", required=True)
# Fixed user-facing typo in the help string ("maxium" -> "maximum").
parser.add_argument("--maxdisp", type=int, default=192, help="maximum disparity")
parser.add_argument("--model", help="stereo network to use", required=True)
parser.add_argument(
    "--left", type=str, help="path to left image(s) [space separated]", required=True
)
parser.add_argument(
    "--right", type=str, help="path to right image(s) [space separated]", required=True
)
parser.add_argument(
    "--seed", type=int, default=1, metavar="S", help="random seed (default: 1)"
)
# test
parser.add_argument("--final_h", type=int, default=384, help="height after pad in test")
parser.add_argument("--final_w", type=int, default=1280, help="width after pad in test")
parser.add_argument("--results", type=str, default="./artifacts", help="result folder")
parser.add_argument(
    "--qualitative", action="store_true", help="save colored maps instead of 16bit"
)
parser.add_argument(
    "--cmap",
    type=str,
    default="magma",
    help="colormap to use",
    choices=["magma", "gray", "jet", "kitti"],
)
parser.add_argument(
    "--maxval", type=int, default=-1, help="max value in kitti colormap"
)
args = parser.parse_args()
# Resolve the requested GPU ids and fall back to CPU when CUDA is unavailable.
gpus = general.parse_gpu_ids(args.gpu_ids)
args.cuda = len(gpus) > 0 and torch.cuda.is_available()
# Seed both the CPU and (when used) GPU RNGs for reproducibility.
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)
def _parse(names):
"""Split a string with space separated valus.
Args:
Return:
a list where each element is a non-empty value of the original list
"""
imgs = names.split(" ")
imgs = [x.strip() for x in imgs if x.strip()]
return imgs
def run_single_shot(network):
    """Generate disparity maps for one (or a list of) stereo pair(s).

    Image paths and all options come from the module-level ``args``
    namespace.  Each pair is edge-padded up to (final_h, final_w), run
    through the network, cropped back to the original size, and written out
    either as a colorized map or as a 16-bit KITTI disparity PNG.

    Args:
        network: pre-trained stereo model
    """
    test_params = {
        "results": args.results,
        "model": args.model,
        "lefts": _parse(args.left),
        "rights": _parse(args.right),
        "qualitative": args.qualitative,
        "maxval": args.maxval,
        "cmap": args.cmap,
    }
    padding_params = {
        "final_h": args.final_h,
        "final_w": args.final_w,
    }
    network.eval()
    # Images arrive as uint8 arrays from cv2.imread; normalize to [0, 1].
    transformation = augment.ComposeTransformation(
        [
            augment.ArrayToTensor(),
            transforms.Normalize(mean=[0, 0, 0], std=[255, 255, 255]),
        ]
    )
    # NOTE(review): zip() silently truncates when the two path lists differ
    # in length -- extra entries on either side are ignored.
    with tqdm(total=len(test_params["lefts"])) as pbar:
        for (left_i, right_i) in zip(test_params["lefts"], test_params["rights"]):
            # Skip (with a message) any pair whose files are missing on disk.
            if not os.path.exists(left_i):
                print("missing left image:{}".format(left_i))
                continue
            if not os.path.exists(right_i):
                print("missing right image:{}".format(right_i))
                continue
            left_img = cv2.imread(left_i)
            right_img = cv2.imread(right_i)
            if left_img.shape != right_img.shape:
                raise ValueError("Left and right images have different shapes")
            h, w, _ = left_img.shape
            # Pad on top and on the right so (h, w) becomes exactly
            # (final_h, final_w); assumes inputs are never larger -- verify.
            top_pad = padding_params["final_h"] - h
            left_pad = padding_params["final_w"] - w
            # add padding
            left_img = np.lib.pad(
                left_img, ((top_pad, 0), (0, left_pad), (0, 0)), mode="edge"
            )
            right_img = np.lib.pad(
                right_img, ((top_pad, 0), (0, left_pad), (0, 0)), mode="edge"
            )
            # transform to tensor
            left = transformation(left_img)
            right = transformation(right_img)
            # create batch
            left = torch.unsqueeze(left, 0)
            right = torch.unsqueeze(right, 0)
            name = "disp_" + os.path.basename(left_i)
            if args.cuda:
                # loading images on GPU
                left = torch.FloatTensor(left).cuda()
                right = torch.FloatTensor(right).cuda()
                left, right = Variable(left), Variable(right)
            # make prediction (inference only -- no gradients)
            with torch.no_grad():
                output = network(left, right)
            # Drop the batch dim and clamp negative disparities to zero.
            output = torch.squeeze(output)
            output = torch.nn.functional.relu(output)
            output = output.data.cpu().numpy()
            extension = "." + name.split(".")[-1]
            name = name.replace(extension, "")
            # remove padding
            if left_pad == 0:
                final_output = output[top_pad:, :]
            else:
                final_output = output[top_pad:, :-left_pad]
            if final_output.shape[0] != h or final_output.shape[1] != w:
                raise ValueError("Problems in cropping final predictions")
            destination = os.path.join(
                test_params["results"], test_params["model"], "{}", name + ".png"
            )
            # saving predictions
            if test_params["qualitative"]:
                # Min-max normalize to [0, 255] before applying the colormap.
                min_value = final_output.min()
                max_value = final_output.max()
                final_output = (final_output - min_value) / (max_value - min_value)
                final_output *= 255.0
                general.save_color(
                    destination.format("qualitative"),
                    final_output,
                    cmap=test_params["cmap"],
                    params={"maxval": test_params["maxval"]},
                )
            else:
                general.save_kitti_disp(destination.format("16bit"), final_output)
            pbar.update(1)
    print("Done! Predictions saved in {} folder".format(test_params["results"]))
# Script entry point: build the network, restore the checkpoint, and run
# inference on the requested stereo pair(s).
if __name__ == "__main__":
    print("=> model: {}".format(args.model))
    print("=> checkpoint: {}".format(args.ckpt))
    if not os.path.exists(args.ckpt):
        raise ValueError("Checkpoint not found!")
    model = network_factory.get_network(args.model)(
        {"maxdisp": args.maxdisp, "imagenet_pt": False}
    )
    if args.cuda:
        print("=> selected gpu(s) with ids {}".format(*gpus))
        # DataParallel wrapping must happen before load_state_dict so the
        # checkpoint's "module."-prefixed keys resolve.
        model = nn.DataParallel(model)
        model.cuda()
    print(
        "=> Number of model parameters: {}".format(
            sum(p.numel() for p in model.parameters() if p.requires_grad)
        )
    )
    # strict=True: fail loudly on any missing/unexpected checkpoint keys.
    state_dict = torch.load(args.ckpt)
    model.load_state_dict(state_dict["state_dict"], strict=True)
    print("EPOCHS: {}".format(state_dict["epoch"]))
    run_single_shot(model)
| StarcoderdataPython |
6661448 | <gh_stars>0
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
star_catalog_creator.py
Script used for creating star catalogs, k-vectors, etc.
Distributed under the 3-Clause BSD License (below)
Copyright 2019 Rensselaer Polytechnic Institute
(Dr. <NAME>, <NAME>, <NAME>)
Redistribution and use in source and binary forms,
with or without modification, are permitted provided
that the following conditions are met:
1. Redistributions of source code must retain the above
copyright notice, this list of conditions and the
following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials
provided with the distribution.
3. Neither the name of the copyright holder nor the names
of its contributors may be used to endorse or promote
products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
################################
#LOAD LIBRARIES
################################
import sys
import os
import cv2
import time
import numpy as np
import star_tracker.cam_matrix as cam_matrix
import star_tracker.ground as ground
#############################################################
#USER-INPUT VARS
#############################################################
# --- user-tunable settings -------------------------------------------------
b_thresh = 6.0  # brightness threshold, recommend 5-6 to start with
cam_config_file = ''  # the name of the camera config file in /data (must be set)
#############################################################
#MAIN CODE
#############################################################
# CREATE CATALOG
#row_start = 50 # row where data begins in starCatParallax
save_vals = True  # option to save values or just return them from function
#col_brightness = 1 # column in star catalog containing the brightness values
#col_rade = [5, 6] # column containing RA and DE values in starCatParallax
#col_pm = [3, 4] # column containing proper motion values in starCatParallax
#col_par = 2 # column containing parallax values in starCatParallax
#row_ep = 12 # row with catalog epoch
#row_ep = 38 # row with catalog epoch
#Excess rows to remove from starcat_file
excess_rows = [53, 54]
# column (0-indexing) containing the Hipparcos ID number
index_col = 2
# Resolve repository-relative paths from this file's location.
tools_dir = os.path.dirname(os.path.realpath(__file__))
py_src_dir = os.path.dirname(tools_dir)
repo_dir = os.path.dirname(py_src_dir)
starcat_file = os.path.join(repo_dir, os.path.join('data', 'starcat.tsv'))
cam_config_dir = os.path.join(repo_dir, os.path.join('data', 'cam_config'))
cam_config_file = os.path.join(cam_config_dir, cam_config_file)
# Load camera intrinsics and derive the field of view.
camera_matrix, cam_resolution, dist_coefs = cam_matrix.read_cam_json(cam_config_file)
nrow = cam_resolution[1]
ncol = cam_resolution[0]
fov = cam_matrix.cam2fov(cam_matrix.cam_matrix_inv(camera_matrix), nrow, ncol)
#TODO: Replace with actual observer location. Currently set to zero to just ignore parallax.
rB_scalar = 0*149597870.693
rB = np.array([[rB_scalar], [0], [0]])  # unit vector from SSB to observer
# Check whether the user already defined a directory to save things in;
# otherwise fall back to the repository data directory.  The original used a
# bare try/except around the print to detect the NameError, which would also
# have silently swallowed any other error raised while printing.
if 'save_dir' not in globals():
    file_dir = os.path.realpath(__file__)
    tools_dir = os.path.dirname(file_dir)
    py_src_dir = os.path.dirname(tools_dir)
    top_level_dir = os.path.dirname(py_src_dir)
    save_dir = os.path.join(top_level_dir, 'data')
print("Creating star pair catalog including stars up to mag "
      + str(b_thresh) + ", saving to: " + str(save_dir) + " ...")
start_time = time.time()
ground.create_star_catalog(starcat_file=starcat_file, brightness_thresh=b_thresh,
                           excess_rows=excess_rows, index_col=index_col, fov=fov,
                           save_vals=save_vals, rB=rB, save_dir=save_dir)
print("\n...catalog creation complete in " + str(time.time()-start_time) + " seconds\n")
| StarcoderdataPython |
4998455 | <filename>src/meetshaus.jmscontent/meetshaus/jmscontent/bannerviewlet.py
from five import grok
from Acquisition import aq_inner
from zope.component import getMultiAdapter
from Products.CMFCore.utils import getToolByName
from plone.app.layout.navigation.interfaces import INavigationRoot
from plone.app.layout.viewlets.interfaces import IPortalHeader
from Products.CMFCore.interfaces import IFolderish
from Products.CMFCore.interfaces import IContentish
from meetshaus.jmscontent.banner import IBanner
class BannerViewlet(grok.Viewlet):
    """Banner rotation viewlet rendered in the portal header for any
    contentish object; banners are looked up in the object's folder."""
    grok.context(IContentish)
    grok.require('zope2.View')
    grok.name('meetshaus.jmscontent.BannerViewlet')
    grok.viewletmanager(IPortalHeader)

    def update(self):
        # Flags consumed by the template: whether to render the rotation at
        # all, and whether prev/next navigation makes sense (>1 banner).
        self.has_banners = len(self.banner_content()) > 0
        self.display_banner_nav = len(self.banner_content()) > 1

    def banners(self):
        """Return one display dict per published banner.

        Banners without rich text get a placeholder paragraph and banners
        without an explicit caption position default to 'bottomright', so a
        partially filled banner object cannot break rendering.
        """
        banners = []
        for banner in self.banner_content():
            obj = banner.getObject()
            if obj.text:
                body = obj.text.output
            else:
                body = '<p> </p>'
            if obj.position:
                caption_klass = obj.position
            else:
                caption_klass = 'bottomright'
            banners.append({
                'url': obj.absolute_url(),
                'image_tag': self.contruct_image_tag(obj),
                'text': body,
                'banner_class': caption_klass,
            })
        return banners

    def banner_content(self):
        """Catalog search for published IBanner objects directly inside the
        current folder (or the parent folder for non-folderish content),
        ordered by their position in the folder."""
        context = aq_inner(self.context)
        catalog = getToolByName(context, 'portal_catalog')
        if IFolderish.providedBy(context):
            query_path = '/'.join(context.getPhysicalPath())
        else:
            parent = context.__parent__
            query_path = '/'.join(parent.getPhysicalPath())
        results = catalog(object_provides=IBanner.__identifier__,
                          path=dict(query=query_path,
                                    depth=1),
                          review_state='published',
                          sort_on='getObjPositionInParent')
        return results

    def contruct_image_tag(self, obj):
        # NOTE(review): method name keeps the original "contruct" typo --
        # renaming would break templates/callers that reference it.
        scales = getMultiAdapter((obj, self.request), name='images')
        scale = scales.scale('image', scale='preview')
        imageTag = None
        if scale is not None:
            imageTag = scale.tag()
        return imageTag
class FrontpageBannerViewlet(grok.Viewlet):
    """Banner rotation viewlet for the site front page (navigation root)."""
    grok.context(INavigationRoot)
    grok.require('zope2.View')
    grok.name('meetshaus.jmscontent.FrontpageBannerViewlet')
    grok.viewletmanager(IPortalHeader)

    def update(self):
        # Flags consumed by the template: whether to render the rotation at
        # all, and whether prev/next navigation makes sense (>1 banner).
        self.has_banners = len(self.banner_content()) > 0
        self.display_banner_nav = len(self.banner_content()) > 1

    def banners(self):
        """Return one display dict per published banner.

        BUGFIX: the original dereferenced ``obj.text.output`` and used
        ``obj.position`` unguarded, so a banner without rich text crashed the
        front page.  Guard both fields the same way the sibling
        BannerViewlet does.
        """
        banners = []
        for banner in self.banner_content():
            obj = banner.getObject()
            if obj.text:
                body = obj.text.output
            else:
                body = '<p> </p>'
            if obj.position:
                caption_klass = obj.position
            else:
                caption_klass = 'bottomright'
            banners.append({
                'url': obj.absolute_url(),
                'image_tag': self.contruct_image_tag(obj),
                'text': body,
                'banner_class': caption_klass,
            })
        return banners

    def banner_content(self):
        """Catalog search for published IBanner objects directly inside the
        current folder (or the parent folder for non-folderish content),
        ordered by their position in the folder."""
        context = aq_inner(self.context)
        catalog = getToolByName(context, 'portal_catalog')
        if IFolderish.providedBy(context):
            query_path = '/'.join(context.getPhysicalPath())
        else:
            parent = context.__parent__
            query_path = '/'.join(parent.getPhysicalPath())
        results = catalog(object_provides=IBanner.__identifier__,
                          path=dict(query=query_path,
                                    depth=1),
                          review_state='published',
                          sort_on='getObjPositionInParent')
        return results

    def contruct_image_tag(self, obj):
        # NOTE(review): method name keeps the original "contruct" typo --
        # renaming would break templates/callers that reference it.
        scales = getMultiAdapter((obj, self.request), name='images')
        scale = scales.scale('image', scale='preview')
        imageTag = None
        if scale is not None:
            imageTag = scale.tag()
        return imageTag
| StarcoderdataPython |
6518337 | <filename>example/blog/event_hooks.py
event_hooks = []
| StarcoderdataPython |
4848733 | <reponame>PackAssembler/PackAssembler
import pytest
from base import BaseTest, match_request
from packassembler.schema import User
from factories import UserFactory
@pytest.fixture
def user(request):
    """Create a persisted user for a test and delete it afterwards."""
    created = UserFactory()
    # The bound method replaces the nested ``fin`` wrapper of the original.
    request.addfinalizer(created.delete)
    return created
class TestUserViews(BaseTest):
    """Tests for the user views: profile rendering and account deletion."""

    def _get_test_class(self):
        # BaseTest hook: the view class that make_one() instantiates.
        from packassembler.views.user import UserViews
        return UserViews

    def user_request(self, user_id):
        """Build a UserViews instance whose matchdict id is *user_id*."""
        return self.make_one(match_request(id=user_id))

    def test_delete_user(self, user):
        # Deleting the authenticated user's account must remove the document.
        self.authenticate(user)
        self.user_request(user.id).deleteuser()
        assert len(User.objects) == 0

    def test_profile(self, user):
        # The profile view exposes the username as title and the user as owner.
        response = self.user_request(user.id).profile()
        assert response['title'] == user.username
        assert response['owner'] == user
8160253 | <gh_stars>1-10
import streamlit as st
from multiapp import MultiApp
from apps import home, svm, knn, logistic_regression
# decision_tree, random_forest, naive_bayes
# Register each classifier page with the multi-page dispatcher.  The first
# entry ("-----") is the landing/home page shown by default.
app = MultiApp()
app.add_app("-----", home.app)
app.add_app("KNN", knn.app)
app.add_app("SVM", svm.app)
app.add_app("Logistic Regression", logistic_regression.app)
# app.add_app("Decision Tree", decision_tree.app)
# app.add_app("Random Forest", random_forest.app)
# app.add_app("Naive Bayes", naive_bayes.app)
# Render the page selector and run the chosen page.
app.run()
| StarcoderdataPython |
281060 | <gh_stars>1-10
#Standard python libraries
import os
import warnings
import copy
import time
import itertools
import functools
#Dependencies - numpy, scipy, matplotlib, pyfftw
import numpy as np
import pyx
class DiagramDrawer:
    """Draw double-sided Feynman diagrams with PyX and save them as PDFs.

    Diagrams are lists of (key, pulse_num) tuples where key is one of
    'Ku', 'Kd', 'Bu', 'Bd' (ket/bra, up/down interactions); each tuple is
    rendered as one rung of the double-sided ladder, bottom to top.
    """
    def __init__(self):
        # Dispatch table from interaction key to its drawing method.
        self.draw_functions = {'Ku':self.draw_Ku,'Kd':self.draw_Kd,'Bu':self.draw_Bu,'Bd':self.draw_Bd}
        # One lowercase letter per pulse, used to label interaction arrows.
        self.pulse_labels = ['a','b','c','d','e','f','g','h','i','j','k','l','m',
                             'n','o','p','q','r','s','t','u','v','w','x','y','z']

    def save_diagram(self,diagram,*,folder_name = ''):
        """Render one diagram and write it as <folder_name>/Ku0Bd1....pdf;
        the filename is the concatenation of the diagram's key/index pairs."""
        os.makedirs(folder_name,exist_ok=True)
        self.c = pyx.canvas.canvas()
        filename = ''
        interaction_counter = 0
        for KB,n in diagram:
            self.draw_functions[KB](n,interaction_counter)
            filename += KB + str(n)
            interaction_counter += 1
        self.c.writePDFfile(os.path.join(folder_name,filename))

    def save_diagrams(self,diagrams,*,folder_name=''):
        """Save each diagram in *diagrams* to its own PDF (see save_diagram)."""
        for diagram in diagrams:
            self.save_diagram(diagram,folder_name=folder_name)

    def display_diagram(self,diagram,*,exclude="image/png"):
        """Displays a diagram in a Jupyter notebook environment or similar

        Args:
            diagram: Feynman diagram to be drawn, must be a list or tuple of tuples
            exclude: MIME types to exclude when attempting to display diagram

        NOTE(review): relies on the notebook built-in ``display``; outside
        IPython this raises NameError -- confirm intended usage.
        """
        self.c = pyx.canvas.canvas()
        interaction_counter = 0
        for KB,n in diagram:
            self.draw_functions[KB](n,interaction_counter)
            interaction_counter += 1
        display(self.c,exclude=exclude)

    def display_diagrams(self,diagrams,*,exclude="image/png"):
        """Display each diagram in *diagrams* (see display_diagram)."""
        for diagram in diagrams:
            self.display_diagram(diagram,exclude=exclude)

    def double_sided(self,pulse_num):
        # Draw the two vertical ladder rails (ket side x=0, bra side x=1)
        # for this rung of the diagram.
        self.c.stroke(pyx.path.line(0,pulse_num,0,pulse_num+1))
        self.c.stroke(pyx.path.line(1, pulse_num, 1, pulse_num+1))

    def K_circle(self,pulse_num):
        # Filled dot marking an interaction on the ket (left) rail.
        self.c.fill(pyx.path.circle(0,pulse_num+0.5,0.1))

    def B_circle(self,pulse_num):
        # Filled dot marking an interaction on the bra (right) rail.
        self.c.fill(pyx.path.circle(1,pulse_num+0.5,0.1))

    def right_arrow(self,x,y,pulse_num):
        """Draw an up-right arrow from (x, y); return its tip coordinates."""
        xf, yf = (x+0.5,y+0.5)
        self.c.stroke(pyx.path.line(x,y,xf,yf),[pyx.deco.earrow(size=0.3)])
        return xf,yf

    def left_arrow(self,x,y,pulse_num):
        """Draw an up-left arrow from (x, y); return its tip coordinates."""
        xf,yf = (x-.5,y+.5)
        self.c.stroke(pyx.path.line(x,y,xf,yf),[pyx.deco.earrow(size=0.3)])
        return xf,yf

    def pulse_text(self,x,y,pulse_num):
        # Label an arrow with the letter assigned to this pulse.
        text = self.pulse_labels[pulse_num]
        self.c.text(x,y,text)

    def draw_Bd(self,pulse_num,interaction_num):
        """Bra-side de-excitation: arrow leaving the right rail to the right."""
        m = interaction_num
        n = pulse_num
        self.double_sided(m)
        self.B_circle(m)
        x,y = (1,m+0.5)
        xf,yf = self.right_arrow(x,y,m)
        self.pulse_text(xf-0.1,yf-0.5,n)

    def draw_Bu(self,pulse_num,interaction_num):
        """Bra-side excitation: arrow arriving at the right rail from the right."""
        m = interaction_num
        n = pulse_num
        self.double_sided(m)
        self.B_circle(m)
        x,y = (1.5,m)
        self.left_arrow(x,y,m)
        self.pulse_text(x-0.1,y+0.1,n)

    def draw_Kd(self,pulse_num,interaction_num):
        """Ket-side de-excitation: arrow leaving the left rail to the left."""
        m = interaction_num
        n = pulse_num
        self.double_sided(m)
        self.K_circle(m)
        xi,yi = (0,m+0.5)
        xf,yf = self.left_arrow(xi,yi,m)
        self.pulse_text(xf-0.1,yf-0.5,n)

    def draw_Ku(self,pulse_num,interaction_num):
        """Ket-side excitation: arrow arriving at the left rail from the left."""
        m = interaction_num
        n = pulse_num
        self.double_sided(m)
        self.K_circle(m)
        xi,yi = (-0.5,m)
        xf,yf = self.right_arrow(xi,yi,m)
        self.pulse_text(xi-0.1,yi+0.1,n)
class DiagramGenerator(DiagramDrawer):
"""
Args:
detection_type (str): default is 'polarization', other options are 'integrated_polarization' or 'fluorescence'
"""
def __init__(self,*,detection_type = 'polarization'):
DiagramDrawer.__init__(self)
# Code will not actually function until the following three empty
# lists are set by the user
self.efield_times = [] #initialize empty list of times assoicated with each electric field shape
self.efield_wavevectors = []
# Change this, if applicable, to the maximum number of manifolds in the system under study
self.maximum_manifold = np.inf
# Change this to a negative number, possibly -np.inf, if the initial state can be de-excited
self.minimum_manifold = 0
# Used for automatically generating diagram instructions
self.wavevector_dict = {'-':('Bu','Kd'),'+':('Ku','Bd')}
# Used to find resonant-only contributions
self.instruction_to_manifold_transition = {'Bu':np.array([0,1]),
'Bd':np.array([0,-1]),
'Ku':np.array([1,0]),
'Kd':np.array([-1,0])}
self.detection_type = detection_type
if detection_type == 'polarization' or detection_type == 'integrated_polarization':
self.filter_instructions = self.polarization_detection_filter_instructions
elif detection_type == 'fluorescence':
self.filter_instructions = self.fluorescence_detection_filter_instructions
def interaction_tuple_to_str(self,tup):
"""Converts a tuple, tup = (nr,nc) into a string of +'s and -'s
"""
s = '+'*tup[0] + '-'*tup[1]
return s
def set_phase_discrimination(self,interaction_list):
if type(interaction_list[0]) is str:
new_list = interaction_list
elif type(interaction_list[0]) is tuple:
new_list = [self.interaction_tuple_to_str(el) for el in interaction_list]
self.efield_wavevectors = new_list
self.set_pdc()
# If pulses and/or phase discrimination are changed, these two attributes
# must be reset or removed:
try:
del self.pulse_sequence
del self.pulse_overlap_array
except AttributeError:
pass
def set_pdc(self):
num_pulses = len(self.efield_wavevectors)
pdc = np.zeros((num_pulses,2),dtype='int')
for i in range(num_pulses):
for j in range(len(self.efield_wavevectors[i])):
if self.efield_wavevectors[i][j] == '+':
pdc[i,0] += 1
elif self.efield_wavevectors[i][j] == '-':
pdc[i,1] += 1
else:
raise Exception('Could not set phase-discrimination condition')
self.pdc = pdc
self.pdc_tuple = tuple(tuple(pdc[i,:]) for i in range(pdc.shape[0]))
def polarization_detection_filter_instructions(self,instructions):
rho_manifold = np.array([0,0])
for ins in instructions:
rho_manifold += self.instruction_to_manifold_transition[ins]
if rho_manifold[0] < self.minimum_manifold or rho_manifold[1] < self.minimum_manifold:
return False
if rho_manifold[0] > self.maximum_manifold or rho_manifold[1] > self.maximum_manifold:
return False
if rho_manifold[0] - rho_manifold[1] == 1:
return True
else:
return False
def fluorescence_detection_filter_instructions(self,instructions):
rho_manifold = np.array([0,0])
for ins in instructions:
rho_manifold += self.instruction_to_manifold_transition[ins]
if rho_manifold[0] < self.minimum_manifold or rho_manifold[1] < self.minimum_manifold:
return False
if rho_manifold[0] > self.maximum_manifold or rho_manifold[1] > self.maximum_manifold:
return False
if abs(rho_manifold[1]-rho_manifold[1]) == 0 and rho_manifold[1] != 0:
return True
else:
return False
def instructions_from_permutation(self,perm):
f_list = []
efield_order = []
for i,k in perm:
f_list.append(self.wavevector_dict[k])
efield_order.append(i)
all_instructions = itertools.product(*f_list)
filtered_instructions = []
for ins in all_instructions:
if self.filter_instructions(ins):
filtered_instructions.append(tuple(zip(ins,efield_order)))
return filtered_instructions
def wavefunction_instructions_from_permutation(self,perm):
rho_instructions = self.instructions_from_permutation(perm)
psi_instructions = []
for instructions in rho_instructions:
new_instructions = self.convert_rho_instructions_to_psi_instructions(instructions)
if new_instructions in psi_instructions:
pass
else:
psi_instructions.append(new_instructions)
return psi_instructions
def convert_rho_instructions_to_psi_instructions(self,instructions):
psi_instructions = {'ket':[],'bra':[]}
for key, pulse_num in instructions:
if key[0] == 'K':
psi_instructions['ket'].append((key[1],pulse_num))
elif key[0] == 'B':
psi_instructions['bra'].append((key[1],pulse_num))
return psi_instructions
def convert_rho_instructions_list_to_psi_instructions_list(self,instructions_list):
psi_instructions_list = []
for instructions in instructions_list:
psi_instructions = self.convert_rho_instructions_to_psi_instructions(instructions)
if psi_instructions not in psi_instructions_list:
psi_instructions_list.append(psi_instructions)
return psi_instructions_list
def relevant_permutations(self,pulse_time_meshes):
self.set_ordered_interactions()
all_permutations = set(itertools.permutations(self.ordered_interactions))
filtered_permutations = []
for perm in all_permutations:
remove = False
for i in range(len(perm)-1):
indi = perm[i][0]
ti = pulse_time_meshes[indi]
for j in range(i+1,len(perm)):
indj = perm[j][0]
tj = pulse_time_meshes[indj]
if ti[0] > tj[-1]:
remove = True
break
if remove == True:
break
if not remove:
filtered_permutations.append(perm)
return filtered_permutations
def set_ordered_interactions(self):
"""Sets up a list of the time-ordered interactions, along with associated wavevector ('+' or '-')
"""
num_pulses = len(self.efield_wavevectors)
self.ordered_interactions = []
for i in range(num_pulses):
ks = self.efield_wavevectors[i]
for k in ks:
self.ordered_interactions.append((i,k))
    def check_new_diagram_conditions(self,arrival_times):
        """Return True when diagrams must be recomputed for these arrival times.

        Recomputation is needed when either the pulse time-ordering or the
        pulse-overlap pattern differs from the previous call, or on the very
        first call (when no cached values exist yet). The new ordering and
        overlap pattern are cached on self for the next call.
        """
        new = np.array(arrival_times)
        new_pulse_sequence = np.argsort(new)
        # Overlap matrix: entry (i, j), j > i, stays True while pulses i and j
        # overlap in time; it is cleared when their intervals are disjoint.
        new_pulse_overlap_array = np.ones((len(arrival_times),
                                           len(arrival_times)),
                                           dtype='bool')
        intervals = self.arrival_times_to_pulse_intervals(arrival_times)
        for i in range(len(intervals)):
            ti = intervals[i]
            for j in range(i+1,len(intervals)):
                tj = intervals[j]
                if ti[0] > tj[-1]:
                    new_pulse_overlap_array[i,j] = False
                elif ti[-1] < tj[0]:
                    new_pulse_overlap_array[i,j] = False
        try:
            # Compare against the values cached by the previous call.
            logic_statement = (np.allclose(new_pulse_overlap_array,self.pulse_overlap_array)
                               and np.allclose(new_pulse_sequence,self.pulse_sequence))
            if logic_statement:
                calculate_new_diagrams = False
            else:
                calculate_new_diagrams = True
        except AttributeError:
            # First call: neither cached attribute exists yet.
            calculate_new_diagrams = True
        # Cache for the next invocation.
        self.pulse_sequence = new_pulse_sequence
        self.pulse_overlap_array = new_pulse_overlap_array
        return calculate_new_diagrams
def arrival_times_to_pulse_intervals(self,arrival_times):
if self.detection_type == 'polarization' or self.detection_type == 'integrated_polarization':
if len(arrival_times) == len(self.efield_wavevectors) + 1:
# If the arrival time of the local oscillator was included in the list arrival_times,
# remove it, it is not relevant to diagram generation
arrival_times = arrival_times[:-1]
intervals = [self.efield_times[i] + arrival_times[i] for i in range(len(arrival_times))]
return intervals
def set_diagrams(self,arrival_times):
intervals = self.arrival_times_to_pulse_intervals(arrival_times)
efield_permutations = self.relevant_permutations(intervals)
all_instructions = []
for perm in efield_permutations:
all_instructions += self.instructions_from_permutation(perm)
self.current_diagrams = all_instructions
def get_diagrams(self,arrival_times):
calculate_new_diagrams = self.check_new_diagram_conditions(arrival_times)
if calculate_new_diagrams:
self.set_diagrams(arrival_times)
return self.current_diagrams
def get_wavefunction_diagrams(self,arrival_times):
rho_instructions_list = self.get_diagrams(arrival_times)
wavefunction_instructions = self.convert_rho_instructions_list_to_psi_instructions_list(rho_instructions_list)
return wavefunction_instructions
def get_diagram_final_state(self,diagram):
"""Returns ket and bra manifold indices after all diagram interactions
Args:
diagram (list) : list of ordered interactions
Returns: list with the first entry the ket manifold index, and
the second entry the bra manifold index
"""
rho_manifold = np.array([0,0],dtype='int')
for ins,pulse_num in diagram:
rho_manifold += self.instruction_to_manifold_transition[ins]
return list(rho_manifold)
def get_diagram_excitation_manifold(self,diagram,*,number_of_interactions=2):
rho_manifold = np.array([0,0])
for ins,pulse_num in diagram[:number_of_interactions]:
rho_manifold += self.instruction_to_manifold_transition[ins]
if rho_manifold[0] == rho_manifold[1]:
return rho_manifold[0]
else:
return None
def filter_diagrams_by_final_state(self,diagrams,state):
"""Returns all diagrams that end in the specified state
Args:
diagrams (list) : list of diagrams to filter
state (list) : list with the first entry the ket manifold index,
and the second entry the bra manifold index
"""
new_diagrams = []
for diagram in diagrams:
diagram_state = self.get_diagram_final_state(diagram)
if diagram_state == state:
new_diagrams.append(diagram)
return new_diagrams
def filter_diagrams_by_excitation_manifold(self,diagrams,*,manifold=1,number_of_interactions=2):
new_diagrams = []
for diagram in diagrams:
man = self.get_diagram_excitation_manifold(diagram,number_of_interactions=number_of_interactions)
if man == manifold:
new_diagrams.append(diagram)
return new_diagrams
def get_diagram_sign(self,diagram):
if len(diagram) % 2:
sign = 1j
else:
sign = 1
for ins,pulse_num in diagram:
if 'K' in ins:
sign *= -1j
elif 'B' in ins:
sign *= 1j
else:
raise Exception('diagram is not in proper format')
return sign
def filter_diagrams_by_sign(self,diagrams,*,sign=1):
new_diagrams = []
for diagram in diagrams:
diagram_sign = self.get_diagram_sign(diagram)
if diagram_sign == sign:
new_diagrams.append(diagram)
return new_diagrams
def remove_all_permutations(self,diagrams):
new_diagrams = []
diagram_weights = []
set_list = []
for diagram in diagrams:
new_set = set(diagram)
try:
ind = set_list.index(new_set)
diagram_weights[ind] += 1
except ValueError:
new_diagrams.append(diagram)
diagram_weights.append(1)
set_list.append(new_set)
return diagram_weights, new_diagrams
| StarcoderdataPython |
4918127 | <filename>docs/examples/compute/cloudframes/functionality.py
import uuid
from libcloud.compute.types import Provider, NodeState
from libcloud.compute.providers import get_driver
# Example: full lifecycle of a CloudFrames node through libcloud.
# NOTE(review): credentials are embedded in the URL here; real docs should
# use placeholders.
CloudFrames = get_driver(Provider.CLOUDFRAMES)
driver = CloudFrames(url='http://admin:admin@cloudframes:80/appserver/xmlrpc')
# get an available location
location = driver.list_locations()[0]
# and an image
image = driver.list_images()[0]
# as well as a size
size = driver.list_sizes()[0]
# use these to create a node (the name is randomized to avoid collisions)
node = driver.create_node(image=image, name='TEST_%s' % uuid.uuid4(),
                          size=size, location=location)
# snapshot a node, rollback to the snapshot and destroy the snapshot
snapshot = driver.ex_snapshot_node(node)
driver.ex_rollback_node(node, snapshot)
driver.ex_destroy_snapshot(node, snapshot)
# list running nodes
nodes = [n for n in driver.list_nodes()
         if n.state == NodeState.RUNNING]
# reboot node
driver.reboot_node(node)
# destroy the node
driver.destroy_node(node)
| StarcoderdataPython |
149758 | <filename>beartype/cave/__init__.py
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2021 Beartype authors.
# See "LICENSE" for further details.
'''
**Beartype cave.**
This submodule collects common types (e.g., :class:`NoneType`, the type of the
``None`` singleton) and tuples of common types (e.g., :data:`CallableTypes`, a
tuple of the types of all callable objects).
PEP 484
----------
This module is intentionally *not* compliant with the `PEP 484`_ standard
implemented by the stdlib :mod:`typing` module, which formalizes type hinting
annotations with a catalogue of generic classes and metaclasses applicable to
common use cases. :mod:`typing` enables end users to enforce contractual
guarantees over the contents of arbitrarily complex data structures with the
assistance of third-party static type checkers (e.g., :mod:`mypy`,
:mod:`pyre`), runtime type checkers (e.g., :mod:`beartype`, :mod:`typeguard`),
and integrated development environments (e.g., PyCharm).
Genericity comes at a cost, though. Deeply type checking a container containing
``n`` items, for example, requires type checking both that container itself
non-recursively *and* each item in that container recursively. Doing so has
time complexity ``O(N)`` for ``N >= n`` the total number of items transitively
contained in this container (i.e., items directly contained in this container
*and* items directly contained in containers contained in this container).
While the cost of this operation can be paid either statically *or* amortized
at runtime over all calls to annotated callables accepting that container, the
underlying cost itself remains the same.
By compare, this module only contains standard Python classes and tuples of
such classes intended to be passed as is to the C-based :func:`isinstance`
builtin and APIs expressed in terms of that builtin (e.g., :mod:`beartype`).
This module only enables end users to enforce contractual guarantees over the
types but *not* contents of arbitrarily complex data structures. This
intentional tradeoff maximizes runtime performance at a cost of ignoring the
types of items contained in containers.
In summary:
===================== ==================== ====================================
feature set :mod:`beartype.cave` :mod:`typing`
===================== ==================== ====================================
type checking **shallow** **deep**
type check items? **no** **yes**
`PEP 484`_-compliant? **no** **yes**
time complexity ``O(1)`` ``O(N)``
performance stupid fast *much* less stupid fast
implementation C-based builtin call pure-Python (meta)class method calls
low-level primitive :func:`isinstance` :mod:`typing.TypingMeta`
===================== ==================== ====================================
.. _PEP 484:
https://www.python.org/dev/peps/pep-0484
'''
# ....................{ IMPORTS }....................
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# WARNING: *NEVER IMPORT FROM THIS SUBPACKAGE FROM WITHIN BEARTYPE ITSELF.*
# This subpackage currently imports from expensive third-party packages on
# importation (e.g., NumPy) despite beartype itself *NEVER* requiring those
# imports. Until resolved, this subpackage is considered tainted.
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# WARNING: To avoid polluting the public module namespace, external attributes
# should be locally imported at module scope *ONLY* under alternate private
# names (e.g., "from argparse import ArgumentParser as _ArgumentParser" rather
# than merely "from argparse import ArgumentParser").
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
from beartype.cave._cavelib import (
# Types.
ArgParserType,
ArgSubparsersType,
NumpyArrayType,
NumpyScalarType,
WeakRefCType,
# Type tuples.
SequenceMutableOrNumpyArrayTypes,
SequenceOrNumpyArrayTypes,
SetuptoolsVersionTypes,
VersionComparableTypes,
VersionTypes,
WeakRefProxyCTypes,
)
from beartype._cave._caveabc import BoolType
from beartype._cave._cavefast import (
# Types.
AnyType,
AsyncCoroutineCType,
AsyncGeneratorCType,
CallableCodeObjectType,
CallablePartialType,
ClassType,
CollectionType,
ContainerType,
EllipsisType,
EnumType,
EnumMemberType,
FileType,
FunctionType,
FunctionOrMethodCType,
GeneratorCType,
GeneratorType,
HashableType,
HintGenericSubscriptedType,
IntOrFloatType,
IntType,
IterableType,
IteratorType,
MappingMutableType,
MappingType,
MethodBoundInstanceDunderCType,
MethodBoundInstanceOrClassType,
MethodDecoratorClassType,
MethodDecoratorPropertyType,
MethodDecoratorStaticType,
MethodUnboundClassCType,
MethodUnboundInstanceDunderCType,
MethodUnboundInstanceNondunderCType,
ModuleType,
NoneType,
NotImplementedType,
NumberRealType,
NumberType,
SizedType,
QueueType,
RegexCompiledType,
RegexMatchType,
SetType,
SequenceMutableType,
SequenceType,
StrType,
UnavailableType,
# Type tuples.
AsyncCTypes,
BoolOrNumberTypes,
CallableCTypes,
CallableOrClassTypes,
CallableOrStrTypes,
CallableTypes,
DecoratorTypes,
FunctionTypes,
ModuleOrStrTypes,
MethodBoundTypes,
MethodDecoratorBuiltinTypes,
MethodUnboundTypes,
MethodTypes,
MappingOrSequenceTypes,
ModuleOrSequenceTypes,
NumberOrIterableTypes,
NumberOrSequenceTypes,
RegexTypes,
ScalarTypes,
TestableTypes,
UnavailableTypes,
)
from beartype._cave._cavemap import NoneTypeOr
# ....................{ IMPORTS ~ obsolete }....................
#FIXME: Deprecate this. We previously published this ambiguously named type.
#Perhaps use a module __getattr__() under whatever Python versions support it?
#Note this requires Python >= 3.7, but that Python 3.6 will simply ignore that
#dunder method, which is obviously acceptable, because Python 3.6 is dead.
HintPep585Type = HintGenericSubscriptedType
# ....................{ DUNDERS }....................
# Intentionally defined last, as nobody wants to stumble into a full-bore rant
# first thing in the morning.
__all__ = ['STAR_IMPORTS_CONSIDERED_HARMFUL']
'''
Special list global referencing a single attribute guaranteed *not* to exist.
The definition of this global effectively prohibits star imports from this
submodule into downstream modules by raising an :class:`AttributeError`
exception on the first attempt to do so: e.g.,
.. code-block:: shell-session
>>> from beartype import *
AttributeError: module 'beartype' has no attribute 'STAR_IMPORTS_CONSIDERED_HARMFUL'
All package submodules intentionally define similar ``__all__`` list globals.
Why? Because ``__all__`` is antithetical to sane API design and facilitates
antipatterns across the Python ecosystem, including well-known harms associated
with star imports and lesser-known harms associated with the artificial notion
of an ``__all__``-driven "virtual public API:" to wit,
* **Competing standards.** Thanks to ``__all__``, Python now provides two
conflicting conceptions of what constitutes the public API for a package or
module:
* The **conventional public API.** By convention, all module- and
class-scoped attributes *not* prefixed by ``_`` are public and thus
comprise the public API. Indeed, the standard interpretation of star
imports for packages and modules defining *no* ``__all__`` list globals is
exactly this.
* The **virtual public API.** By mandate, all module-scoped attributes
explicitly listed by the ``__all__`` global are public and thus *also*
comprise the public API. Consider the worst case of a module artificially
constructing this global to list all private module-scoped attributes
prefixed by ``_``; then the intersection of the conventional and virtual
public APIs for that module would be the empty list and these two competing
standards would be the list negations of one another. Ergo, the virtual
public API has no meaningful relation to the conventional public API or any
public attributes actually defined by any package or module.
These conflicting notions are evidenced no more strongly than throughout the
Python stdlib itself. Some stdlib modules ignore all notions of a public or
private API altogether (e.g., the :mod:`inspect` module, which
unconditionally introspects all attributes of various types regardless of
nomenclature or listing in ``__all__``); others respect only the conventional
public API (e.g., the :mod:`xmlrpc` package, whose server implementation
ignores ``_``-prefixed methods); still others respect only the virtual public
API (e.g., the :mod:`pickletools` module, which conditionally introspects the
:mod:`pickle` module via its ``__all__`` list global); still others respect
either depending on context with bizarre exceptions (e.g., the :mod:`pydoc`
module, which ignores attributes excluded from ``__all__`` for packages and
modules defining ``__all__`` and otherwise ignores ``_``-prefixed attributes
excluding :class:`collections.namedtuple` instances, which are considered
public because... reasons).
Which of these conflicted interpretations is correct? None and all of them,
since there is no correct interpretation. This is bad. This is even badder
for packages contractually adhering to `semver (i.e., semantic versioning)
<semver_>`__, despite there existing no uniform agreement in the Python
community as to what constitutes a "public Python API."
* **Turing completeness.** Technically, both the conventional and virtual
public APIs are defined only dynamically at runtime by the current
Turing-complete Python interpreter. Pragmatically, the conventional public
API is usually declared statically; those conventional public APIs that do
conditionally declare public attributes (e.g., to circumvent platform
portability concerns) often go to great and agonizing pains to declare a
uniform API with stubs raising exceptions on undefined edge cases. Deciding
the conventional public API for a package or module is thus usually trivial.
However, deciding the virtual public API for the same package or module is
often non-trivial or even infeasible. While many packages and modules
statically define ``__all__`` to be a simple context-independent list, others
dynamically append and extend ``__all__`` with context-dependent list
operations mystically depending on heterogeneous context *not* decidable at
authoring time -- including obscure incantations such as the active platform,
flags and options enabled at compilation time for the active Python
interpreter and C extensions, the conjunction of celestial bodies in
accordance with astrological scripture, and abject horrors like dynamically
extending the ``__all__`` exported from one submodule with the ``__all__``
exported from another not under the control of the author of the first.
(``__all__`` gonna ``__all__``, bro.)
* **Extrinsic cognitive load.** To decide what constitutes the "public API" for
any given package or module, rational decision-making humans supposedly
submit to a sadistic heuristic resembling the following:
* Does the package in question define a non-empty ``__init__`` submodule
defining a non-empty ``__all__`` list global? If so, the attributes listed
by this global comprise that package's public API. Since ``__all__`` is
defined only at runtime by a Turing-complete interpreter, however,
deciding these attributes is itself a Turing-complete problem.
* Else, all non-underscored attributes comprise that package's public API.
Since these attributes are also defined only at runtime by a
Turing-complete interpreter, deciding these attributes is again a
Turing-complete problem -- albeit typically less so. (See above.)
* **Redundancy.** ``__all__`` violates the DRY (Don't Repeat Yourself)
principle, thus inviting accidental desynchronization and omissions between
the conventional and virtual public APIs for a package or module. This leads
directly to...
* **Fragility.** By definition, accidentally excluding a public attribute from
the conventional public API is infeasible; either an attribute is public by
convention or it isn't. Conversely, accidentally omitting a public attribute
from the virtual public API is a trivial and all-too-common mishap. Numerous
stdlib packages and modules do so. This includes the pivotal :mod:`socket`
module, whose implementation in the Python 3.6.x series accidentally excludes
the public :func:`socket.socketpair` function from ``__all__`` if and only if
the private :mod:`_socket` C extension also defines the same function -- a
condition with no reasonable justification. Or *is* there? Dare mo shiranai.
* **Inconsistency.** Various modules and packages that declare ``__all__``
randomly exclude public attributes for spurious and frankly indefensible
reasons. This includes the stdlib :mod:`typing` module, whose preamble reads:
The pseudo-submodules 're' and 'io' are part of the public
namespace, but excluded from __all__ because they might stomp on
legitimate imports of those modules.
This is the worst of all possible worlds. A package or module either:
* Leave ``__all__`` undefined (as most packages and modules do).
* Prohibit ``__all__`` (as :mod:`beartype` does).
* Define ``__all__`` in a self-consistent manner conforming to
well-established semantics, conventions, and expectations.
* **Nonconsensus.** No consensus exists amongst either stdlib developers or the
Python community as a whole as to the interpretation of ``__all__``.
Third-party authors usually ignore ``__all__`` with respect to its role in
declaring a virtual public API, instead treating ``__all__`` as a means of
restricting star imports to some well-defined subset of public attributes.
This includes SciPy, whose :attr:`scipy.__init__.__all__` list global
excludes most public subpackages of interest (e.g., :mod:`scipy.linalg`, a
subpackage of linear algebra routines) while paradoxically including some
public subpackages of little to no interest (e.g., :attr:`scipy.test`, a unit
test scaffold commonly run only by developers and maintainers).
* **Infeasibility.** We have established that no two packages or modules
(including both stdlib and third-party) agree as to the usage of ``__all__``.
Respecting the virtual public API would require authors to ignore public
attributes omitted from ``__all__``, including those omitted by either
accident or due to conflicting interpretations of ``__all__``. Since this is
pragmatically infeasible *and* since upstream packages cannot reasonably
prohibit downstream packages from importing public attributes either
accidentally or intentionally excluded from ``__all__``, most authors
justifiably ignore ``__all__``. (*So should you.*)
* **Insufficiency.** The ``__all__`` list global only applies to module-scoped
attributes; there exists no comparable special attribute for classes with
which to define a comparable "virtual public class API." Whereas the
conventional public API uniformly applies to *all* attributes regardless of
scoping, the virtual public API only applies to module-scoped attributes -- a
narrower and less broadly applicable use case.
* **Unnecessity.** The conventional public API already exists. The virtual
  public API offers no tangible benefits over the conventional public API while
offering all the above harms. Under any rational assessment, the virtual
public API can only be "considered harmful."
.. _semver:
https://semver.org
'''
| StarcoderdataPython |
9689065 | <filename>recv.py
import random
# Transmission encoding modes; each is an index into Portal's table of
# distinguishable-symbol counts ([256, 128, 64, 32, 16, 8]).
EXTENDED_ASCII = 0
ASCII = 1
BASE64 = 2
BASE32 = 3
BASE16 = 4
BASE8 = 5
class Portal(object):
def __init__(mode = 3, waitCallback = None, secs = 0.2):
self.bins = ([256, 128, 64, 32, 16, 8])[mode]
if not self.bins:
raise AttributeException("Invalid mode")
elif waitCallback != None and not callable(waitCallback):
raise AttributeException("Invalid callback")
elif type(secs) != "<class 'int'>":
raise AttributeException("Invalid frequency duration")
self.waitCallback = waitCallback
self.secs = secs
def raw_transmit(transmitList, minfreq, maxfreq):
binsize = (maxfreq - minfreq) / self.bins
for freq in transmitList
| StarcoderdataPython |
6491416 | import r2cloud.api
import r2cloud.tools.common
import matplotlib.pyplot as plot
from scipy.io import wavfile
# init api
# NOTE(review): ``r2cloud.api`` is imported as a module but called like a
# constructor here; confirm the intended entry point inside r2cloud.api.
station = r2cloud.api('https://XXXXXXXXXX')
# login to r2cloud
station.login("XXXXXXXXXXXX", "XXXXXXXXXX")
# get all observations of NOAA 19
observations = station.observationList()
# keep only observations that have data, for the satellite of interest
observations = r2cloud.tools.common.filterHasData(observations)
observations = r2cloud.tools.common.filterSat(observations, "NOAA 19")
# get last observation
last = observations[0].details()
# download wav
r2cloud.tools.common.bin2file(last.raw(), "test.wav")
# load wav
samplingFrequency, signalData = wavfile.read('test.wav')
# Plot the signal read from wav file
plot.subplot(211)
plot.title('Spectrogram of a wav ' + last.tle.line1)
plot.plot(signalData)
plot.xlabel('Sample')
plot.ylabel('Amplitude')
plot.subplot(212)
plot.specgram(signalData,Fs=samplingFrequency)
plot.xlabel('Time')
plot.ylabel('Frequency')
plot.show() | StarcoderdataPython |
8181356 | import datetime
from collections import namedtuple
import psycopg2
class Type:
    """A SQL column type; ``is_serial`` marks auto-incrementing serial types."""

    def __init__(self, type_name, is_serial=False):
        self.type_name = type_name
        self.is_serial = is_serial

    def __str__(self):
        return self.type_name

    __repr__ = __str__
# Scalar PostgreSQL column types; serial types auto-increment.
serial32 = Type('serial', True)
serial64 = Type('bigserial', True)
int16 = Type('smallint')
int32 = Type('integer')
int64 = Type('bigint')
float32 = Type('real')
float64 = Type('double precision')
text = Type('text')
timestamp = Type('timestamp')
# Array variants of the same types.
# NOTE(review): 'serial[]'/'bigserial[]' are unusual DDL — confirm PostgreSQL
# accepts these array pseudo-types before relying on them.
array_serial32 = Type('serial[]')
array_serial64 = Type('bigserial[]')
array_int16 = Type('smallint[]')
array_int32 = Type('integer[]')
array_int64 = Type('bigint[]')
array_float32 = Type('real[]')
array_float64 = Type('double precision[]')
array_text = Type('text[]')
array_timestamp = Type('timestamp[]')
class Col:
    """A table column.

    Renders as ``tbl.name`` once bound to a table, otherwise as the DDL
    fragment ``name type``.
    """

    def __init__(self, name, type, tbl=None):
        self.name = name
        self.type = type
        self.tbl = tbl

    def __str__(self):
        if self.tbl:
            return f'{self.tbl}.{self.name}'
        return f'{self.name} {self.type.type_name}'

    __repr__ = __str__
class Idx:
    """A named index over one or more columns of a table."""

    def __init__(self, tbl, cols):
        self.tbl = tbl
        self.cols = cols
        col_part = "_".join(c.name for c in cols)
        self.name = f'idx_{tbl._name}_{col_part}'

    def get_create_statement(self):
        """Return idempotent CREATE INDEX DDL for this index."""
        col_list = ", ".join(c.name for c in self.cols)
        return f'CREATE INDEX IF NOT EXISTS {self.name} ON {self.tbl._name}({col_list});'
class Tbl:
    """A SQL table definition built by attribute assignment.

    Assigning a Type instance to a non-underscore attribute (e.g.
    ``t.id = serial32``) registers a column; all other assignments behave
    normally. Also tracks primary keys and indices and generates DDL/DML
    statements (``%s`` placeholders match psycopg2's parameter style).
    """
    def __init__(self, name, alias=None):
        self._name = name
        self._alias = alias
    def __str__(self):
        return self._alias if self._alias else self._name
    def __repr__(self):
        return self._alias if self._alias else self._name
    def __setattr__(self, name, value):
        """Intercept Type assignments and record them as columns."""
        d = self.__dict__
        if not name.startswith('_') and isinstance(value, Type):
            if '_cols' not in d:
                d['_cols'] = []
            # Replace the Type with a Col bound to this table before storing.
            value = Col(name, value, self)
            d['_cols'].append(value)
        d[name] = value
    def get_primary_key_cols(self):
        """Return (lazily creating) the list of primary-key columns."""
        if not hasattr(self, '_primary_key_cols'):
            self._primary_key_cols = []
        return self._primary_key_cols
    def get_indices(self):
        """Return (lazily creating) the list of Idx objects."""
        if not hasattr(self, '_indices'):
            self._indices = []
        return self._indices
    def pk(self, cols):
        """Declare the given columns as (part of) the primary key."""
        self.get_primary_key_cols().extend(cols)
    def idx(self, cols):
        """Declare an index over the given columns."""
        self.get_indices().append(Idx(self, cols))
    def get_cols(self, filter=None):
        """Return registered columns, optionally filtered by a predicate."""
        d = self.__dict__
        if '_cols' in d:
            cols = d['_cols']
        else:
            cols = []
            d['_cols'] = cols
        if filter:
            return [c for c in cols if filter(c)]
        else:
            return cols
    def get_serial_cols(self):
        """Return only the auto-incrementing (serial) columns."""
        return self.get_cols(lambda c: c.type.is_serial)
    def get_record_type(self, filter=None):
        """Return a namedtuple type whose fields mirror the (filtered) columns."""
        return namedtuple(f'{self._name}_record', [c.name for c in self.get_cols(filter)])
    def get_create_statement(self):
        """Return CREATE TABLE DDL, including PK and any index statements."""
        col_defs = [f'{c.name} {c.type.type_name}' for c in self.get_cols()]
        col_defs = ','.join(col_defs)
        pk_defs = [c.name for c in self.get_primary_key_cols()]
        if len(pk_defs) != 0:
            pk_defs = f',PRIMARY KEY({",".join(pk_defs)})'
        else:
            pk_defs = ''
        if len(self.get_indices()) == 0:
            create_index_statement = ''
        else:
            create_index_statement = ';'.join([idx.get_create_statement() for idx in self.get_indices()]) + ';'
        return f'CREATE TABLE IF NOT EXISTS {self._name} ({col_defs}{pk_defs});{create_index_statement}'
    def get_insert(self, filter=None):
        """Return a closure insert(cursor, record) for single-row inserts."""
        cols = [c.name for c in self.get_cols(filter)]
        colps = ','.join(['%s' for _ in cols])
        cols = ','.join(cols)
        sql = f'INSERT INTO {self._name}({cols}) VALUES({colps});'
        def insert(cursor, record):
            cursor.execute(sql, record)
        return insert
    def get_inserts(self, filter=None):
        """Return a closure insert(cursor, records) for multi-row inserts."""
        cols = [c.name for c in self.get_cols(filter)]
        cols = ','.join(cols)
        sql = f'INSERT INTO {self._name}({cols}) VALUES'
        def insert(cursor, records):
            # Build one VALUES tuple per record and flatten the parameters.
            values = []
            params = []
            for r in records:
                values.append(f'({",".join(["%s" for _ in r])})')
                params.extend(r)
            values = ",".join(values)
            cursor.execute(sql + values, params)
        return insert
    def get_is_exists(self, filter=None):
        """Return a closure is_exists(cursor, record) testing row existence."""
        condition = ' AND '.join([f'{c.name}=%s' for c in self.get_cols(filter)])
        sql = f'SELECT 1 FROM {self._name} WHERE {condition};'
        def is_exists(cursor, record):
            cursor.execute(sql, record)
            exists = False
            for _ in cursor:
                exists = True
            return exists
        return is_exists
    def get_find(self, select_cols, filter=None):
        """Return a closure find(cursor, record) fetching at most one row as a namedtuple."""
        return_record_type = namedtuple(f'{self._name}_found_record', [c.name for c in select_cols])
        select_col_names = ','.join([c.name for c in select_cols])
        condition = ' AND '.join([f'{c.name}=%s' for c in self.get_cols(filter)])
        sql = f'SELECT {select_col_names} FROM {self._name} WHERE {condition} LIMIT 1;'
        def find(cursor, record):
            cursor.execute(sql, record)
            found_record = None
            for r in cursor:
                found_record = return_record_type(*r)
            return found_record
        return find
class SqlBuildable:
    """Base for chainable SQL clause nodes.

    Each node keeps a reference to the clause before it (``owner``); ``sql()``
    walks back to the head of the chain and the subclasses emit their clause
    text into ``forward_buffer`` in forward order.
    """
    def __init__(self, owner):
        self.owner = owner
    def build(self, forward_buffer, backward_buffer):
        """Recurse toward the chain head, recording this node on the way."""
        if self.owner:
            backward_buffer.append(self)
            self.owner.build(forward_buffer, backward_buffer)
    def sql(self):
        """Render the whole clause chain as a SQL string."""
        forward_buffer = []
        backward_buffer = []
        self.build(forward_buffer, backward_buffer)
        return '\n'.join(forward_buffer)
    def record_type(self):
        """Build a namedtuple type matching the SELECT column list.

        Duplicate column names get a numeric suffix so the namedtuple field
        names stay unique.

        Raises:
            ValueError: if the chain contains no SELECT clause.
        """
        o = self
        while o:
            if isinstance(o, Select):
                names = []
                used_names = {}
                for c in o.cols:
                    name = c.name
                    if name in used_names:
                        used_names[name] += 1
                        name += str(used_names[name])
                    else:
                        used_names[name] = 0
                    names.append(name)
                return namedtuple('record_type', names)
            o = o.owner
        # The original raised a plain string, which is itself a TypeError in
        # Python 3 ("exceptions must derive from BaseException"); raise a
        # real exception carrying the intended message instead.
        raise ValueError('No columns in SQL.')
    def read(self, cursor, *params):
        """Execute the built SQL with params and yield rows as namedtuples."""
        rt = self.record_type()
        sql = self.sql()
        cursor.execute(sql, params)
        for r in cursor:
            yield rt(*r)
class JoinOwner:
    """Mixin that lets a clause be followed by JOINs."""

    def join(self, tbl):
        """Chain an INNER JOIN against ``tbl``."""
        return Join(self, tbl, join_type='INNER')

    def left_join(self, tbl):
        """Chain a LEFT JOIN against ``tbl``."""
        return Join(self, tbl, join_type='LEFT')
class WhereOwner:
    """Mixin that lets a clause be followed by a WHERE."""

    def where(self, condition):
        """Chain a WHERE clause using ``condition``."""
        return Where(self, condition=condition)
class OrderByOwner:
    """Mixin that lets a clause be followed by an ORDER BY."""

    def order_by(self, cols):
        """Chain an ORDER BY over ``cols``."""
        return OrderBy(self, cols=cols)
class LimitOwner:
    """Mixin that lets a clause be followed by a LIMIT."""

    def limit(self, count):
        """Chain a LIMIT of ``count`` rows."""
        return Limit(self, count=count)
class Condition:
    """Pairs a condition expression with the clause that owns it."""
    def __init__(self, owner, condition):
        self.owner = owner
        self.condition = condition
class Select(SqlBuildable):
    """Head of a query chain; renders the SELECT column list."""
    def __init__(self, cols):
        # A Select starts the chain, hence owner=None.
        SqlBuildable.__init__(self, None)
        self.cols = cols
    def frm(self, tbl):
        """Chain a FROM clause for the given table."""
        return From(self, tbl)
    def build(self, forward_buffer, backward_buffer):
        SqlBuildable.build(self, forward_buffer, backward_buffer)
        forward_buffer.append(f'SELECT {",".join([str(c) for c in self.cols])}')
class From(SqlBuildable, JoinOwner, WhereOwner, OrderByOwner, LimitOwner):
    """FROM clause; supports chaining JOIN / WHERE / ORDER BY / LIMIT."""
    def __init__(self, owner, tbl):
        SqlBuildable.__init__(self, owner)
        self.tbl = tbl
    def build(self, forward_buffer, backward_buffer):
        SqlBuildable.build(self, forward_buffer, backward_buffer)
        # Emit "name alias" when the table has an alias, else just the name.
        s = f'{self.tbl._name} {self.tbl._alias}' if self.tbl._alias else self.tbl._name
        forward_buffer.append(f'FROM {s}')
class Join(SqlBuildable):
    """JOIN clause of the given join_type ('INNER' or 'LEFT')."""
    def __init__(self, owner, tbl, join_type='INNER'):
        SqlBuildable.__init__(self, owner)
        self.tbl = tbl
        self.join_type = join_type
    def on(self, condition):
        """Chain the ON condition for this join."""
        return On(self, condition)
    def build(self, forward_buffer, backward_buffer):
        SqlBuildable.build(self, forward_buffer, backward_buffer)
        # Emit "name alias" when the table has an alias, else just the name.
        s = f'{self.tbl._name} {self.tbl._alias}' if self.tbl._alias else self.tbl._name
        forward_buffer.append(f'{self.join_type} JOIN {s}')
class On(SqlBuildable, JoinOwner, WhereOwner, OrderByOwner, LimitOwner):
    """ON clause of a join; supports further chaining."""
    def __init__(self, owner, condition):
        SqlBuildable.__init__(self, owner)
        self.condition = condition
    def build(self, forward_buffer, backward_buffer):
        SqlBuildable.build(self, forward_buffer, backward_buffer)
        forward_buffer.append(f'ON {self.condition}')
class Where(SqlBuildable, OrderByOwner, LimitOwner):
    """WHERE clause; may be followed by ORDER BY or LIMIT."""
    def __init__(self, owner, condition):
        SqlBuildable.__init__(self, owner)
        self.condition = condition
    def build(self, forward_buffer, backward_buffer):
        SqlBuildable.build(self, forward_buffer, backward_buffer)
        forward_buffer.append(f'WHERE {self.condition}')
class OrderBy(SqlBuildable, LimitOwner):
    """ORDER BY clause; may be followed by LIMIT."""
    def __init__(self, owner, cols):
        SqlBuildable.__init__(self, owner)
        self.cols = cols
    def build(self, forward_buffer, backward_buffer):
        SqlBuildable.build(self, forward_buffer, backward_buffer)
        forward_buffer.append(f'ORDER BY {",".join([str(c) for c in self.cols])}')
class Limit(SqlBuildable):
    """LIMIT clause; terminates a query chain."""
    def __init__(self, owner, count):
        SqlBuildable.__init__(self, owner)
        self.count = count
    def build(self, forward_buffer, backward_buffer):
        SqlBuildable.build(self, forward_buffer, backward_buffer)
        forward_buffer.append(f'LIMIT {self.count}')
def select(cols):
    """Start a fluent query chain over the given columns."""
    return Select(cols)
| StarcoderdataPython |
174254 | from collections import namedtuple
from .command import Command
from .utils import update_termination_protection, \
is_stack_does_not_exist_exception
class StackDeleteOptions(namedtuple('StackDeleteOptions',
                                    ['no_wait',
                                     'ignore_missing'])):
    """Options for stack deletion.

    no_wait: return immediately instead of waiting for delete-complete.
    ignore_missing: report (not raise) when the stack does not exist.
    """
class StackDeleteCommand(Command):
    """Deletes the CloudFormation stack described by a stack context."""
    # Deleting a stack does not update cross-stack references.
    # NOTE(review): the exact semantics of this flag live in the Command base
    # class — confirm there.
    SKIP_UPDATE_REFERENCES = True
    def run(self, stack_context):
        """Delete the stack, honoring the no_wait / ignore_missing options."""
        # stack contexts
        session = stack_context.session
        parameters = stack_context.parameters
        # (metadata is read but currently unused in this command)
        metadata = stack_context.metadata
        self.ppt.pprint_stack_name(stack_context.stack_key,
                                   parameters['StackName'],
                                   'Deleting stack ')
        # create boto3 cfn resource
        cfn = session.resource('cloudformation')
        self.ppt.pprint_session(session)
        self.ppt.pprint_parameters(parameters)
        # call boto3
        stack = cfn.Stack(parameters['StackName'])
        try:
            # Termination protection must be lifted before a protected stack
            # can be deleted; pop the parameter so it is not passed onward.
            update_termination_protection(
                session,
                parameters.pop('EnableTerminationProtection', None),
                parameters['StackName'],
                self.ppt)
            self.ppt.pprint_stack(stack)
            stack.delete()
        except Exception as ex:
            # With ignore_missing, a "stack does not exist" error is printed
            # but swallowed; every other error propagates.
            if self.options.ignore_missing and \
                    is_stack_does_not_exist_exception(ex):
                self.ppt.secho(str(ex), fg='red')
                return
            else:
                raise
        # wait until delete complete
        if self.options.no_wait:
            self.ppt.secho('Stack is being deleted.')
        else:
            self.ppt.wait_until_delete_complete(session, stack)
            self.ppt.secho('Stack delete complete.', fg='green')
| StarcoderdataPython |
1935114 | <reponame>Amourspirit/ooo_uno_tmpl
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.sdb.application
import typing
from abc import abstractmethod, ABC
if typing.TYPE_CHECKING:
from ...graphic.x_graphic import XGraphic as XGraphic_a4da0afc
from .x_database_document_ui import XDatabaseDocumentUI as XDatabaseDocumentUI_be9c124d
from ...uno.x_interface import XInterface as XInterface_8f010a43
class XTableUIProvider(ABC):
    """
    is used by the database application to obtain non-default user interface information and/or components for database tables.

    **since**

        OOo 2.2

    See Also:
        `API XTableUIProvider <https://api.libreoffice.org/docs/idl/ref/interfacecom_1_1sun_1_1star_1_1sdb_1_1application_1_1XTableUIProvider.html>`_
    """
    # UNO metadata consumed by the generated binding layer; these strings tie
    # this Python ABC back to the IDL interface of the same name.
    __ooo_ns__: str = 'com.sun.star.sdb.application'
    __ooo_full_ns__: str = 'com.sun.star.sdb.application.XTableUIProvider'
    __ooo_type_name__: str = 'interface'
    __pyunointerface__: str = 'com.sun.star.sdb.application.XTableUIProvider'

    @abstractmethod
    def getTableEditor(self, DocumentUI: 'XDatabaseDocumentUI_be9c124d', TableName: str) -> 'XInterface_8f010a43':
        """
        returns a component which can be used to edit the definition of an existing table.

        Raises:
            com.sun.star.lang.IllegalArgumentException: ``IllegalArgumentException``
            com.sun.star.lang.WrappedTargetException: ``WrappedTargetException``
        """

    @abstractmethod
    def getTableIcon(self, TableName: str, ColorMode: int) -> 'XGraphic_a4da0afc':
        """
        provides the icon which should be used to represent the table in the database application window.

        The icon will usually be requested once per table, and cached. It might be requested again if the application settings change, for instance, if another desktop theme has been activated.
        """
__all__ = ['XTableUIProvider']
| StarcoderdataPython |
9787870 | """Test constants."""
from .const_account_family import PRIMARY_EMAIL, APPLE_ID_EMAIL, ICLOUD_ID_EMAIL
# Base
# Identities and credentials accepted by the fake iCloud service under test.
AUTHENTICATED_USER = PRIMARY_EMAIL
REQUIRES_2FA_TOKEN = "requires_2fa_token"
# Logging in as this user forces the two-factor-authentication flow.
REQUIRES_2FA_USER = "requires_2fa_user"
VALID_USERS = [AUTHENTICATED_USER, REQUIRES_2FA_USER, APPLE_ID_EMAIL, ICLOUD_ID_EMAIL]
VALID_PASSWORD = "<PASSWORD>"
VALID_COOKIE = "valid_cookie"
VALID_TOKEN = "valid_token"
VALID_2FA_CODE = "000000"
# Both plain and 2FA-requiring tokens are considered valid session tokens.
VALID_TOKENS = [VALID_TOKEN, REQUIRES_2FA_TOKEN]
CLIENT_ID = "client_id"
| StarcoderdataPython |
# Memoized path search: O(R*C) time and space (each cell is solved once).
def find_path(grid, r=0, c=0, cache=None):
    """Find a right/down path of (row, col) steps from (r, c) to the
    bottom-right cell of *grid*.

    Cells equal to 1 are blocked.

    Returns:
        (True, path) where path is the list of visited (row, col) tuples,
        or (False, None) when no path exists (including empty grids).
    """
    # Fresh cache per top-level call.  The original used a mutable default
    # argument, so results leaked between calls and poisoned later searches
    # on different grids; it also never passed the cache into the recursion
    # and never cached failures, leaving the worst case exponential.
    if cache is None:
        cache = {}
    if not grid or not grid[0]:
        return False, None
    if grid[r][c] == 1:
        return False, None
    key = (r, c)
    if key in cache:
        return cache[key]
    path = [key]
    if r == len(grid) - 1 and c == len(grid[0]) - 1:
        cache[key] = (True, path)
        return cache[key]
    # Try moving right first, then down (same order as the original).
    for nr, nc in ((r, c + 1), (r + 1, c)):
        if nr < len(grid) and nc < len(grid[0]):
            found, rest = find_path(grid, nr, nc, cache)
            if found:
                path.extend(rest)
                cache[key] = (True, path)
                return cache[key]
    cache[key] = (False, None)
    return cache[key]
"""
# O(2^n) runtime, O(n) space
def find_path(grid, r = 0, c = 0):
if len(grid) == 0 or len(grid[0]) == 0:
return False, None
if grid[r][c] == 1:
return False, None
loc = [(r,c)]
if r == len(grid) - 1 and c == len(grid[0]) - 1:
return True, loc
if c < len(grid[0]) - 1:
found, path = find_path(grid, r, c + 1)
if found:
loc.extend(path)
return True, loc
if r < len(grid) - 1:
found, path = find_path(grid, r+1, c)
if found:
loc.extend(path)
return True, loc
return False, None
""" | StarcoderdataPython |
11284910 | <gh_stars>0
import json
import logging
import random
import string
import sys
import traceback
from os import environ
from loguru import logger
from settings import conf
LOG_LEVEL = conf.LOG_LEVEL
DEFAULT_FORMAT = (
"<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> | "
"<level>{level: <8}</level> | "
"<cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> - "
"<level>{message}</level> "
"{extra}"
)
def gcloud_serializer(message):
    """Emit a loguru record to stderr as one Google-Cloud structured-log JSON line.

    Non-exception records keep their own level; records carrying an exception
    are promoted to CRITICAL with the formatted traceback appended.
    """
    # https://github.com/Delgan/loguru/issues/203
    record = message.record
    exception = record["exception"]
    if exception is None:
        severity = record["level"].name
        raw_msg = record["message"]
        full_msg = "{} | {}".format(record["message"], record["extra"])
    else:
        severity = "CRITICAL"
        raw_msg = "".join(
            traceback.format_exception(
                exception.type, exception.value, exception.traceback))
        full_msg = record["message"] + raw_msg
    # Pop the trace id AFTER building the message, exactly as before, so the
    # rendered "extra" still contains it while the JSON "extra" does not.
    trace_id = record["extra"].pop("google_trace_id", None)
    payload = {
        "severity": severity,
        "raw": raw_msg,        # Displayed while expanding the log record
        "message": full_msg,   # Displayed directly in the log viewer
        "extra": record["extra"],
        "time": record["time"],
    }
    if trace_id:
        payload["logging.googleapis.com/trace"] = (
            f"projects/{conf.GCLOUD_PROJECT}/traces/{trace_id}")
    print(json.dumps(payload, default=str), file=sys.stderr)
async def inject_request_id_middleware(request, call_next):
    """FastAPI middleware that tags every log line emitted during a request
    with a short ``request_id`` and, when present, the Google Cloud trace id.

    Usage: ``app.middleware("http")(inject_request_id_middleware)``
    """
    alphabet = string.ascii_lowercase + string.digits
    context = {"request_id": "".join(random.choices(alphabet, k=6))}
    trace_header = request.headers.get("X-Cloud-Trace-Context")
    if trace_header:
        # Header format is "TRACE_ID/SPAN_ID;o=1" -- keep only the trace id.
        context["google_trace_id"] = trace_header.split("/", 1)[0]
    with logger.contextualize(**context):
        return await call_next(request)
class InterceptHandler(logging.Handler):
    """stdlib-logging handler that forwards every record to loguru."""

    def emit(self, record: logging.LogRecord) -> None:  # pragma: no cover
        # depth=7 makes loguru walk back past the stdlib logging frames so the
        # emitted record points at the original call site.
        logger.opt(depth=7, exception=record.exc_info).log(
            record.levelname, record.getMessage())
def init_logging(logger_, local_env: bool):
    """Route all stdlib logging through loguru and install the right sink.

    :param logger_: loguru logger instance to configure
    :param local_env: True for colorized human-readable stdout logs,
        False for Google-Cloud-formatted JSON on stderr
    """
    # https://pawamoy.github.io/posts/unify-logging-for-a-gunicorn-uvicorn-app/
    # Funnel every stdlib record into loguru via the root logger.
    logging.root.handlers = [InterceptHandler()]
    # Strip handlers from all known loggers so records bubble up to root.
    for name in logging.root.manager.loggerDict:
        other = logging.getLogger(name)
        other.handlers = []
        other.propagate = True
    logger_.remove()
    if not local_env:
        logger_.add(gcloud_serializer, level=LOG_LEVEL)
        return
    logger_.add(
        sys.stdout,
        format=environ.get("LOGURU_FORMAT", DEFAULT_FORMAT),
        colorize=True,
        level=LOG_LEVEL,
    )
init_logging(logger, conf.is_local_env())
| StarcoderdataPython |
3540460 | #!/usr/bin/env python
##
## @file stripPackage.py
## @brief Strips the given package from the given SBML file.
## @author <NAME>
##
##
## This file is part of libSBML. Please visit http://sbml.org for more
## information about SBML, and the latest version of libSBML.
import sys
import os.path
import libsbml
def main (args):
    """usage: stripPackage.py input-filename package-to-strip output-filename
    """
    # NOTE: the docstring doubles as the CLI usage message printed below,
    # so it is kept to a single usage line.
    if len(args) != 4:
        print(main.__doc__)
        sys.exit(1)
    infile = args[1]
    package = args[2]
    outfile = args[3]
    if not os.path.exists(infile):
        print("[Error] %s : No such file." % (infile))
        sys.exit(1)
    reader = libsbml.SBMLReader()
    writer = libsbml.SBMLWriter()
    sbmldoc = reader.readSBML(infile)
    # Any parse error is fatal; the branches only pick the error report path.
    if sbmldoc.getNumErrors() > 0:
        if sbmldoc.getError(0).getErrorId() == libsbml.XMLFileUnreadable:
            # Handle case of unreadable file here.
            sbmldoc.printErrors()
        elif sbmldoc.getError(0).getErrorId() == libsbml.XMLFileOperationError:
            # Handle case of other file error here.
            sbmldoc.printErrors()
        else:
            # Handle other error cases here.
            sbmldoc.printErrors()
        sys.exit(1)
    # Configure libSBML's strip-package converter for the requested package.
    props = libsbml.ConversionProperties()
    props.addOption("stripPackage", True, "Strip SBML Level 3 package constructs from the model")
    props.addOption("package", package, "Name of the SBML Level 3 package to be stripped")
    if (sbmldoc.convert(props) != libsbml.LIBSBML_OPERATION_SUCCESS):
        print("[Error] Conversion failed...")
        sys.exit(1)
    writer.writeSBML(sbmldoc, outfile)
    print("[OK] stripped package '%s' from %s to %s" % (package, infile, outfile))
if __name__ == '__main__':
main(sys.argv)
| StarcoderdataPython |
174211 | <gh_stars>100-1000
import cv2
# Live background subtraction demo: show the webcam feed next to the
# MOG2 foreground mask until ESC is pressed.
capture = cv2.VideoCapture(0)  # default webcam
fgbg = cv2.createBackgroundSubtractorMOG2()
while True:
    _, frame = capture.read()
    fmask = fgbg.apply(frame)
    # ("Orignal" typo is the original window title; kept as-is.)
    cv2.imshow("Orignal Frame", frame)
    cv2.imshow("F Mask", fmask)
    # 27 == ESC key
    if cv2.waitKey(30) & 0xff == 27:
        break
capture.release()
cv2.destroyAllWindows()
| StarcoderdataPython |
1740655 | <filename>Modulo_3/semana 2/treeview/treewview.py
import tkinter as tk
from tkinter import ttk
from tkinter.messagebox import showinfo
# Selection event handler: show the selected row's values in a message box.
def item_selected(event):
    for selected_item in tree.selection():
        item = tree.item(selected_item)
        record = item['values']
        # show a message
        showinfo(title='Information', message=','.join(record))

root = tk.Tk()
tree = ttk.Treeview(root)
# Define the data columns ("#0" is the built-in tree column).
tree["columns"] = ("uno", "dos", "tres")
tree.column("#0", width=0, minwidth=270, stretch=tk.NO)
tree.column("uno", width=150, minwidth=150, stretch=tk.NO)
tree.column("dos", width=400, minwidth=200)
tree.column("tres", width=80, minwidth=50, stretch=tk.NO)
# Define the headings.
tree.heading("#0", text="Nombre", anchor=tk.W)
tree.heading("uno", text="Fecha", anchor=tk.W)
tree.heading("dos", text="Tipo", anchor=tk.W)
tree.heading("tres", text="Tamaño", anchor=tk.W)
# Pack the widget.
tree.pack(side=tk.TOP)
# Insert two sample rows.
tree.insert('', tk.END, values=["Fila uno", "Fila dos", " Fila tres"])
tree.insert('', tk.END, values=["Fila2 uno", "Fila2 dos", " Fila2 tres"])
tree.bind('<<TreeviewSelect>>', item_selected)
root.mainloop() | StarcoderdataPython |
1630077 | <gh_stars>1-10
from .model import QANet | StarcoderdataPython |
12818158 | <filename>exifeditor.py
import piexif, os
from datetime import datetime
def filename2date(filename):
    """Convert a filename like ``21-05-03_1023`` into an EXIF datetime string.

    The two-digit year in the filename is assumed to be 20xx.  The result
    follows the EXIF DateTime format ``YYYY:MM:DD HH:MM:SS``; the original
    omitted the seconds field, which violates the EXIF specification and
    confuses strict readers, so seconds are always emitted as ``00``.

    :raises ValueError: if the filename does not match ``YY-MM-DD_HHMM``.
    """
    date = datetime.strptime('20' + filename, r'%Y-%m-%d_%H%M')
    return date.strftime(r'%Y:%m:%d %H:%M:%S')
def editexif(date, file):
    """Write *date* into the EXIF DateTimeOriginal tag of *file* in place.

    :param date: datetime string in EXIF format.
    :param file: path to a JPEG; its EXIF block is rewritten on disk.
    """
    exif_dict = piexif.load(file)
    # 36867 == piexif.ExifIFD.DateTimeOriginal
    exif_dict['Exif'][36867] = date
    exif_byte = piexif.dump(exif_dict)
    piexif.insert(exif_byte, file)
if __name__ == "__main__":
    # Stamp every .jpg in the current directory with a DateTimeOriginal
    # derived from its own filename.
    files = os.listdir('.')
    for file in files:
        if os.path.splitext(file)[1] == '.jpg':
            filename = os.path.splitext(file)[0]
            date = filename2date(filename)
            editexif(date, file)
| StarcoderdataPython |
11361203 | <filename>models/caption_module.py
import torch
import torch.nn as nn
import numpy as np
from torch.nn.modules import dropout
class Caption_Module_Architecture(nn.Module):
    """Attention-based LSTM caption decoder for dense video captioning.

    Consumes per-frame video features plus forward/backward proposal hidden
    states, and greedily decodes one caption per detected event.  Decoding is
    capped at 110 feature steps per event and ``caption_seq_len`` words.

    NOTE(review): "conficences" below is a long-standing typo for
    "confidences"; kept as-is since renaming is out of scope for doc changes.
    """

    def __init__(self, options, device) -> None:
        """Build the captioning network.

        Args:
            options: project configuration dict; keys used here include
                ``video_feat_dim``, ``encoded_video_feat_dim``,
                ``num_rnn_layers``, ``num_anchors``, ``batch_size``,
                ``rnn_drop``, ``vocab_size`` and ``word_embed_size``.
            device: torch device all tensors are moved to.
        """
        super(Caption_Module_Architecture, self).__init__()
        self.device = device
        # overall project options as a dict
        self.options = options
        # architecture parameters
        self.attention_hidden_size = options["video_feat_dim"]
        self.input_dim = options["video_feat_dim"]
        self.hidden_dim = options["encoded_video_feat_dim"]
        self.num_layers = options["num_rnn_layers"]
        #self.num_layers = 10
        self.num_anchors = options["num_anchors"]
        self.batch_size = options["batch_size"]
        # layers
        ## fix the input dimensions, hidden dim should stay the same input dim is flattened weighted average and the fw bw hidden together but maybe after the cg stuff its just a single 512 vector instead of both layers
        self.lstm_caption_1 = nn.LSTM(2 * self.hidden_dim + 2 * self.hidden_dim, self.hidden_dim, self.num_layers, batch_first=True, dropout = self.options["rnn_drop"])
        # apply weight norm to every non-bias parameter of the lstm layer
        lstm_params = dict(self.lstm_caption_1.named_parameters())
        for name in lstm_params.keys():
            if "bias" not in name:
                nn.utils.weight_norm(self.lstm_caption_1, name)
        # projects the LSTM hidden state to vocabulary logits
        self.fc_caption = nn.utils.weight_norm(nn.Linear(self.hidden_dim, self.options['vocab_size']))
        # remove batch dim and change batch to 110 max len
        self.fc_attn_1 = nn.utils.weight_norm(nn.Linear(self.input_dim + self.hidden_dim + self.hidden_dim + 2 * self.hidden_dim, self.attention_hidden_size))
        # output dim is 1 because batch is the 110 so we get the 110 values out
        self.fc_attn_2 = nn.utils.weight_norm(nn.Linear(self.attention_hidden_size, 1))
        # visual context gate, for cg to apply will probably need to change output to hidden dim 512 instead of 500
        self.fc_cg_1 = nn.utils.weight_norm(nn.Linear(self.input_dim, 2 * self.hidden_dim))
        # fw bw context gate, 2 is the batch for each fw bw
        # self.fc_cg_2 = nn.utils.weight_norm(nn.Linear(2 * self.hidden_dim, 2 * self.hidden_dim))
        # combined context gate over all features: input, fw/bw hidden states,
        # word embedding, and the previous decoder hidden state
        self.fc_cg_3 = nn.utils.weight_norm(nn.Linear(2 * self.hidden_dim + 2 * self.hidden_dim + self.options['word_embed_size'] + 2 * self.hidden_dim , 2 * self.hidden_dim))
        # dim=1 normalizes across features/timesteps rather than within each
        # feature vector -- NOTE(review): flagged by the author as possibly
        # needing dim=-1; confirm before changing.
        self.logsoftmax_norm = nn.LogSoftmax(dim = 1)
        self.softmax_norm = nn.Softmax(dim = 1)
        self.tanh_norm = nn.Tanh()
        self.embeddings = nn.utils.weight_norm(nn.Embedding(self.options['vocab_size'], self.options['word_embed_size']))

    def forward(self, feats_ac, temp_proposals_hs_fw, temp_proposals_hs_bw, proposal_caption_fw, proposal_caption_bw):
        """Greedy-decode captions for the events flagged in the proposal masks.

        Args:
            feats_ac: video feature sequence; only index [0] is used, so a
                batch size of 1 is assumed -- TODO confirm against callers.
            temp_proposals_hs_fw / temp_proposals_hs_bw: per-timestep hidden
                states of the forward / backward proposal RNNs.
            proposal_caption_fw / proposal_caption_bw: masks whose nonzero
                entries mark event end points (fw) and, indexed from the
                reversed sequence, the matching start offsets (bw).

        Returns:
            (word_ids, fc_word_scores, start_id, end_id): greedy word ids per
            event, raw vocabulary logits per decode step, and the event
            boundary indices.
        """
        # If no event end is flagged, force one at the clip end (capped at 110)
        # so at least one caption is produced.
        if(torch.sum(proposal_caption_fw[0][0]) == 0.):
            idx = min(110, feats_ac.shape[1])
            proposal_caption_fw[0][0][idx - 1] = 1
        end_id = torch.nonzero(proposal_caption_fw[0][0]).view(-1)
        # bw mask is indexed from the reversed sequence; convert to fw indices.
        start_id_bw = proposal_caption_bw[0][0][end_id].view(-1)
        start_id = feats_ac.shape[1] -1 - start_id_bw
        if(start_id.nelement() == 0):
            start_id = torch.zeros(1, dtype=torch.int32)
        caption_feats, caption_mask = self.get_video_seq(feats_ac, start_id, end_id, 110)
        caption_feats = caption_feats.to(self.device)
        # zero-initialized LSTM state, one per detected event
        hidden_state = torch.zeros(self.num_layers, caption_feats.shape[0], self.hidden_dim)
        cell_state = torch.zeros(self.num_layers, caption_feats.shape[0], self.hidden_dim)
        hidden_state = hidden_state.to(self.device)
        cell_state = cell_state.to(self.device)
        hidden = (hidden_state, cell_state)
        # proposal hidden states at each event's boundaries
        fw_hs = temp_proposals_hs_fw[start_id.long()]
        bw_hs = temp_proposals_hs_bw[(feats_ac.shape[1] -1 - end_id).long()]
        # https://pytorch.org/tutorials/beginner/nlp/sequence_models_tutorial.html
        # Seed decoding with the <START> token for every event.
        word_id = torch.Tensor(len(caption_feats))
        word_id = word_id.fill_(self.options['vocab']['<START>'])
        word_id = word_id
        word_id = word_id.long()
        word_id = word_id.to(self.device)
        word_ids = word_id
        word_ids = word_ids.unsqueeze(-1)
        word_conficences = torch.Tensor(len(caption_feats))
        word_conficences = word_conficences.fill_(1.)
        word_conficences = word_conficences.unsqueeze(-1)
        word_conficences = word_conficences.to(self.device)
        fc_word_scores = torch.zeros(caption_feats.shape[0], 1, self.options['vocab_size'], dtype=torch.float32)
        fc_word_scores = fc_word_scores.to(self.device)
        fc_word_scores[:,0,self.options['vocab']['<START>']] = 1.
        # Greedy decode, one word per iteration up to the max caption length.
        for i in range(self.options['caption_seq_len']-1):
            word_embed = self.embeddings(word_id)
            hidden_state_reshape = torch.reshape(hidden_state, (-1,self.num_layers*self.hidden_dim))
            # attention context: fw/bw boundary states + previous hidden state,
            # broadcast over all 110 feature steps
            concat_hs = torch.cat((torch.cat((fw_hs, bw_hs), -1), hidden_state_reshape), -1)
            concat_hs = concat_hs.unsqueeze(axis=1)
            concat_hs = concat_hs.repeat(1,110,1)
            tile = torch.cat((caption_feats, concat_hs), -1)
            tile = tile.float()
            # attention: score each feature step, then form the weighted average
            attn_out = self.fc_attn_1(tile)
            attn_out = self.tanh_norm(attn_out)
            attn_out = self.fc_attn_2(attn_out)
            attn_out = self.softmax_norm(attn_out)
            attn_weighted_feats = torch.mul(caption_feats, attn_out)
            # returns a [#actions, 1, 500]
            attn_weighted_input = torch.sum(attn_weighted_feats, dim = 1, dtype=torch.float32)
            # context gating: weigh visual input against the fw/bw hidden states
            cg_weighted_input = self.fc_cg_1(attn_weighted_input)
            cg_weighted_input = self.tanh_norm(cg_weighted_input)
            # cg_weighted_fw_bw = self.fc_cg_2(torch.cat((fw_hs, bw_hs), -1))
            # cg_weighted_fw_bw = self.tanh_norm(cg_weighted_fw_bw)
            cg_weighted_fw_bw = torch.cat((fw_hs, bw_hs), -1)
            cg_final = self.fc_cg_3(torch.cat((torch.cat((torch.cat((cg_weighted_fw_bw, cg_weighted_input), -1), hidden_state_reshape), -1), word_embed), -1))
            cg_final = self.softmax_norm(cg_final)
            gated_input = (1.0-cg_final) * cg_weighted_input
            gated_hs = cg_final * cg_weighted_fw_bw
            gated_caption_input = torch.cat((gated_input, gated_hs), -1)
            gated_caption_input = gated_caption_input.unsqueeze(1)
            # one decoder step
            caption, cell_hidden = self.lstm_caption_1(gated_caption_input, hidden)
            hidden_state = cell_hidden[0]
            cell_state = cell_hidden[1]
            hidden = cell_hidden
            caption_fc = self.fc_caption(caption)
            caption_word_norm = self.softmax_norm(caption_fc.view(caption_feats.shape[0],-1))
            caption_word_norm = caption_word_norm.unsqueeze(1)
            fc_word_scores = torch.cat((fc_word_scores, caption_fc), dim = 1)
            # greedy pick: highest-probability word becomes the next input
            word_id = torch.argmax(caption_word_norm, dim=-1)
            word_ids = torch.cat((word_ids, word_id), -1)
            word_id = word_id.view(-1)
            word_confidence = torch.max(caption_word_norm, dim=-1)
            word_conficences = torch.cat((word_conficences, word_confidence[0]), -1)
        return word_ids, fc_word_scores, start_id, end_id

    def caption_eval(self, caption_feats, event_hidden_feats_fw, event_hidden_feats_bw):
        """Inference-only variant of :meth:`forward`'s decode loop.

        Takes already-sliced per-event features and boundary hidden states,
        runs the identical attention/context-gate/LSTM greedy decode under
        ``torch.no_grad()``, and returns only (word_ids, fc_word_scores).

        NOTE(review): this duplicates forward()'s inner loop line-for-line;
        a shared private helper would remove the duplication.
        """
        with torch.no_grad():
            caption_feats = caption_feats.to(self.device)
            fw_hs = event_hidden_feats_fw
            bw_hs = event_hidden_feats_bw
            # zero-initialized LSTM state, one per event
            hidden_state = torch.zeros(self.num_layers, caption_feats.shape[0], self.hidden_dim)
            cell_state = torch.zeros(self.num_layers, caption_feats.shape[0], self.hidden_dim)
            hidden_state = hidden_state.to(self.device)
            cell_state = cell_state.to(self.device)
            hidden = (hidden_state, cell_state)
            # https://pytorch.org/tutorials/beginner/nlp/sequence_models_tutorial.html
            # Seed decoding with the <START> token for every event.
            word_id = torch.Tensor(len(caption_feats))
            word_id = word_id.fill_(self.options['vocab']['<START>'])
            word_id = word_id
            word_id = word_id.long()
            word_id = word_id.to(self.device)
            word_ids = word_id
            word_ids = word_ids.unsqueeze(-1)
            word_conficences = torch.Tensor(len(caption_feats))
            word_conficences = word_conficences.fill_(1.)
            word_conficences = word_conficences.unsqueeze(-1)
            word_conficences = word_conficences.to(self.device)
            fc_word_scores = torch.zeros(caption_feats.shape[0], 1, self.options['vocab_size'], dtype=torch.float32)
            fc_word_scores = fc_word_scores.to(self.device)
            fc_word_scores[:,0,self.options['vocab']['<START>']] = 1.
            # Greedy decode, one word per iteration up to the max caption length.
            for i in range(self.options['caption_seq_len']-1):
                word_embed = self.embeddings(word_id)
                hidden_state_reshape = torch.reshape(hidden_state, (-1,self.num_layers*self.hidden_dim))
                # attention context broadcast over all 110 feature steps
                concat_hs = torch.cat((torch.cat((fw_hs, bw_hs), -1), hidden_state_reshape), -1)
                concat_hs = concat_hs.unsqueeze(axis=1)
                concat_hs = concat_hs.repeat(1,110,1)
                tile = torch.cat((caption_feats, concat_hs), -1)
                tile = tile.float()
                # attention: score each feature step, then weighted average
                attn_out = self.fc_attn_1(tile)
                attn_out = self.tanh_norm(attn_out)
                attn_out = self.fc_attn_2(attn_out)
                attn_out = self.softmax_norm(attn_out)
                attn_weighted_feats = torch.mul(caption_feats, attn_out)
                # returns a [#actions, 1, 500]
                attn_weighted_input = torch.sum(attn_weighted_feats, dim = 1, dtype=torch.float32)
                # context gating
                cg_weighted_input = self.fc_cg_1(attn_weighted_input)
                cg_weighted_input = self.tanh_norm(cg_weighted_input)
                #cg_weighted_fw_bw = self.fc_cg_2(torch.cat((fw_hs, bw_hs), -1))
                #cg_weighted_fw_bw = self.tanh_norm(cg_weighted_fw_bw)
                cg_weighted_fw_bw = torch.cat((fw_hs, bw_hs), -1)
                cg_final = self.fc_cg_3(torch.cat((torch.cat((torch.cat((cg_weighted_fw_bw, cg_weighted_input), -1), hidden_state_reshape), -1), word_embed), -1))
                cg_final = self.softmax_norm(cg_final)
                gated_input = (1.0-cg_final) * cg_weighted_input
                gated_hs = cg_final * cg_weighted_fw_bw
                gated_caption_input = torch.cat((gated_input, gated_hs), -1)
                gated_caption_input = gated_caption_input.unsqueeze(1)
                # one decoder step
                caption, cell_hidden = self.lstm_caption_1(gated_caption_input, hidden)
                hidden_state = cell_hidden[0]
                cell_state = cell_hidden[1]
                hidden = cell_hidden
                caption_fc = self.fc_caption(caption)
                caption_word_norm = self.softmax_norm(caption_fc.view(caption_feats.shape[0],-1))
                caption_word_norm = caption_word_norm.unsqueeze(1)
                fc_word_scores = torch.cat((fc_word_scores, caption_fc), dim = 1)
                # greedy pick: highest-probability word becomes the next input
                word_id = torch.argmax(caption_word_norm, dim=-1)
                word_ids = torch.cat((word_ids, word_id), -1)
                word_id = word_id.view(-1)
                word_confidence = torch.max(caption_word_norm, dim=-1)
                word_conficences = torch.cat((word_conficences, word_confidence[0]), -1)
            return word_ids, fc_word_scores

    def get_video_seq(self, video_feat_sequence, start_ids, end_ids, max_clip_len):
        """get video proposal representation (feature sequence), given start end feature ids, all of which are LISTS

        Slices out each event's feature window and zero-pads (or truncates)
        every clip to ``max_clip_len`` steps.

        Returns:
            (event_video_sequence, event_video_mask): features of shape
            (num_events, max_clip_len, video_feat_dim) and a 0/1 mask marking
            the valid (non-padded) timesteps of each clip.
        """
        # max_clip_len is the longest number of features allowed to be apart (110)
        N = len(start_ids)
        event_video_sequence = torch.empty((0, max_clip_len, self.options['video_feat_dim']), dtype=torch.float32)
        event_video_sequence = event_video_sequence.to(self.device)
        event_video_mask = torch.empty((0, max_clip_len), dtype=torch.int32)
        for event_id in range(0, N):
            start_id = start_ids[event_id]
            end_id = end_ids[event_id] + 1
            video_feats = video_feat_sequence[0][start_id:end_id]
            clip_len = end_id - start_id
            this_mask = torch.zeros(max_clip_len)
            if clip_len < max_clip_len:
                # mark valid steps, then pad the tail with zero feature vectors
                this_mask[0:len(video_feats)] = 1
                zero_padding = torch.zeros((max_clip_len - clip_len, self.options['video_feat_dim']), dtype=torch.float32)
                zero_padding = zero_padding.to(self.device)
                video_feats = torch.cat((video_feats, zero_padding), dim = 0)
            else:
                this_mask[0:len(this_mask)] = 1
                video_feats = video_feats[:max_clip_len]
            video_feats = torch.unsqueeze(video_feats, dim = 0)
            this_mask = torch.unsqueeze(this_mask, dim = 0)
            try:
                event_video_sequence = torch.cat((event_video_sequence, video_feats), dim=0)
            except Exception as e:
                # Debugging aid left by the author: dump shapes and abort hard.
                print(e)
                print(event_video_sequence.shape, video_feats[0].shape, video_feats.shape, clip_len, start_ids, end_ids)
                print(start_id)
                quit()
            event_video_mask = torch.cat((event_video_mask, this_mask), dim=0)
        # [#events, 110, 500]
        return event_video_sequence, event_video_mask
| StarcoderdataPython |
6618918 | <filename>server.py
#!/usr/bin/env python
import bz2
import pickle
import argparse
from collections import defaultdict
from pathlib import Path
from flask import render_template, Flask, url_for
from flask_caching import Cache
from src.lib import parse_multeval_results_table, parse_ranksys
from src.utils import natural_sort
CONFIG = {
"CACHE_TYPE": "simple", # Flask-Caching related configs
"CACHE_DEFAULT_TIMEOUT": 36000,
}
app = Flask('mmt-ui')
app.config.from_mapping(CONFIG)
cache = Cache(app)
def get_tree_dict(folder):
    """Parses a folder hierarchy where each subfolder is a multeval
    output folder that contains experiment results into a dict."""

    def read_sources(fname):
        """Reads srcs.pkl.bz2 files to get source sentences used in MT
        training for visualization purposes."""
        try:
            with bz2.BZ2File(fname, 'rb') as f:
                d = pickle.load(f)
        except Exception:
            # Missing or unreadable pickle: callers treat None as "no sources".
            return None
        return {k: d[v] if isinstance(v, str) else v for k, v in d.items()}

    # Final dictionary has tasks as keys, test_sets as inner keys
    # and a tuple of (path_to_folder, URL for results page, source sentences)
    # as value
    d = defaultdict(lambda: defaultdict(dict))
    # The folder with experiment results
    tasks = [exp.name for exp in Path(folder).iterdir() if exp.is_dir()]
    tasks = natural_sort(tasks)
    for task in tasks:
        # Each subfolder is an experiment's multeval results
        # srclang-trglang_<task description>
        # NOTE(review): slang/tlang are never used; the unpack does, however,
        # implicitly assert the "src-trg_..." naming convention.
        slang, tlang = task.split('_', 1)[0].split('-')
        for test_set in Path(f'{folder}/{task}').iterdir():
            source_dict = read_sources(test_set / 'srcs.pkl.bz2')
            d[task][test_set.name] = (
                Path(f'{folder}/{task}/{test_set.name}'),
                url_for('results', task=task, testset=test_set.name),
                source_dict)
    return d
@app.route("/")
def index():
    """Landing page: list every task/test set discovered on disk."""
    return render_template('index.html', tasks=get_tree_dict(app.config['results']))
@app.route("/<task>/<testset>")
@app.route("/<task>/<testset>/<system>")
@cache.memoize(timeout=36000)
def results(task, testset, system=None):
    """Results page for one task/test set, optionally focused on one system.

    Memoized for 10 hours (36000 s); the results folder is re-walked on a
    cache miss.
    """
    result_db = get_tree_dict(app.config['results'])
    folder, _, source_dict = result_db[task][testset]
    # Parse multeval table
    results_table, baseline = parse_multeval_results_table(
        folder / 'results.txt', task, testset)
    kwargs = {'task': task, 'testset': testset, 'results_table': results_table}
    if system is not None:
        # Per-system view: attach sentence-level outputs and the baseline name.
        srcs = source_dict[system] if source_dict else None
        kwargs['system'] = system
        kwargs['baseline'] = baseline
        kwargs['systems_table'] = parse_ranksys(
            folder / 'ranksys', system, testset, srcs)
    return render_template('view.html', **kwargs)
def main():
    """CLI entry point: parse arguments, configure and launch the Flask app."""
    parser = argparse.ArgumentParser(prog='mmt-ui')
    parser.add_argument('-r', '--results', help='Results folder',
                        required=True, type=str)
    parser.add_argument('-p', '--port', help='Server port', default=8086)
    parser.add_argument('-n', '--host', help='Host server IP', default='0.0.0.0')
    parser.add_argument('-d', '--debug', help='Debug mode for Flask',
                        action='store_true')
    parser.add_argument('-D', '--deploy', help='Enable deployment server',
                        action='store_true')
    args = parser.parse_args()
    app.config['results'] = args.results
    app.config['DEBUG'] = args.debug
    if args.deploy:
        # Production: serve through waitress instead of Flask's dev server.
        from waitress import serve
        serve(app, host=args.host, port=args.port)
    else:
        app.run(host=args.host, port=args.port, threaded=True)
if __name__ == '__main__':
main()
| StarcoderdataPython |
11232005 | <reponame>WallabyLester/Machine_Learning_From_Scratch
import numpy as np
def test_l1_regularization_forward():
    """
    Test the forward pass of the L1Regularization class.
    """
    from your_code import L1Regularization
    X = np.array([[-1, 2, 1], [-3, 4, 1]])
    regularizer = L1Regularization(reg_param=0.5)
    # 0.5 * (|-1| + |2|) = 1.5 and 0.5 * (|-3| + |4|) = 3.5 -- the last
    # column (bias term) is expected to be excluded from the penalty.
    _true = np.array([1.5, 3.5])
    _est = np.array([regularizer.forward(x) for x in X])
    assert np.allclose(_true, _est)
def test_l1_regularization_backward():
    """
    Test the backward pass of the L1Regularization class.
    """
    from your_code import L1Regularization
    X = np.array([[-1, 2, 1], [-3, 4, 1]])
    regularizer = L1Regularization(reg_param=0.5)
    # Gradient is reg_param * sign(w), with the bias (last) entry zeroed.
    _true = np.array([[-0.5, 0.5, 0], [-0.5, 0.5, 0]])
    _est = np.array([regularizer.backward(x) for x in X])
    assert np.allclose(_true, _est)
def test_l2_regularization_forward():
    """
    Test the forward pass of the L2Regularization class.
    """
    from your_code import L2Regularization
    X = np.array([[-1, 2, 1], [-3, 4, 1]])
    regularizer = L2Regularization(reg_param=0.5)
    # 0.5 * reg * ||w||^2 excluding the bias column:
    # 0.5*0.5*(1+4) = 1.25 and 0.5*0.5*(9+16) = 6.25.
    _true = np.array([1.25, 6.25])
    _est = np.array([regularizer.forward(x) for x in X])
    assert np.allclose(_true, _est)
def test_l2_regularization_backward():
    """
    Test the backward pass of the L2Regularization class.
    """
    from your_code import L2Regularization
    X = np.array([[-1, 2, 1], [-3, 4, 1]])
    regularizer = L2Regularization(reg_param=0.5)
    # Gradient is reg_param * w, with the bias (last) entry zeroed.
    _true = np.array([[-0.5, 1, 0], [-1.5, 2, 0]])
    _est = np.array([regularizer.backward(x) for x in X])
    assert np.allclose(_true, _est)
| StarcoderdataPython |
98676 | <gh_stars>0
from .starts import prepare_seed, prepare_logger, get_machine_info, save_checkpoint, copy_checkpoint
from .optimizers import get_optim_scheduler | StarcoderdataPython |
189514 | from .model import BaseModel
from .tiramisu import DenseUNet, DenseBlock, DenseLayer
from .tiramisu import (
ModuleName,
DEFAULT_MODULE_BANK,
UPSAMPLE2D_NEAREST,
UPSAMPLE2D_PIXELSHUFFLE,
UPSAMPLE2D_TRANPOSE,
)
__all__ = [
"BaseModel",
"DenseUNet",
"DenseBlock",
"DenseLayer",
"ModuleName",
"DEFAULT_MODULE_BANK",
"UPSAMPLE2D_NEAREST",
"UPSAMPLE2D_PIXELSHUFFLE",
"UPSAMPLE2D_TRANPOSE",
]
| StarcoderdataPython |
1800203 | # -*- coding: utf-8 -*-
"""
Created on Thu May 27 10:41:46 2021
@author: freeridingeo
"""
import numpy as np
import sys
sys.path.append("D:/Code/eotopia/core")
import constants
# Sentinel-1 C-band SAR system parameters (SI units unless noted).
sentinel1_parameter = dict()
# C-band wavelength: c / 5.405 GHz = 0.05547 m.  The original value (0.5547)
# was off by a factor of 10, which also skewed the derived half-power
# beamwidths below (0.887 * lambda / L must give the ~0.23 deg azimuth
# beamwidth quoted in the commented reference values at the end of this file).
sentinel1_parameter["wavelength"] = 0.05547 # m
sentinel1_parameter["antenna_length"] = 12.3 # m
sentinel1_parameter["antenna_width"] = 0.821 # m
sentinel1_parameter["antenna_area"] = sentinel1_parameter["antenna_length"]\
    * sentinel1_parameter["antenna_width"]
# Interferometric Wide (IW) sub-swath look angles (deg)
sentinel1_parameter["iw1_lookangle"] = 32.9
sentinel1_parameter["iw2_lookangle"] = 38.3
sentinel1_parameter["iw3_lookangle"] = 43.1
# slant-range resolution per sub-swath (m)
sentinel1_parameter["iw1_slantrange_resolution"] = 2.7
sentinel1_parameter["iw2_slantrange_resolution"] = 3.1
sentinel1_parameter["iw3_slantrange_resolution"] = 3.5
sentinel1_parameter["iw1_range_bandwidth"] = 56.5*10**6 # Hz
sentinel1_parameter["iw2_range_bandwidth"] = 48.2*10**6 # Hz
sentinel1_parameter["iw3_range_bandwidth"] = 42.8*10**6 # Hz
sentinel1_parameter["iw1_azimuth_resolution"] = 22.5 # m
sentinel1_parameter["iw2_azimuth_resolution"] = 22.7 # m
sentinel1_parameter["iw3_azimuth_resolution"] = 22.6 # m
sentinel1_parameter["iw1_processing_bandwidth"] = 327 # Hz
sentinel1_parameter["iw2_processing_bandwidth"] = 313 # Hz
sentinel1_parameter["iw3_processing_bandwidth"] = 314 # Hz
sentinel1_parameter["center_frequency"] = 5.405 *10**9 # Hz
sentinel1_parameter["max_rng_bandwidth"] = 100.*10**6 # Hz
sentinel1_parameter["slantrng_pixelspacing"] = 2.3 # m
sentinel1_parameter["rng_sampling_frequency"] = 64.35*10**6 # Hz
sentinel1_parameter["az_pixelspacing"] = 14.1 # m
sentinel1_parameter["az_sampling_frequency"] = 489.49 # Hz
sentinel1_parameter["burst_length"] = 2.75 # sec ~ 20km
sentinel1_parameter["pulse_width_min"] = 5.*10**-6
sentinel1_parameter["pulse_width_max"] = 1000.*10**-6
sentinel1_parameter["pulse_duration"] = 6.1996*10**-5 # s
sentinel1_parameter["prf_min"] = 1000 # Hz
sentinel1_parameter["prf_max"] = 3000 # Hz
sentinel1_parameter["prf"] = 486.49 # Hz
sentinel1_parameter["chirp_slope"] = 7.79 * 10**11
sentinel1_parameter["ground_swath_width"] = 250000 # m
sentinel1_parameter["slice_length"] = 170000 # m
sentinel1_parameter["satellite_velocity"] = 7500. # m/s
sentinel1_parameter["satellite_height_min"] = 698000. # m
sentinel1_parameter["satellite_height_max"] = 726000. # m
# mean orbital height (m)
sentinel1_parameter["satellite_height"] =\
    (sentinel1_parameter["satellite_height_min"]\
     + sentinel1_parameter["satellite_height_max"]) / 2
sentinel1_parameter["system_noise"] = 3 # dB
sentinel1_parameter["system_loss_epsilon"] = 10.**(-5./10.) # assume 5 dB overall losses
# half-power (-3 dB) beamwidths in radians: 0.887 * lambda / aperture
sentinel1_parameter["half_power_bandwidth_l"] = .887 *\
    sentinel1_parameter["wavelength"]\
    / sentinel1_parameter["antenna_length"]
sentinel1_parameter["half_power_bandwidth_w"] = .887 *\
    sentinel1_parameter["wavelength"]\
    / sentinel1_parameter["antenna_width"]
# az_steering_angle_min = -0.9
# az_steering_angle_max = 0.9
# az_beam_width = 0.23 # Deg
# elevation_beam_width = 3.43 # Deg
# elevation_beam_steering_rng_min = -13. # Deg
# elevation_beam_steering_rng_max = 12.3 # Deg
# radarnoise_temperature = 300. # K # !!
# receiver_noise = k * radarnoise_temperature * max_rng_bandwidth
# receiver_noise_dB = 10.*np.log10(k * radarnoise_temperature * max_rng_bandwidth)
# range_bandwidth = max_rng_bandwidth # Hz
# pulse_rate = 1600. # Hz
# PRF = pulse_rate
# peakpower_transmit = 4000. # W # !!
# azimuth_sample_spacing = satellite_velocity/pulse_rate
# range_at_boresight = satellite_height / np.cos(offnadir_boresight) # m !!
# groundrange_at_boresight = satellite_height * np.sin(offnadir_boresight) # m !!
# range_at_nearbeam_edge = satellite_height /\
# np.cos(offnadir_boresight - half_power_bandwidth_w/2) # !!
# groundrange_at_nearbeam_edge = satellite_height *\
# np.sin(offnadir_boresight - half_power_bandwidth_w/2) # !!
# range_at_farbeam_edge = satellite_height /\
# np.cos(offnadir_boresight + half_power_bandwidth_w/2)# !!
# groundrange_at_farbeam_edge = satellite_height *\
# np.sin(offnadir_boresight + half_power_bandwidth_w/2)# !!
# range_swath = groundrange_at_farbeam_edge - groundrange_at_nearbeam_edge # m
# Delta_range = c / (2. * range_bandwidth)
# Delta_range_ng = Delta_range /\
# np.sin(offnadir_boresight - half_power_bandwidth_w/2)
# Delta_range_fg = Delta_range /\
# np.sin(offnadir_boresight + half_power_bandwidth_w/2)
# n_rs = int(np.round(range_swath/Delta_range))
# range_v = np.linspace(range_at_nearbeam_edge, range_at_farbeam_edge, n_rs)
# s_0 = 0. # reference azimuth for defining calculations
# range_0 = range_at_boresight # Reference range for calculations
# offnadir_boresight = 30. * np.pi/180. # !!
# azimuth_squint_of_boresight = 0. * np.pi/180. # !!
| StarcoderdataPython |
9602313 | import numpy as np
import matplotlib.pyplot as plt
# Compare DeepSEA and DeepShark AUPRC predictions for DNase samples and report
# how often DeepShark underperforms (log2 fold-change < 0).
# NOTE(review): both input paths are hard-coded absolute paths.
deepsea="/home/fast/onimaru/encode/deepsea/deepsea_pred.txt"
deepshark="/home/fast/onimaru/encode/deepsea/deepshark_Tue_Apr_17_183529_2018.ckpt-57883_prediction.log"

# sample name -> DeepSEA AUPRC, parsed from the whitespace-separated table.
deepsea_dict={}
with open(deepsea, 'r') as fin:
    for line in fin:
        if not line.startswith("Cell Type"):
            #print line
            line=line.split()
            if len(line)==0:
                continue
            print(line)
            # Column 4 == "NA" marks samples without a score; skip them.
            if line[4]=="NA":
                continue
            # Column 3 holds the sample file name; keep the part before the first dot.
            sname=line[3].split('.')[0]
            AUPRC=float(line[5])
            deepsea_dict[sname]=AUPRC

sample_list=[]
deepsea_list=[]
deepshark_list=[]
with open(deepshark, 'r') as fin:
    # `go` becomes True only after the "sample" header line is seen.
    go=False
    for line in fin:
        if line.startswith("sample"):
            go=True
            continue
        elif go:
            line=line.split()
            sname=line[0].split("_")[0]
            # Only compare DNase samples that DeepSEA also scored.
            if "Dnase" in sname and sname in deepsea_dict:
                sample_list.append(sname)
                deepsea_list.append(deepsea_dict[sname])
                deepshark_list.append(float(line[2]))
                print(sname, deepsea_dict[sname], float(line[2]))

deepsea_list=np.array(deepsea_list)
deepshark_list=np.array(deepshark_list)
# log2 ratio of DeepShark over DeepSEA AUPRC; negative => DeepShark worse.
log_fold=np.log2(deepshark_list/deepsea_list)
log_fold_neg=log_fold[log_fold<0.00]
print("total num: "+str(len(log_fold))+"\nless performed num:"+str(len(log_fold_neg))+" ("+str(len(log_fold_neg)/float(len(log_fold))*100.0)+"%)")
| StarcoderdataPython |
5073479 | <filename>tests/examples-good-external/7.py<gh_stars>1-10
import itertools
from itertools import groupby as uniq
import nonexistingmodule
# NOTE(review): this file lives under tests/examples-good-external/ and
# appears to be a linter test fixture — `nonexistingmodule` is intentionally
# unresolvable.  Do not "fix" the import; the test expects it to be missing.
itertools.groupby("AAASDAADSD")
uniq("AAAAA")
nonexistingmodule.array([1,2]) ## does not exist
| StarcoderdataPython |
4955758 | #!/usr/bin/env python
# coding: utf-8
import pandas as pd
import matplotlib.pyplot as plt
from optparse import OptionParser
# Overlay the total DOS columns of two .dat files (e.g. PDOS outputs) in a
# single matplotlib figure and save it as <jobid>.png.
# NOTE(review): `optparse` is deprecated since Python 2.7; argparse is the
# modern replacement, but switching would change the CLI plumbing — confirm
# before migrating.
##Parse the options
usage = "USAGE: python Plot_dat_file_catal.py --f1 First_dat_file --f2 Second_dat_file --c1 Chem1 --c2 Chem2 --jobid PNG_file_name\n"
parser = OptionParser(usage=usage)
##options
parser.add_option("--f1",help="First data file", dest="f1")
parser.add_option("--f2",help="Second data file", dest="f2")
parser.add_option("--c1",help="Chemical for label 1", dest="c1")
parser.add_option("--c2",help="Chemical for label 2", dest="c2")
parser.add_option("--jobid",help="Job name for exit file", dest="jobid")
(options, args) = parser.parse_args()

# All five options are mandatory; bail out with the usage string otherwise.
if (options.f1) and (options.f2) and (options.jobid) and (options.c1) and (options.c2) :
    print ("Start ...")
else :
    print (usage)
    print ("Not enougth arguments\n")
    quit()

# file = pd.read_csv("/Users/barradd/Desktop/PDOS_Zr-40.dat",sep=' ')
## read the file , and parse it to a list
file_list = [x.split() for x in open(options.f1).readlines() ]
## Make it a data frame
file_df = pd.DataFrame( file_list)
### give a Header
new_header = file_df.iloc[0] #grab the first row for the header
file_df = file_df[1:] #take the data less the header row
file_df.columns = new_header #set the header row as the df header
## make the columns float type
file_df["#Energy"] = file_df["#Energy"].astype(float, copy=True)
file_df["tot"] = file_df["tot"].astype(float, copy=True)

## read the second file and process it tha same way
# NOTE(review): the second file reuses `new_header` from the first file, i.e.
# both files are assumed to share the same column layout.
file_list_2 = [x.split() for x in open(options.f2).readlines() ]
file_df2 = pd.DataFrame( file_list_2)
file_df2 = file_df2[1:]
file_df2.columns = new_header #set the header row as the df header
file_df2["#Energy"] = file_df2["#Energy"].astype(float, copy=True)
file_df2["tot"] = file_df2["tot"].astype(float, copy=True)

## Keepp columns of interest only
df_merge = pd.concat([file_df[["#Energy","tot"]], file_df2[["#Energy","tot"]]], axis=1 )
col1 = '%s'%(options.c1)
col2 = '%s'%(options.c2)
## Rename the columns at convinience
df_merge.columns = ['E1', col1, 'E2', col2]
#print (df_merge.columns)

#### Plot the values
fig, ax = plt.subplots()
#
# The columns are transfromed to list
x1 , x2, y1, y2 = df_merge["E1"].to_list(), df_merge["E2"].to_list(),df_merge[col1].to_list(),df_merge[col2].to_list()
#print (x1 , x2, y1, y2)
## pandas has a conflict with the module intelpython3/2020.1
#df_merge.plot(x="E1",y='%s'%(options.c1), grid=True, xlim=(-5,5), ylim=(0,5), ax=ax)
#df_merge.plot(x="E2",y='%s'%(options.c2), grid=True, xlim=(-5,5), ylim=(0,5), style='g', ax=ax)
# This is the block that can plot
ax.plot(x1,y1)
ax.plot(x2,y2)
# Fixed viewing window around the Fermi level: E in [-5, 5], DOS in [0, 5].
ax.set_xlim(-5, 5)
ax.set_ylim(0,5)
ax.grid()
ax.legend([col1,col2],fontsize=10)
plt.xlabel("Energy")
plt.ylabel("DOS arbitrary units")
fig.tight_layout()
#
### save the figure with the name, on the same site
plt.savefig("%s.png"%(options.jobid),format="png",dpi=300)
| StarcoderdataPython |
12844240 | <reponame>grillbaer/data-logger
"""
Communication with APATOR EC3 power meter to get its actual readings.
"""
from __future__ import annotations
__author__ = '<NAME>'
__copyright__ = 'Copyright 2021, <NAME>, Bavaria/Germany'
__license__ = 'Apache License 2.0'
import logging
import time
from typing import NamedTuple, Optional, Callable, List
import serial
from serial import SEVENBITS, PARITY_EVEN, SerialException
from utils import RepeatTimer
logger = logging.getLogger().getChild(__name__)
class PowerMeterReading(NamedTuple):
    """One snapshot of the meter's kWh registers (immutable value object)."""
    # True when the meter answered; the kWh fields are only meaningful then.
    success: bool
    # Sum of both tariff registers (high + low); None if either was unreadable.
    consumption_total_sum_kwh: Optional[float]
    # OBIS 1.8.1 (high tariff) register in kWh, or None if not parsed.
    consumption_high_sum_kwh: Optional[float]
    # OBIS 1.8.2 (low tariff) register in kWh, or None if not parsed.
    consumption_low_sum_kwh: Optional[float]
class PowerMeterApatorEC3:
    """
    Communication object to get readings from an APATOR EC3 electrical power meter.

    Tested only with a 12EC3 two tariff version to get the readings for 1.8.1 and 1.8.2 OBIS values.
    Unfortunately, this meter does not provide any actual effective power values.

    Uses serial communication with the front IR interface.
    Sends a request to the power meter and reads it's response, i.e. a bidirectional
    TX/RX infrared interface must be connected to the serial port.

    Communication needs quite long timeouts and delays because the meter is reaaaaally slow.
    """
    serial_port: str
    _serial: Optional[serial.Serial]

    def __init__(self, serial_port: str):
        """
        Create new communication object for power meter.
        Does not yet open the serial port.

        :param serial_port: serial port to use, e.g. "COM5" on Windows or "/dev/serialUSB0" on Linux
        """
        self.serial_port = serial_port
        self._serial = None

    def open(self) -> None:
        """
        Open the serial port if not open yet. Don't forget to close it when not needed any more.

        :raises: serial.serialutil.SerialException
        """
        if self._serial is None:
            logger.info("Opening serial port " + self.serial_port)
            # 300 baud, 7 data bits, even parity (7E1) is the classic
            # IEC 62056-21 optical-interface handshake configuration.
            self._serial = \
                serial.Serial(self.serial_port,
                              baudrate=300, bytesize=SEVENBITS, parity=PARITY_EVEN,
                              timeout=10)

    def close(self) -> None:
        """
        Close the serial port if open.
        """
        if self._serial is not None:
            logger.info("Closing serial port " + self.serial_port)
            self._serial.close()
            self._serial = None

    def read_raw(self) -> str:
        """
        Read the raw response from the power meter.

        :return: raw response string
        :raises: serial.serialutil.SerialException if communication failed
        """
        logger.debug("Sending request on serial port ...")
        # IEC 62056-21 sign-on request.
        request = b'/?!\r\n'
        self._serial.write(request)
        self._serial.flush()
        time.sleep(2)
        # ACK + "000": acknowledge and ask for the data readout at 300 baud.
        ack_output = b'\x06000\r\n'
        self._serial.write(ack_output)
        self._serial.flush()
        time.sleep(2)

        logger.debug("Reading response from serial port ...")
        data = self._serial.read(65536)
        # Decode once and reuse (the original decoded twice).
        text = data.decode("ascii")
        if text:
            logger.debug("Response:\n" + text)
        return text

    def read(self) -> PowerMeterReading:
        """
        Try to read values from the power meter. Automatically opens the serial interface
        if not yet open. Closes it upon SerialException to force reopening on next attempt.

        :return: reading with values for the case of success, empty reading in case of failure
        """
        try:
            self.open()
            return self._parse_raw(self.read_raw())
        except SerialException:
            self.close()
            return PowerMeterReading(False, None, None, None)

    def _parse_raw(self, raw: str) -> PowerMeterReading:
        """Parse the OBIS 1.8.1/1.8.2 registers out of a raw data readout."""
        high = None
        low = None
        for line in raw.splitlines(keepends=False):
            # Strip STX/ETX framing bytes and surrounding whitespace.
            cleaned = line.strip('\x02\x03\n\r \t')
            if cleaned.startswith("1.8.1*"):
                high = self._parse_line_float(cleaned)
            elif cleaned.startswith("1.8.2*"):
                low = self._parse_line_float(cleaned)
        if high is not None and low is not None:
            total = high + low
        else:
            total = None
        return PowerMeterReading(True, total, high, low)

    def _parse_line_str(self, cleaned_line: str) -> Optional[str]:
        """Return the value enclosed in '(' ... ')' of an OBIS line, or None.

        Bugfix: ``str.find`` returns -1 when '(' is missing, so the original
        check ``find('(') + 1 != -1`` was always true and could return a bogus
        slice; test the raw ``find``/``rfind`` results instead, and require the
        ')' to come after the '('.
        """
        begin = cleaned_line.find("(")
        end = cleaned_line.rfind(")")
        if begin != -1 and end > begin:
            return cleaned_line[begin + 1:end]
        return None

    def _parse_line_float(self, cleaned_line: str) -> Optional[float]:
        """Return the parenthesized value of an OBIS line as float, or None.

        Catches TypeError as well: when no value is enclosed in parentheses,
        ``_parse_line_str`` returns None and ``float(None)`` raises TypeError
        (the original only caught ValueError and would have crashed).
        """
        try:
            return float(self._parse_line_str(cleaned_line))
        except (TypeError, ValueError):
            return None
class SingleCounter:
    """Derives average power (W) for one tariff register from successive kWh readings.

    The meter only exposes cumulative kWh counters, so power is reconstructed
    from the time between counter increments ("edges").
    """
    # Last kWh value seen for this register (None until the first reading).
    _prev_reading: Optional[float]
    # True once an increment of this register has been observed, so the next
    # increment spans a measurable time interval.
    _prev_was_edge: bool
    # Most recently derived average power in watts (None until computable).
    power: Optional[float]
    # Timestamps delimiting the interval over which `power` was averaged.
    power_from_ts: Optional[float]
    power_to_ts: Optional[float]

    def __init__(self):
        self._prev_reading = None
        self._prev_was_edge = False
        self.power = None
        self.power_from_ts = None
        self.power_to_ts = None

    def update(self, reading_kwh: Optional[float], reading_ts: float, min_averaging_secs: float,
               other_counter: SingleCounter):
        """Fold in a new counter reading; derive power once two edges are seen.

        :param reading_kwh: current register value (None if the read failed)
        :param reading_ts: timestamp of the reading (seconds, e.g. time.time())
        :param min_averaging_secs: minimum interval between accepted edges
        :param other_counter: the other tariff's counter; zeroed while this
            tariff is the one advancing (only one tariff is active at a time)
        """
        # Only act on a changed, non-None reading, rate-limited to one accepted
        # edge per `min_averaging_secs` (the kWh registers are coarse-grained).
        if reading_kwh is not None \
                and self._prev_reading != reading_kwh \
                and (self.power_to_ts is None or (reading_ts - self.power_to_ts) >= min_averaging_secs):
            if self._prev_was_edge and self.power_to_ts is not None:
                # kWh delta -> W: 1 kWh = 3.6e6 Ws, divided by elapsed seconds.
                self.power = (reading_kwh - self._prev_reading) * 3.6e6 / \
                             (reading_ts - self.power_to_ts)
                self.power_from_ts = self.power_to_ts
                # While this register advances, the other tariff draws nothing.
                other_counter.power = 0
                other_counter.power_from_ts = self.power_from_ts
                other_counter._prev_was_edge = True
            if self._prev_reading is not None:
                self._prev_was_edge = True
            self._prev_reading = reading_kwh
            self.power_to_ts = reading_ts
class PowerMeterApatorEC3Repeating:
    """Polls a :class:`PowerMeterApatorEC3` on a timer and derives tariff power.

    Each acquisition updates `reading`/`reading_ts`, feeds both tariff
    counters, and notifies all registered callbacks (also on failure).
    """
    min_averaging_secs: float
    _power_meter: PowerMeterApatorEC3
    _timer: RepeatTimer
    # Last reading obtained (None until the first acquisition completes).
    reading: Optional[PowerMeterReading]
    reading_ts: Optional[float]
    # True iff the most recent acquisition succeeded.
    success: bool
    high: SingleCounter
    low: SingleCounter
    # Observers invoked with the latest reading after every acquisition.
    callbacks: List[Callable[[Optional[PowerMeterReading]], None]]

    def __init__(self, power_meter: PowerMeterApatorEC3, interval: float, min_averaging_secs: float):
        """
        :param power_meter: the meter to poll (not opened here)
        :param interval: seconds between polls
        :param min_averaging_secs: passed through to SingleCounter.update
        """
        self.min_averaging_secs = min_averaging_secs
        self._power_meter = power_meter
        self._timer = RepeatTimer(interval, self._acquire)
        self.reading = None
        self.reading_ts = None
        self.success = False
        self.high = SingleCounter()
        self.low = SingleCounter()
        self.callbacks = []

    def add_callback(self, callback: Callable[[Optional[PowerMeterReading]], None]):
        """Register an observer called after every acquisition attempt."""
        self.callbacks.append(callback)

    def start(self):
        """Start periodic polling (no-op if the timer thread already runs)."""
        if not self._timer.is_alive():
            self._timer.start()

    def stop(self):
        """Cancel polling and close the underlying serial port."""
        self._timer.cancel()
        self._power_meter.close()

    def _acquire(self):
        """Timer callback: read the meter, update counters, notify observers."""
        try:
            # Timestamp taken before the (slow) serial exchange starts.
            ts = time.time()
            self.reading = self._power_meter.read()
            self.reading_ts = ts
            self._update_high_power()
            self._update_low_power()
            self.success = True
        except SerialException:
            self.success = False
        # Fire callbacks on success and failure alike.
        self._fire()

    def _update_low_power(self):
        self.low.update(self.reading.consumption_low_sum_kwh, self.reading_ts, self.min_averaging_secs, self.high)

    def _update_high_power(self):
        self.high.update(self.reading.consumption_high_sum_kwh, self.reading_ts, self.min_averaging_secs, self.low)

    def _fire(self):
        for callback in self.callbacks:
            callback(self.reading)
if __name__ == '__main__':
    # Ad-hoc smoke test: poll the meter on COM5 every 30 s (min 10 s between
    # accepted counter edges) and print each reading plus derived power.
    pm = PowerMeterApatorEC3Repeating(PowerMeterApatorEC3("COM5"), 30, 10)
    pm.callbacks.append(lambda r: print(pm.success, r, pm.reading_ts, pm.low.power, pm.high.power))
    pm.start()
| StarcoderdataPython |
3262415 | <reponame>Neurita/pypes
# -*- coding: utf-8 -*-
"""
Functions to create pipelines for public and not so public available datasets.
"""
from collections import OrderedDict
from neuro_pypes.io import build_crumb_workflow
from neuro_pypes.config import update_config
from neuro_pypes.anat import (
attach_spm_anat_preprocessing,
attach_ants_cortical_thickness
)
from neuro_pypes.dmri import (
attach_spm_fsl_dti_preprocessing,
attach_camino_tractography
)
from neuro_pypes.fmri import (
attach_rest_preprocessing,
attach_rest_grptemplate_preprocessing
)
from neuro_pypes.pet import (
attach_spm_mrpet_preprocessing,
attach_spm_pet_preprocessing,
attach_spm_pet_grouptemplate
)
def _cobre_wf_setup(wf_name):
    """ Return a list of workflow-attach functions and a dict with kwargs for
    the `in_out_crumb_wf` function to run workflows against the COBRE database.

    Parameters
    ----------
    wf_name: str
        Name of the pipeline to set up (see `attach_functions` keys below).

    Returns
    -------
    wf_attachers: OrderedDict
        Workflow-attach functions, in attachment order.

    wf_params: dict or None
        Fixed configuration parameters for the workflows; COBRE pipelines
        need none, so this is always None here.

    files_crumb_args: dict with kwargs
        This arguments are for the in_out_crumb_wf.
        A dict that declares the values each file type the crumb arguments in `data_crumb` must be replaced with.
        Example:
              {'anat': [('modality', 'anat_1'),
                        ('image',    'mprage.nii.gz')],
               'rest': [('modality', 'rest_1'),
                        ('image',    'rest.nii.gz')],
              }

    Raises
    ------
    KeyError
        If `wf_name` is not a known pipeline name.
    """
    attach_functions = {
        "spm_anat_preproc": [
            ("spm_anat_preproc", attach_spm_anat_preprocessing)
        ],
        "spm_anat_rest_preproc": [
            ("spm_anat_preproc", attach_spm_anat_preprocessing),
            ("spm_rest_preproc", attach_rest_preprocessing),
        ],
    }

    if wf_name not in attach_functions:
        raise KeyError('Expected an existing pipeline name, got {}. '
                       'Available options: {}'.format(wf_name, list(attach_functions.keys())))

    files_crumb_args = {
        'anat': [('modality', 'anat_1'),
                 ('image', 'mprage.nii.gz')]
    }  # 'anat_1/mprage.nii.gz',

    if 'rest' in wf_name:
        files_crumb_args.update({
            'rest': [('modality', 'rest_1'),
                     ('image', 'rest.nii.gz')
                     ],  # 'rest_1/rest.nii.gz'},
        })

    # BUGFIX: return a 3-tuple like `_clinical_wf_setup` does.  The previous
    # version returned only 2 values (wrapping the templates inside a params
    # dict), which made `cobre_crumb_workflow`'s 3-way unpacking raise a
    # ValueError — and contradicted this docstring.  COBRE has no fixed
    # config parameters, hence wf_params is None.
    return OrderedDict(attach_functions[wf_name]), None, files_crumb_args
def _clinical_wf_setup(wf_name):
    """ Return a list of workflow-attach functions and a dict with kwargs for
    the `in_out_crumb_wf` function to run workflows against the clinical database.

    Parameters
    ----------
    wf_name: str
        Name of the pipeline to set up (see `attach_functions` keys below).

    Returns
    -------
    wf_attachers: dict with wf attacher functions

    wf_params: dict with configuration parameters for the workflows.
        Use this only to fixed configurations needed for the correct functioning of the workflows.

    files_crumb_args: dict with kwargs
        This arguments are for the in_out_crumb_wf.
        A dict that declares the values each file type the crumb arguments in `data_crumb` must be replaced with.
        Example:
              {'anat': [('modality', 'anat_1'),
                        ('image',    'mprage.nii.gz')],
               'rest': [('modality', 'rest_1'),
                        ('image',    'rest.nii.gz')],
              }

    Raises
    ------
    KeyError
        If `wf_name` is not a known pipeline name.
    """
    # Each entry maps a pipeline name to the ordered list of
    # (attachment name, attach function) pairs that build it.
    # MPRAGE bias-field correction, normalization to MNI, and tissue segmentation
    attach_functions = {
        "spm_anat_preproc": [
            ("spm_anat_preproc", attach_spm_anat_preprocessing)
        ],

        # PET normalization to MNI
        "spm_pet_preproc": [
            ("spm_pet_preproc", attach_spm_pet_preprocessing)
        ],

        # will create a PET group template
        "spm_pet_template": [
            ("spm_pet_preproc", attach_spm_pet_preprocessing),
            ("spm_pet_grouptemplate", attach_spm_pet_grouptemplate),
        ],

        # MPRAGE preprocessing, PET MNI group template, PET PVC, and PET normalization to group template
        "spm_anat_pet_tpm_pvc": [
            ("spm_anat_preproc", attach_spm_anat_preprocessing),
            ("spm_pet_preproc", attach_spm_pet_preprocessing),
            ("spm_pet_grouptemplate", attach_spm_pet_grouptemplate),
        ],

        # MPRAGE preprocessing, PET MNI group template, PET PVC, and rest-fMRI
        "spm_anat_pet_pvc_rest": [
            ("spm_anat_preproc", attach_spm_anat_preprocessing),
            ("spm_pet_preproc", attach_spm_pet_preprocessing),
            ("spm_pet_grouptemplate", attach_spm_pet_grouptemplate),
            ("spm_rest_preproc", attach_rest_preprocessing),
        ],

        # MPRAGE preprocessing, PET PVC, and PET normalization to MNI
        "spm_anat_pet_pvc": [
            ("spm_anat_preproc", attach_spm_anat_preprocessing),
            ("spm_mrpet_preproc", attach_spm_mrpet_preprocessing),
        ],

        # MPRAGE preprocessing, rs-fMRI preprocessing and normalization to MNI
        "spm_anat_rest_preproc": [
            ("spm_anat_preproc", attach_spm_anat_preprocessing),
            ("spm_rest_preproc", attach_rest_preprocessing),
        ],

        # MPRAGE preprocessing, DTI preprocessing with FSL
        "fsl_dti_preproc": [
            ("spm_anat_preproc", attach_spm_anat_preprocessing),
            ("fsl_dti_preproc", attach_spm_fsl_dti_preprocessing),
        ],

        # MPRAGE preprocessing, DTI preprocessing with FSL, and tractography with Camino
        "anat_dti_camino": [
            ("spm_anat_preproc", attach_spm_anat_preprocessing),
            ("fsl_dti_preproc", attach_spm_fsl_dti_preprocessing),
            ("camino_tract", attach_camino_tractography),
        ],

        # MPRAGE and PET preprocessing, DTI preprocessing with FSL, and tractography with Camino
        "anat_pet_dti_camino": [("spm_anat_preproc", attach_spm_anat_preprocessing),
                                ("spm_mrpet_preproc", attach_spm_mrpet_preprocessing),
                                ("fsl_dti_preproc", attach_spm_fsl_dti_preprocessing),
                                ("camino_tract", attach_camino_tractography),
                                ],

        # MPRAGE preprocessing, and EPI group template
        "spm_anat_rest_tpm_preproc": [
            ("spm_anat_preproc", attach_spm_anat_preprocessing),
            ("spm_warp_fmri", attach_rest_grptemplate_preprocessing),
        ],

        # MPRAGE preprocessing, EPI and PET group template, and PET and rs-fMRI preprocessing and
        # normalization to group template
        "spm_anat_pet_rest_tpm_preproc": [
            ("spm_anat_preproc", attach_spm_anat_preprocessing),
            ("spm_warp_fmri", attach_rest_grptemplate_preprocessing),
            ("spm_pet_preproc", attach_spm_pet_preprocessing),
            ("spm_pet_grouptemplate", attach_spm_pet_grouptemplate),
        ],

        # MPRAGE preprocessing, EPI group template, and rs-fMRI preprocessing and normalization to MNI
        "spm_anat_pet_rest_preproc": [
            ("spm_anat_preproc", attach_spm_anat_preprocessing),
            ("spm_rest_preproc", attach_rest_preprocessing),
            ("spm_pet_preproc", attach_spm_mrpet_preprocessing),
        ],

        # MPRAGE and cortical thickness
        "spm_anat_ants_cortical_thick": [
            ("spm_anat_preproc", attach_spm_anat_preprocessing),
            ("ants_cort_thick", attach_ants_cortical_thickness),
        ],
    }

    # Fixed config parameters required by specific pipelines (applied via
    # `update_config` by the caller).
    parameters = {
        "spm_pet_template": [('spm_pet_template.do_petpvc', False), ],
        "spm_anat_pet_tpm_pvc": [('spm_pet_template.do_petpvc', True), ],
    }

    if wf_name not in attach_functions:
        raise KeyError('Expected an existing pipeline name, got {}. '
                       'Available options: {}'.format(wf_name, list(attach_functions.keys())))

    # the pipeline parameters
    wf_params = parameters.get(wf_name, None)
    if wf_params is not None:
        wf_params = dict(wf_params)

    # the input files crumb patterns; which modalities are needed is inferred
    # from substrings of the pipeline name.
    files_crumb_args = {}
    files_crumb_args.update({'anat': [('image', 'anat_hc.nii.gz')]})

    if 'pet' in wf_name:
        files_crumb_args.update({'pet': [('image', 'pet_fdg.nii.gz')], })

    if 'rest' in wf_name:
        files_crumb_args.update({'rest': [('image', 'rest.nii.gz')], })

    if 'dti' in wf_name:
        files_crumb_args.update({
            'diff': [('image', 'diff.nii.gz')],
            'bval': [('image', 'diff.bval')],
            'bvec': [('image', 'diff.bvec')],
        })

    return OrderedDict(attach_functions[wf_name]), wf_params, files_crumb_args
def cobre_crumb_workflow(wf_name, data_crumb, output_dir, cache_dir='', config_file='', params=None):
    """ Returns a workflow for the COBRE database.

    Parameters
    ----------
    wf_name: str
        A name for the workflow.
        Choices: 'spm_anat_preproc': MPRAGE preprocessing with SPM12
                 'spm_rest_preproc': MPRAGE+rs-fMRI preprocessing with SPM12

    data_crumb: hansel.Crumb
        The crumb until the subject files.
        Example: Crumb('/home/hansel/cobre/raw/{subject_id}/session_1/{modality}/{image})
        The last 2 crumb arguments of `data_crumb` must be '{modality}/{image}', which indicates each of the
        subject/session files. This argument will be replaced by the corresponding image name.

    cache_dir: str
        The working directory of the workflow.

    output_dir: str
        The output folder path

    config_file: str
        Path to a configuration file. Will use anything compatible with Kaptan.
        Have a look at config.py.

    params: dict with arguments
        crumb_replaces
        atlas_file

    Returns
    -------
    wf: Nipype Workflow
    """
    # NOTE(review): this unpacking expects `_cobre_wf_setup` to return a
    # 3-tuple (attachers, fixed config, file templates) like
    # `_clinical_wf_setup` does — verify the helper's return value matches.
    attach_funcs, cfg_params, file_templates = _cobre_wf_setup(wf_name)

    if params is None:
        params = {}

    # Configuration precedence: file first, then caller params, then the
    # pipeline's own fixed parameters (each call overlays the previous).
    if config_file:
        update_config(config_file)

    if params:
        update_config(params)

    if cfg_params is not None:
        update_config(cfg_params)

    wf = build_crumb_workflow(
        attach_funcs,
        data_crumb=data_crumb,
        in_out_kwargs=file_templates,
        output_dir=output_dir,
        cache_dir=cache_dir,
    )

    return wf
def clinical_crumb_workflow(wf_name, data_crumb, output_dir, cache_dir='', config_file='', params=None):
    """ Returns a workflow for the a clinical database.

    Parameters
    ----------
    wf_name: str
        Name of the pipeline to build.
        Choices: 'spm_anat_preproc': MPRAGE preprocessing with SPM12
                 'spm_anat_pet_preproc': MPRAGE+FDG-PET preprocessing with SPM12
                 'spm_pet_preproc': FDG-PET only preprocessing with SPM12

    data_crumb: hansel.Crumb
        Crumb path pattern down to the subject files.
        Example: Crumb('/home/hansel/data/{subject_id}/{session_id}/{modality}/{image})
        The last crumb argument of `data_crumb` must be '{image}'; it is
        replaced by the concrete image file names of the chosen pipeline.

    cache_dir: str
        Working directory for the workflow.

    output_dir: str
        Output folder path.

    config_file: str
        Path to a configuration file (anything compatible with Kaptan).
        See config.py.

    params: dict with arguments
        crumb_replaces
        atlas_file
        raise_on_filenotfound

    Returns
    -------
    wf: Nipype Workflow
    """
    attachers, fixed_cfg, templates = _clinical_wf_setup(wf_name)

    params = {} if params is None else params

    # Layer the configuration sources: file, then caller overrides, then the
    # pipeline's own fixed parameters.
    if config_file:
        update_config(config_file)
    if params:
        update_config(params)
    if fixed_cfg is not None:
        update_config(fixed_cfg)

    return build_crumb_workflow(
        attachers,
        data_crumb=data_crumb,
        in_out_kwargs=templates,
        output_dir=output_dir,
        cache_dir=cache_dir,
    )
| StarcoderdataPython |
5129866 | <reponame>esemve/Dreya
from Dreya import Dreya
| StarcoderdataPython |
6506064 | <filename>scripts/generate_root_tld_to_nameservers.py
"""Download the DNS root zone and build a TLD -> nameserver-IPv4 mapping.

Parses NS records (TLD -> nameserver hostnames) and A records (hostname ->
IPv4), joins them, and pickles the result to root_tld_to_nameservers_ips.pkl.
"""
import requests
import pickle

response = requests.get(
    url='http://www.internic.net/domain/root.zone',
)
root_zone_file_data = response.text

# host label -> list of nameserver hostnames (NS) / IPv4 addresses (A).
ns_records = {}
a_records = {}
for line in root_zone_file_data.splitlines():
    if '\tIN\tNS\t' in line:
        splitted_line = line.split('\t')
        host_label = splitted_line[0].rstrip('.')
        record_data = splitted_line[-1].rstrip('.')
        # setdefault replaces the original if/else membership-check-append
        # duplication with the standard idiom (same resulting dict).
        ns_records.setdefault(host_label, []).append(record_data)
    elif '\tIN\tA\t' in line:
        splitted_line = line.split('\t')
        host_label = splitted_line[0].rstrip('.')
        record_data = splitted_line[-1]
        a_records.setdefault(host_label, []).append(record_data)

# Join: for every TLD, collect the IPv4 addresses of its nameservers.
# Nameservers without a glue A record contribute nothing.
tld_to_nameservers_ips = {}
for tld, nameservers in ns_records.items():
    tld = tld.strip()
    if tld:
        tld_to_nameservers_ips[tld] = []
        for nameserver in nameservers:
            tld_to_nameservers_ips[tld] += a_records.get(nameserver, [])

with open('root_tld_to_nameservers_ips.pkl', 'wb') as root_tld_to_nameservers_ips_file:
    pickle.dump(
        obj=tld_to_nameservers_ips,
        file=root_tld_to_nameservers_ips_file,
    )
| StarcoderdataPython |
1760017 | <gh_stars>0
from .linear import linear
from .step import step
from .sigmoid import sigmoid
from .relu import relu
from .softmax import softmax | StarcoderdataPython |
11394626 | """Script used to generate ablations parameters from base config file."""
import os
import stat
import json
from copy import deepcopy
# Seed offset so ablation runs never share seeds with the main experiments.
ABLATIONS_SEED_SHIFT = 100

# Each entry maps an ablation name to the config keys (as "section:key") it
# overrides relative to config/base.json.  "vanilla" disables every DSR
# improvement; later entries remove one feature group at a time; "full" is
# unmodified DSR.
ablations = {
    "vanilla" : {
        "controller:entropy_weight" : 0.0,
        "training:baseline" : "ewma_R",
        "training:b_jumpstart" : False,
        "training:alpha" : 0.5,
        "training:epsilon" : 1.0,
        "training:complexity_weight" : 0.0,
        "controller:observe_action" : True,
        "controller:observe_parent" : False,
        "controller:observe_sibling" : False,
        "controller:constrain_const" : False,
        "controller:constrain_trig" : False,
        "controller:constrain_inv" : False,
        "controller:min_length" : 1,
        "controller:constrain_min_len" : False,
        "controller:constrain_max_len" : False
    },
    "no_improvements" : {
        "controller:entropy_weight" : 0.0,
        "training:baseline" : "ewma_R",
        "training:b_jumpstart" : False,
        "training:alpha" : 0.5,
        "training:epsilon" : 1.0,
        "training:complexity_weight" : 0.0,
        "controller:observe_action" : True,
        "controller:observe_parent" : False,
        "controller:observe_sibling" : False
    },
    # Observe only the previous action instead of parent/sibling context.
    "no_hierarchical" : {
        "controller:observe_action" : True,
        "controller:observe_parent" : False,
        "controller:observe_sibling" : False
    },
    "no_entropy" : {
        "controller:entropy_weight" : 0.0
    },
    # Disable risk-seeking policy gradient (epsilon=1.0 -> use all samples).
    "no_risk" : {
        "training:epsilon" : 1.0,
        "training:baseline" : "ewma_R",
        "training:alpha" : 0.5,
        "training:b_jumpstart" : False,
    },
    "no_trig" : {
        "controller:constrain_trig" : False
    },
    "no_inv" : {
        "controller:constrain_inv" : False
    },
    "no_min_max" : {
        "controller:min_length" : 1,
        "controller:constrain_min_len" : False,
        "controller:constrain_max_len" : False
    },
    "no_constraints" : {
        "controller:constrain_const" : False,
        "controller:constrain_trig" : False,
        "controller:constrain_inv" : False,
        "controller:min_length" : 1,
        "controller:constrain_min_len" : False,
        "controller:constrain_max_len" : False
    },
    "full" : {}, # No ablations; DSR
}
def main():
    """Generate one config JSON per ablation and a shell script to run them all."""
    with open("config/base.json", encoding='utf-8') as f:
        template = json.load(f)

    # Create config directory
    path = os.path.join("config", "ablations")
    os.makedirs(path, exist_ok=True)

    # Manually turn off saving all rewards
    template["training"]["save_all_r"] = False
    template["training"]["early_stopping"] = True
    template["gp"]["early_stopping"] = True

    # Create the run file (appended to below) and make it executable.
    run_file = "run_ablations.sh"
    open(run_file, 'a').close() # Create the file
    st = os.stat(run_file)
    os.chmod(run_file, st.st_mode | stat.S_IEXEC)

    # For each abalation
    for name, spec in ablations.items():
        config = deepcopy(template)

        # Each ablation logs under <base logdir>/ablations/<name>.
        logdir = template["training"]["logdir"]
        logdir = os.path.join(logdir, "ablations", name)
        config["training"]["logdir"] = logdir

        # Overwrite config parameters; keys are "section:key" strings and must
        # already exist in the base config (assert guards against typos).
        for k, v in spec.items():
            k = k.split(':')
            assert k[0] in config
            assert k[1] in config[k[0]], (k[1], config[k[0]])
            config[k[0]][k[1]] = v

        # Save the new config
        with open(os.path.join("config", "ablations", "{}.json".format(name)), 'w') as f:
            json.dump(config, f, indent=3)

        # Add the ablation to the run file
        with open(run_file, 'a') as f:
            f.write("time python -m dsr.run ./config/ablations/{}.json --method=dsr --b=Nguyen --mc=10 --seed_shift={} --num_cores=24\n".format(name, ABLATIONS_SEED_SHIFT))


if __name__ == "__main__":
    main()
| StarcoderdataPython |
"""Fetch the device configuration from the server (CONF task), cache it
locally, and acknowledge the received configuration version."""
import json
import requests
import os

try:
    # Load the cached OAuth access token.
    with open('./cache/token.json', "r") as read_file:
        tkns = json.load(read_file)

    # Load cached settings; provides the API base URL.
    with open('./cache/cache.json', "r") as read_file:
        cache = json.load(read_file)

    auth_headers = {'Authorization': str(tkns['access_token'])}

    # Inform the server that the CONF task is being worked on.
    response = requests.get(
        cache["URL_BASE"] + "/device/tasks/CONF/doing",
        headers=auth_headers)

    if response.status_code == 200:
        # Retrieve the current configuration payload.
        confData = requests.get(
            cache["URL_BASE"] + "/device/confs",
            headers=auth_headers)

        if confData.status_code == 200:
            # Save the configuration.
            # BUGFIX: `Response.content` is bytes, so the file must be opened
            # in binary mode — the original text-mode "w" raised TypeError
            # under Python 3 ("wb" is also valid under Python 2).
            confFILE = './cache/conf.json'
            with open(confFILE, "wb") as file:
                file.write(confData.content)

            # Confirm the received configuration version to the server.
            # (Leftover debug prints of the raw response were removed, and the
            # JSON body is now parsed once instead of twice.)
            print('> confirm update')
            timestamp = confData.json()["timestamp"]
            response = requests.put(
                cache["URL_BASE"] + "/device/confs/" + timestamp,
                headers=auth_headers)
            print('> Confirmed')
except Exception as err:
    # Top-level boundary: report and swallow so the device task loop survives.
    print("[CONF.py] Error: " + str(err))
| StarcoderdataPython |
# time complexity could be O(n^3), three for loops
# and the same for space, list size n, each cell
def wordBreak(s, wordDict):
    """Return all sentences obtained by splitting `s` into words from
    `wordDict`, using bottom-up DP from the end of the string."""
    n = len(s)
    # table[i] holds every word sequence that spells out s[i:].
    table = [[] for _ in range(n + 1)]
    table[n] = ['']  # empty suffix: a single, empty segmentation
    for start in range(n - 1, -1, -1):
        for end in range(start + 1, n + 1):
            word = s[start:end]
            if word not in wordDict:
                continue
            for suffix_words in table[end]:
                # Prepend the current word to every segmentation of the rest.
                table[start].append([word] + list(suffix_words))
    return [' '.join(words) for words in table[0]]
def wordBreak_2(s, wordDict):
    """Same bottom-up DP as `wordBreak`, but stores fully joined sentence
    strings in each cell instead of word lists."""
    n = len(s)
    # sentences[i] holds every sentence covering s[i:].
    sentences = [[] for _ in range(n + 1)]
    sentences[n] = ['']
    for start in reversed(range(n)):
        for end in range(start + 1, n + 1):
            word = s[start:end]
            if word in wordDict:
                sentences[start].extend(
                    word if tail == '' else word + ' ' + tail
                    for tail in sentences[end]
                )
    return sentences[0]
# recurrisve method by forward backtracking and memorization
def wordBreak_recurrsive(s, wordDict):
    """Return all segmentations of `s` into words from `wordDict`
    (top-down DP with per-suffix memoization)."""
    mem = {}
    return recurrsive_solution(s, wordDict, mem)


def recurrsive_solution(s, wordDict, mem):
    """Helper: all sentences covering suffix `s`, cached in `mem` keyed by
    the suffix string.

    (Leftover debug prints of each partial result and the memo table were
    removed — they polluted stdout on every call.)
    """
    if s in mem:
        return mem[s]
    if not s:
        return ['']
    new_list = []
    for i in range(1, len(s) + 1):
        prefix = s[:i]
        if prefix in wordDict:
            # Segment the remainder recursively and stitch the sentences.
            for rest in recurrsive_solution(s[i:], wordDict, mem):
                new_list.append(prefix if rest == '' else prefix + ' ' + rest)
    mem[s] = new_list
    return new_list
# Ad-hoc demo: prints the two segmentations of "catsanddog".
print(wordBreak_recurrsive("catsanddog", [
    "cat", "cats", "and", "sand", "dog"]))
| StarcoderdataPython |
11236787 | import glob
import os
import sys
import numpy as np
# Make the CARLA Python API egg importable.  The glob matches the egg built
# for this interpreter version and platform; if no egg is found, glob returns
# an empty list and the IndexError is silently swallowed (the later
# `import carla` will then fail with a clearer error).
try:
    sys.path.append(glob.glob('../../carla/dist/carla-*%d.%d-%s.egg' % (
        sys.version_info.major,
        sys.version_info.minor,
        'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
    pass
import carla
# Script level imports
sys.path.append('../../carla')
# from agents.navigation.roaming_agent import RoamingAgent
# from agents.navigation.basic_agent import BasicAgent
############################
from TB_common_functions import Ticking
from Environments.CAV_Game import AV_on_Stright_Road
import __init__
from __init__ import client, world, settings
def main():
    """Run the AV-on-straight-road CARLA scenario until interrupted,
    cleaning up the environment in the `finally` block."""
    try:
        world.set_weather(carla.WeatherParameters.ClearNoon)
        print("PROGRESS: Weather conditions set :)")
        frame = None
        world.tick()
        # Run asynchronously (synchronous_mode off) for this scenario.
        settings.synchronous_mode = False
        world.apply_settings(settings)
        # Setting up environment
        env = AV_on_Stright_Road(world)
        # ===Set
        env.set()
        #========================================================================================================
        # # Use this bit of code below to convert from game coordinates to geolocation
        # current_map = world.get_map() #map obtained by world.get_map()
        # degreesToRadians = 1/180.0*np.pi
        # x = 1.1538
        # y = -71.2938
        # z = -0.057
        # geolocation_of_map_origin = current_map.transform_to_geolocation(carla.Location(x=float(x), y=float(y), z=float(z)))
        # print("GeoLocation of map origin",geolocation_of_map_origin)
        # world.debug.draw_string(carla.Location(x=float(x), y=float(y), z=float(z)), 'O', draw_shadow=False,
        #                                        color=carla.Color(r=255, g=0, b=0), life_time=120.0,persistent_lines=True)
        #==========================================================================================================
        # while True:
        #     env.set()####
        # Main simulation loop: tick the world, then advance the scenario.
        while True:
            #===Tick
            ts, frame = Ticking(world,frame)
            # print("Ticking")
            # ===Step
            env.step()
            # if env.test_ended == True:
            #     env.destroy()
            #     ts, frame = Ticking(world,frame)
            #     break
        # if env.tests_ended == True:
        #     break
        # print("Tick")
        # except Exception:
        # NOTE(review): the `except Exception:` above was commented out, which
        # leaves the following cleanup lines after an infinite `while True`
        # loop — they are unreachable as written; only the `finally` block
        # below ever runs (e.g. on KeyboardInterrupt).
        print("Executed before destroy in exception!!!")
        env.destroy()
        print("Executed after destroy in exception!!!")
        settings.synchronous_mode = False
        world.apply_settings(settings)
        pass
    finally:
        # env.end_tests()
        # env.destroy_actors()
        # NOTE(review): if AV_on_Stright_Road() itself raised, `env` is
        # unbound here and env.destroy() will raise NameError — verify.
        print("Executed before destroy in finally!!!")
        env.destroy()
        print("Executed after destroy in finally!!!")
        settings.synchronous_mode = False
        world.apply_settings(settings)
if __name__ == '__main__':
    # Ctrl-C is the expected way to stop the scenario; swallow it and let
    # main()'s finally-block handle the cleanup.
    try:
        main()
    except KeyboardInterrupt:
        pass
    finally:
        print('\ndone.')
| StarcoderdataPython |
9779730 | # -*- coding: utf-8 -*-
import numpy as np
from . import Filter # prevent circular import in Python < 3.5
class Simoncelli(Filter):
    r"""Design 2 filters with the Simoncelli construction (tight frame).

    This function creates a Parseval filter bank of 2 filters.
    The low-pass filter is defined by the function

    .. math:: f_{l}=\begin{cases} 1 & \mbox{if }x\leq a\\
            \cos\left(\frac{\pi}{2}\frac{\log\left(\frac{x}{2}\right)}{\log(2)}\right) & \mbox{if }a<x\leq2a\\
            0 & \mbox{if }x>2a \end{cases}

    The high pass filter is adapted to obtain a tight frame.

    Parameters
    ----------
    G : graph
    a : float
        See above equation for this parameter.
        The spectrum is scaled between 0 and 2 (default = 2/3).

    Examples
    --------

    Filter bank's representation in Fourier and time (ring graph) domains.

    >>> import matplotlib.pyplot as plt
    >>> G = graphs.Ring(N=20)
    >>> G.estimate_lmax()
    >>> G.set_coordinates('line1D')
    >>> g = filters.Simoncelli(G)
    >>> s = g.localize(G.N // 2)
    >>> fig, axes = plt.subplots(1, 2)
    >>> g.plot(ax=axes[0])
    >>> G.plot_signal(s, ax=axes[1])

    """

    def __init__(self, G, a=2./3):
        def lowpass_kernel(val, a):
            # Piecewise low-pass response: 1 on the passband, a raised-cosine
            # (in log-frequency) on the transition band, 0 on the stopband.
            out = np.empty(np.shape(val))
            passband = (val >= 0) * (val < a)
            transition = (val >= a) * (val < 2 * a)
            stopband = (val >= 2 * a)
            out[passband] = 1
            out[transition] = np.cos(np.pi/2 * np.log(val[transition]/float(a)) / np.log(2))
            out[stopband] = 0
            return out

        # Eigenvalues are rescaled to [0, 2] before evaluating the kernels.
        # The high-pass kernel is the Parseval complement of the low-pass one.
        kernels = [
            lambda x: lowpass_kernel(x * (2./G.lmax), a),
            lambda x: np.real(np.sqrt(1 - lowpass_kernel(x * (2./G.lmax), a) ** 2)),
        ]

        super(Simoncelli, self).__init__(G, kernels)
| StarcoderdataPython |
11396714 | # Generated by Django 2.0.2 on 2018-03-08 21:20
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: wires the ``Strand`` model to its poster,
    # owning team, and tags.

    initial = True

    dependencies = [
        # The user model is swappable, so depend on the configured setting
        # instead of a concrete app/model pair.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('strands', '0001_initial'),
        ('teams', '0001_initial'),
    ]

    operations = [
        # User who created the strand; the strand survives user deletion
        # (SET_NULL), hence blank/null allowed.
        migrations.AddField(
            model_name='strand',
            name='original_poster',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='strands', to=settings.AUTH_USER_MODEL),
        ),
        # Owning team; likewise survives team deletion via SET_NULL.
        migrations.AddField(
            model_name='strand',
            name='owner',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='strands', to='teams.Team'),
        ),
        # Free-form tagging, many strands <-> many tags.
        migrations.AddField(
            model_name='strand',
            name='tags',
            field=models.ManyToManyField(related_name='strands', to='strands.Tag'),
        ),
    ]
| StarcoderdataPython |
1822923 | <reponame>kmkurn/ptst-semeval2021<gh_stars>1-10
#!/usr/bin/env python
# Copyright (c) 2021 <NAME>
from collections import defaultdict
from pathlib import Path
from statistics import median
import math
import os
import pickle
import tempfile
from anafora import AnaforaData
from rnnr import Event, Runner
from rnnr.attachments import EpochTimer, MeanReducer, ProgressBar
from sacred import Experiment
from sacred.observers import MongoObserver
from sacred.utils import apply_backspaces_and_linefeeds
from text2array import BucketIterator, ShuffleIterator
from tqdm import tqdm
from transformers import AutoConfig, AutoModelForTokenClassification
import numpy as np
import torch
from aatrn import compute_ambiguous_tag_pairs_mask
from callbacks import (
log_grads,
log_stats,
save_state_dict,
update_params,
)
from crf import LinearCRF
from evaluation import score_time
from ingredients.corpus import ing as corpus_ing, read_samples
from models import RoBERTagger
from utils import make_anafora, print_accs
# Sacred experiment definition; the corpus ingredient contributes the
# data-reading configuration used by read_samples().
ex = Experiment("sest10-ptst-testrun", ingredients=[corpus_ing])
ex.captured_out_filter = apply_backspaces_and_linefeeds

# Setup mongodb observer -- only attached when both environment
# variables are configured, so local runs work without a database.
mongo_url = os.getenv("SACRED_MONGO_URL")
db_name = os.getenv("SACRED_DB_NAME")
if None not in (mongo_url, db_name):
    ex.observers.append(MongoObserver.create(url=mongo_url, db_name=db_name))
@ex.config
def default():
    """Default configuration; each assignment (with the comment above it)
    becomes a sacred experiment parameter."""
    # directory to save finetuning artifacts
    artifacts_dir = "timex_artifacts"
    # whether to overwrite existing artifacts directory
    overwrite = False
    # temperature to regulate confidence (>1 means less confident)
    temperature = 1.0
    # whether to freeze the embedding layers
    freeze_embeddings = True
    # freeze encoder earlier layers up to this layer
    freeze_encoder_up_to = 5
    # device to run on [cpu, cuda]
    device = "cuda" if torch.cuda.is_available() else "cpu"
    # cumulative prob threshold
    thresh = 0.95
    # batch size
    batch_size = 50
    # learning rate
    lr = 1e-5
    # max number of epochs
    max_epoch = 5
    # path to directory containing the gold annotations
    gold_path = ""
    # whether to write predictions when finetuning is finished
    predict_on_finished = False
    # load model parameters from here (evaluate)
    load_params = "model.pth"
    # whether to save confusion matrix (evaluate)
    save_confusion_matrix = False
@ex.named_config
def best():
    """Named config with the best hyperparameters found by tuning."""
    lr = 9e-6
    temperature = 2.56
@ex.capture
def run_eval(
    model,
    id2label,
    samples,
    corpus,
    _log,
    device="cpu",
    batch_size=32,
    gold_path="",
    compute_loss=False,
    confusion=False,
):
    """Evaluate ``model`` on ``samples``.

    Returns a ``(score, mean_ptst_loss)`` tuple. ``score`` is ``None``
    unless ``gold_path`` is given; the loss is ``None`` unless
    ``compute_loss`` is requested. Predictions are written to a temporary
    Anafora directory and scored against the gold annotations.
    """
    if not gold_path and not compute_loss:
        _log.info("Skipping evaluation since gold data isn't provided and loss isn't required")
        return None, None

    runner = Runner()
    runner.state.update({"preds": [], "_ids": []})

    # The three BATCH handlers below run in registration order and share
    # the mutable ``state`` dict ("arr"/"scores" are cached across them).
    @runner.on(Event.BATCH)
    def maybe_compute_prediction(state):
        if not gold_path:
            return
        arr = state["batch"].to_array()
        state["arr"] = arr
        assert arr["mask"].all()
        words = torch.from_numpy(arr["word_ids"]).long().to(device)
        model.eval()
        scores = model(words)
        # Viterbi decoding over the CRF scores
        preds = LinearCRF(scores).argmax()
        state["preds"].extend(preds.tolist())
        state["_ids"].extend(arr["_id"].tolist())
        if compute_loss:
            # cache the scores so the loss handler doesn't recompute them
            state["scores"] = scores

    @runner.on(Event.BATCH)
    def maybe_compute_loss(state):
        if not compute_loss:
            return
        arr = state["arr"] if "arr" in state else state["batch"].to_array()
        state["arr"] = arr
        if "scores" in state:
            scores = state["scores"]
        else:
            assert arr["mask"].all()
            words = torch.from_numpy(arr["word_ids"]).long().to(device)
            model.eval()
            scores = model(words)
        mask = torch.from_numpy(arr["mask"]).bool().to(device)
        ptst_mask = torch.from_numpy(arr["ptst_mask"]).bool().to(device)
        # -1e9 acts as -inf: transitions outside the PTST mask get
        # (effectively) zero probability mass.
        masked_scores = scores.masked_fill(~ptst_mask, -1e9)
        crf = LinearCRF(masked_scores)
        crf_z = LinearCRF(scores)
        # PTST loss = -log of the probability mass inside the mask
        ptst_loss = -crf.log_partitions().sum() + crf_z.log_partitions().sum()
        state["ptst_loss"] = ptst_loss.item()
        state["size"] = mask.size(0)

    @runner.on(Event.BATCH)
    def set_n_items(state):
        # "arr" is guaranteed to exist: at least one of the handlers above
        # ran (we returned early when neither gold_path nor compute_loss).
        state["n_items"] = int(state["arr"]["mask"].sum())

    n_tokens = sum(len(s["word_ids"]) for s in samples)
    ProgressBar(leave=False, total=n_tokens, unit="tok").attach_on(runner)
    if compute_loss:
        MeanReducer("mean_ptst_loss", value="ptst_loss").attach_on(runner)

    with torch.no_grad():
        runner.run(BucketIterator(samples, lambda s: len(s["word_ids"]), batch_size))

    if runner.state["preds"]:
        assert len(runner.state["preds"]) == len(samples)
        assert len(runner.state["_ids"]) == len(samples)
        # bucketing reorders batches, so map predictions back via _id
        for i, preds in zip(runner.state["_ids"], runner.state["preds"]):
            samples[i]["preds"] = preds

    if gold_path:
        # group samples per source document before writing Anafora XML
        group = defaultdict(list)
        for s in samples:
            group[str(s["path"])].append(s)
        with tempfile.TemporaryDirectory() as dirname:
            dirname = Path(dirname)
            for doc_path, doc_samples in group.items():
                spans = [x for s in doc_samples for x in s["spans"]]
                labels = [id2label[x] for s in doc_samples for x in s["preds"]]
                # strip the corpus root so the relative layout is mirrored
                doc_path = Path(doc_path[len(f"{corpus['path']}/") :])
                data = make_anafora(spans, labels, doc_path.name)
                (dirname / doc_path.parent).mkdir(parents=True, exist_ok=True)
                data.to_file(f"{str(dirname / doc_path)}.xml")
            # score while the temporary directory still exists
            return (
                score_time(gold_path, str(dirname), confusion),
                runner.state.get("mean_ptst_loss"),
            )

    return None, runner.state.get("mean_ptst_loss")
@ex.capture
def read_samples_(_log, **kwargs):
    """Read samples, assign each a sequential ``_id``, and log corpus size."""
    loaded = list(read_samples(**kwargs))
    for idx, sample in enumerate(loaded):
        sample["_id"] = idx
    token_count = sum(len(sample["word_ids"]) for sample in loaded)
    _log.info("Read %d samples and %d tokens", len(loaded), token_count)
    return loaded
@ex.command(unobserved=True)
def evaluate_src_model(_log, _run, device="cpu"):
    """Evaluate the source model (no finetuning)."""

    class Wrapper(torch.nn.Module):
        """Adapt per-token emissions to the tag-pair score tensor that
        LinearCRF expects."""

        def __init__(self, model):
            super().__init__()
            self.model = model

        def forward(self, *args, **kwargs):
            # emissions: (bsz, slen, nl) -- first element of the HF output
            emissions = self.model(*args, **kwargs)[0]
            bsz, slen, nl = emissions.shape
            # Broadcast each token's emission over the "previous tag" axis
            # so the score of a (prev, cur) pair is just the emission of cur.
            scores = emissions[:, :-1].unsqueeze(2)
            assert scores.shape == (bsz, slen - 1, 1, nl)
            scores = scores.expand(bsz, slen - 1, nl, nl)
            scores = scores.clone()
            # Fold the last token's emission into the final pair scores.
            scores[:, -1] += emissions[:, -1].unsqueeze(2)
            assert scores.shape == (bsz, slen - 1, nl, nl)
            return scores

    model_name = "clulab/roberta-timex-semeval"
    _log.info("Loading %s", model_name)
    config = AutoConfig.from_pretrained(model_name)
    model = Wrapper(AutoModelForTokenClassification.from_pretrained(model_name, config=config))
    model.to(device)
    _log.info("Evaluating")
    eval_score, _ = run_eval(model, config.id2label, read_samples_())
    print_accs(eval_score)
    return eval_score["f1"]
@ex.command
def evaluate(
    _log,
    _run,
    temperature=1.0,
    artifacts_dir="artifacts",
    load_params="model.pth",
    device="cpu",
    save_confusion_matrix=False,
):
    """Evaluate a trained target model.

    Loads finetuned parameters from ``artifacts_dir / load_params`` and
    optionally serializes the confusion matrix (labels.pkl + confusion.npy).
    """
    model_name = "clulab/roberta-timex-semeval"
    _log.info("Loading %s", model_name)
    config = AutoConfig.from_pretrained(model_name)
    token_clf = AutoModelForTokenClassification.from_pretrained(model_name, config=config)
    model = RoBERTagger(token_clf, config.num_labels, temperature)
    artifacts_dir = Path(artifacts_dir)
    _log.info("Loading model parameters from %s", artifacts_dir / load_params)
    # load on CPU first; moved to the target device below
    model.load_state_dict(torch.load(artifacts_dir / load_params, "cpu"))
    model.to(device)
    _log.info("Evaluating")
    eval_score, _ = run_eval(model, config.id2label, read_samples_(), confusion=save_confusion_matrix)
    # "confusion" is only present when save_confusion_matrix was requested
    c = eval_score.pop("confusion", None)
    print_accs(eval_score, on="test", run=_run)
    if c is not None:
        # collect the label set from the (gold, pred) pair keys,
        # forcing "O" to be the first row/column of the matrix
        labels = set()
        for k in c.keys():
            labels.update(k)
        if "O" in labels:
            labels.remove("O")
        labels = sorted(labels)
        labels.insert(0, "O")
        label2id = {l: i for i, l in enumerate(labels)}
        m = np.zeros((len(labels), len(labels)))
        for k, cnt in c.items():
            m[label2id[k[0]], label2id[k[1]]] = cnt
        _log.info("Saving labels list in %s", artifacts_dir / "labels.pkl")
        with open(artifacts_dir / "labels.pkl", "wb") as f:
            pickle.dump(labels, f)
        _log.info("Saving confusion matrix in %s", artifacts_dir / "confusion.npy")
        np.save(artifacts_dir / "confusion.npy", m)
    return eval_score["f1"]
@ex.command(unobserved=True)
def report_coverage(
    corpus, _log, temperature=1.0, device="cpu", batch_size=16, thresh=0.95, gold_path=""
):
    """Report coverage of gold tags in the chart.

    Builds the ambiguous PTST tag-pair mask at cumulative-probability
    threshold ``thresh`` and reports how many gold tag pairs / whole tag
    sequences are representable inside that mask.
    """
    samples = read_samples_()
    model_name = "clulab/roberta-timex-semeval"
    _log.info("Loading %s", model_name)
    config = AutoConfig.from_pretrained(model_name)
    token_clf = AutoModelForTokenClassification.from_pretrained(model_name, config=config)
    model = RoBERTagger(token_clf, config.num_labels, temperature)
    _log.info("Initializing transitions")
    torch.nn.init.zeros_(model.start_transition)
    torch.nn.init.zeros_(model.transition)
    # Forbid illegal BIO transitions with -1e9 (effectively -inf): an
    # I-<type> tag may not start a sequence, and may only follow a tag
    # of the same <type>.
    for lid, label in config.id2label.items():
        if not label.startswith("I-"):
            continue
        with torch.no_grad():
            model.start_transition[lid] = -1e9
        for plid, plabel in config.id2label.items():
            if plabel == "O" or plabel[2:] != label[2:]:
                with torch.no_grad():
                    model.transition[plid, lid] = -1e9
    model.to(device)
    _log.info("Computing ambiguous PTST tag pairs mask")
    model.eval()
    ptst_masks, _ids = [], []
    pbar = tqdm(total=sum(len(s["word_ids"]) for s in samples), unit="tok")
    for batch in BucketIterator(samples, lambda s: len(s["word_ids"]), batch_size):
        arr = batch.to_array()
        assert arr["mask"].all()
        words = torch.from_numpy(arr["word_ids"]).long().to(device)
        with torch.no_grad():
            ptst_mask = compute_ambiguous_tag_pairs_mask(model(words), thresh)
        ptst_masks.extend(ptst_mask.tolist())
        _ids.extend(arr["_id"].tolist())
        pbar.update(int(arr["mask"].sum()))
    pbar.close()
    assert len(ptst_masks) == len(samples)
    assert len(_ids) == len(samples)
    # bucketing reorders batches, so re-attach masks via the _id mapping
    for i, ptst_mask in zip(_ids, ptst_masks):
        samples[i]["ptst_mask"] = ptst_mask
    _log.info("Reporting coverage of gold labels")
    # group samples by their document path relative to the corpus root
    group = defaultdict(list)
    for s in samples:
        k = str(s["path"])[len(f"{corpus['path']}/") :]
        group[k].append(s)
    n_cov_tp, n_total_tp, n_cov_ts, n_total_ts = 0, 0, 0, 0
    for dirpath, _, filenames in os.walk(gold_path):
        if not filenames:
            continue
        if len(filenames) > 1:
            raise ValueError(f"more than 1 file is found in {dirpath}")
        if not filenames[0].endswith(".TimeNorm.gold.completed.xml"):
            raise ValueError(f"{filenames[0]} doesn't have the expected suffix")
        doc_path = os.path.join(dirpath, filenames[0])
        data = AnaforaData.from_file(doc_path)
        prefix, suffix = f"{gold_path}/", ".TimeNorm.gold.completed.xml"
        doc_path = doc_path[len(prefix) : -len(suffix)]
        tok_spans = [p for s in group[doc_path] for p in s["spans"]]
        tok_spans.sort()
        # labeling: token index -> BIO label derived from gold annotations
        labeling = {}
        for ann in data.annotations:
            if len(ann.spans) != 1:
                raise ValueError("found annotation with >1 span")
            span = ann.spans[0]
            # advance to the first token starting at/after the annotation
            beg = 0
            while beg < len(tok_spans) and tok_spans[beg][0] < span[0]:
                beg += 1
            end = beg
            while end < len(tok_spans) and tok_spans[end][1] < span[1]:
                end += 1
            # only accept annotations that align exactly with token
            # boundaries and don't overlap a previous annotation
            if (
                beg < len(tok_spans)
                and end < len(tok_spans)
                and tok_spans[beg][0] == span[0]
                and tok_spans[end][1] == span[1]
                and beg not in labeling
            ):
                labeling[beg] = f"B-{ann.type}"
                for i in range(beg + 1, end + 1):
                    if i not in labeling:
                        labeling[i] = f"I-{ann.type}"
        labels = ["O"] * len(tok_spans)
        for k, v in labeling.items():
            labels[k] = v
        # walk each sample's consecutive tag pairs and check whether the
        # PTST mask admits the gold (prev, cur) transition
        offset = 0
        for s in group[doc_path]:
            ts_covd = True
            for i in range(1, len(s["spans"])):
                plab = labels[offset + i - 1]
                lab = labels[offset + i]
                if s["ptst_mask"][i - 1][config.label2id[plab]][config.label2id[lab]]:
                    n_cov_tp += 1
                else:
                    ts_covd = False
                n_total_tp += 1
            if ts_covd:
                n_cov_ts += 1
            n_total_ts += 1
            offset += len(s["spans"])
    _log.info(
        "Number of covered tag pairs: %d out of %d (%.1f%%)",
        n_cov_tp,
        n_total_tp,
        100.0 * n_cov_tp / n_total_tp,
    )
    _log.info(
        "Number of covered tag sequences: %d out of %d (%.1f%%)",
        n_cov_ts,
        n_total_ts,
        100.0 * n_cov_ts / n_total_ts,
    )
@ex.automain
def finetune(
    _log,
    _run,
    _rnd,
    corpus,
    artifacts_dir="artifacts",
    overwrite=False,
    temperature=1.0,
    freeze_embeddings=True,
    freeze_encoder_up_to=1,
    device="cpu",
    thresh=0.95,
    batch_size=16,
    lr=1e-5,
    max_epoch=5,
    predict_on_finished=False,
):
    """Finetune/train the source model on unlabeled target data.

    Training minimizes the PTST loss: the negative log of the CRF
    probability mass assigned to the tag sequences admitted by the
    ambiguous tag-pair mask (-log Z_masked + log Z_full).
    """
    artifacts_dir = Path(artifacts_dir)
    artifacts_dir.mkdir(exist_ok=overwrite)
    # "samples" are length-capped for training; "eval_samples" keep the
    # full documents for evaluation/prediction
    samples = read_samples_()
    eval_samples = read_samples_(max_length=None)
    model_name = "clulab/roberta-timex-semeval"
    _log.info("Loading %s", model_name)
    config = AutoConfig.from_pretrained(model_name)
    token_clf = AutoModelForTokenClassification.from_pretrained(model_name, config=config)
    model = RoBERTagger(token_clf, config.num_labels, temperature)
    _log.info("Initializing transitions")
    torch.nn.init.zeros_(model.start_transition)
    torch.nn.init.zeros_(model.transition)
    # Forbid illegal BIO transitions with -1e9 (effectively -inf): an
    # I-<type> tag may not start a sequence, and may only follow a tag
    # of the same <type>.
    for lid, label in config.id2label.items():
        if not label.startswith("I-"):
            continue
        with torch.no_grad():
            model.start_transition[lid] = -1e9
        for plid, plabel in config.id2label.items():
            if plabel == "O" or plabel[2:] != label[2:]:
                with torch.no_grad():
                    model.transition[plid, lid] = -1e9
    # Optionally freeze the embeddings and the lower encoder layers.
    for name, p in model.named_parameters():
        freeze = False
        if freeze_embeddings and ".embeddings." in name:
            freeze = True
        if freeze_encoder_up_to >= 0:
            for i in range(freeze_encoder_up_to + 1):
                if f".encoder.layer.{i}." in name:
                    freeze = True
        if freeze:
            _log.info("Freezing %s", name)
            p.requires_grad_(False)
    model.to(device)
    _log.info("Computing ambiguous PTST tag pairs mask")
    model.eval()
    ptst_masks, _ids = [], []
    pbar = tqdm(total=sum(len(s["word_ids"]) for s in samples), unit="tok")
    for batch in BucketIterator(samples, lambda s: len(s["word_ids"]), batch_size):
        arr = batch.to_array()
        assert arr["mask"].all()
        words = torch.from_numpy(arr["word_ids"]).long().to(device)
        with torch.no_grad():
            ptst_mask = compute_ambiguous_tag_pairs_mask(model(words), thresh)
        ptst_masks.extend(ptst_mask.tolist())
        _ids.extend(arr["_id"].tolist())
        pbar.update(int(arr["mask"].sum()))
    pbar.close()
    assert len(ptst_masks) == len(samples)
    assert len(_ids) == len(samples)
    # bucketing reorders batches, so re-attach masks via the _id mapping
    for i, ptst_mask in zip(_ids, ptst_masks):
        samples[i]["ptst_mask"] = ptst_mask
    _log.info("Report number of sequences")
    # Count sequences by running the CRF partition function over all-zero
    # scores: log Z then equals the log of the number of admitted paths.
    log_total_nseqs, log_nseqs = [], []
    pbar = tqdm(total=sum(len(s["word_ids"]) for s in samples), leave=False)
    for batch in BucketIterator(samples, lambda s: len(s["word_ids"]), batch_size):
        arr = batch.to_array()
        assert arr["mask"].all()
        ptst_mask = torch.from_numpy(arr["ptst_mask"]).bool().to(device)
        cnt_scores = torch.zeros_like(ptst_mask).float()
        cnt_scores_masked = cnt_scores.masked_fill(~ptst_mask, -1e9)
        log_total_nseqs.extend(LinearCRF(cnt_scores).log_partitions().tolist())
        log_nseqs.extend(LinearCRF(cnt_scores_masked).log_partitions().tolist())
        pbar.update(arr["word_ids"].size)
    pbar.close()
    cov = [math.exp(x - x_) for x, x_ in zip(log_nseqs, log_total_nseqs)]
    _log.info(
        "Number of seqs: min {:.2} ({:.2}%) | med {:.2} ({:.2}%) | max {:.2} ({:.2}%)".format(
            math.exp(min(log_nseqs)),
            100 * min(cov),
            math.exp(median(log_nseqs)),
            100 * median(cov),
            math.exp(max(log_nseqs)),
            100 * max(cov),
        )
    )
    _log.info("Creating optimizer")
    opt = torch.optim.Adam(model.parameters(), lr=lr)
    finetuner = Runner()

    @finetuner.on(Event.BATCH)
    def compute_loss(state):
        arr = state["batch"].to_array()
        words = torch.from_numpy(arr["word_ids"]).long().to(device)
        mask = torch.from_numpy(arr["mask"]).bool().to(device)
        ptst_mask = torch.from_numpy(arr["ptst_mask"]).bool().to(device)
        model.train()
        scores = model(words, mask)
        masked_scores = scores.masked_fill(~ptst_mask, -1e9)
        # mask passed to LinearCRF shouldn't include the last token
        last_idx = mask.long().sum(dim=1, keepdim=True) - 1
        mask_ = mask.scatter(1, last_idx, False)[:, :-1]
        crf = LinearCRF(masked_scores, mask_)
        crf_z = LinearCRF(scores, mask_)
        # PTST loss = -log(Z_masked) + log(Z_full), averaged per sequence
        ptst_loss = -crf.log_partitions().sum() + crf_z.log_partitions().sum()
        ptst_loss /= mask.size(0)
        state["loss"] = ptst_loss
        state["stats"] = {"ptst_loss": ptst_loss.item()}
        state["n_items"] = mask.long().sum().item()

    finetuner.on(Event.BATCH, [update_params(opt), log_grads(_run, model), log_stats(_run)])

    @finetuner.on(Event.EPOCH_FINISHED)
    def evaluate(state):
        # evaluate on both the (capped) training samples and the full ones
        _log.info("Evaluating on train")
        eval_score, loss = run_eval(model, config.id2label, samples, compute_loss=True)
        if eval_score is not None:
            print_accs(eval_score, on="train", run=_run, step=state["n_iters"])
        _log.info("train_ptst_loss: %.4f", loss)
        _run.log_scalar("train_ptst_loss", loss, step=state["n_iters"])
        _log.info("Evaluating on eval")
        eval_score, _ = run_eval(model, config.id2label, eval_samples)
        if eval_score is not None:
            print_accs(eval_score, on="eval", run=_run, step=state["n_iters"])
        state["eval_f1"] = None if eval_score is None else eval_score["f1"]

    finetuner.on(Event.EPOCH_FINISHED, save_state_dict("model", model, under=artifacts_dir))

    @finetuner.on(Event.FINISHED)
    def maybe_predict(state):
        if not predict_on_finished:
            return
        _log.info("Computing predictions")
        model.eval()
        preds, _ids = [], []
        pbar = tqdm(total=sum(len(s["word_ids"]) for s in eval_samples), unit="tok")
        for batch in BucketIterator(eval_samples, lambda s: len(s["word_ids"]), batch_size):
            arr = batch.to_array()
            assert arr["mask"].all()
            words = torch.from_numpy(arr["word_ids"]).long().to(device)
            scores = model(words)
            pred = LinearCRF(scores).argmax()
            preds.extend(pred.tolist())
            _ids.extend(arr["_id"].tolist())
            pbar.update(int(arr["mask"].sum()))
        pbar.close()
        assert len(preds) == len(eval_samples)
        assert len(_ids) == len(eval_samples)
        for i, preds_ in zip(_ids, preds):
            eval_samples[i]["preds"] = preds_
        # group by document and emit Anafora XML under artifacts/time
        group = defaultdict(list)
        for s in eval_samples:
            group[str(s["path"])].append(s)
        _log.info("Writing predictions")
        for doc_path, doc_samples in group.items():
            spans = [x for s in doc_samples for x in s["spans"]]
            labels = [config.id2label[x] for s in doc_samples for x in s["preds"]]
            doc_path = Path(doc_path[len(f"{corpus['path']}/") :])
            data = make_anafora(spans, labels, doc_path.name)
            (artifacts_dir / "time" / doc_path.parent).mkdir(parents=True, exist_ok=True)
            data.to_file(
                f"{str(artifacts_dir / 'time' / doc_path)}.TimeNorm.system.completed.xml"
            )

    EpochTimer().attach_on(finetuner)
    n_tokens = sum(len(s["word_ids"]) for s in samples)
    ProgressBar(stats="stats", total=n_tokens, unit="tok").attach_on(finetuner)
    # bucket sequences of similar length together, then shuffle buckets
    bucket_key = lambda s: (len(s["word_ids"]) - 1) // 10
    trn_iter = ShuffleIterator(
        BucketIterator(samples, bucket_key, batch_size, shuffle_bucket=True, rng=_rnd),
        rng=_rnd,
    )
    _log.info("Starting finetuning")
    try:
        finetuner.run(trn_iter, max_epoch)
    except KeyboardInterrupt:
        _log.info("Interrupt detected, training will abort")
    else:
        return finetuner.state.get("eval_f1")
| StarcoderdataPython |
3227895 | <reponame>najsham/pysnobal
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import glob
import sys
import numpy
from setuptools import setup, find_packages, Extension
# from distutils.extension import Extension
from Cython.Distutils import build_ext

# The long description is assembled from the README and the changelog.
with open('README.md') as fh:
    readme = fh.read()

with open('HISTORY.rst') as fh:
    history = fh.read()

requirements = [
    # TODO: put package requirements here
]

test_requirements = [
    # TODO: put package test requirements here
]

# make sure we're using GCC (unless the caller already chose a compiler)
os.environ.setdefault("CC", "gcc")

if sys.platform == 'darwin':
    # Build a dynamic library instead of a bundle on macOS.
    from distutils import sysconfig
    config_vars = sysconfig.get_config_vars()
    config_vars['LDSHARED'] = config_vars['LDSHARED'].replace('-bundle', '-dynamiclib')

# ------------------------------------------------------------------------------
# Compiling the C code for the Snobal library
lib_dir = 'pysnobal/c_snobal/libsnobal'
cwd = os.getcwd()
source_files = glob.glob(os.path.join(lib_dir, '*.c'))

pkg_dir = 'pysnobal/c_snobal'
extra_cc_args = ['-fopenmp', '-O3', '-L./pysnobal', '-ggdb3']
source_files.append(os.path.join(pkg_dir, "snobal.pyx"))

extensions = [
    Extension(
        "pysnobal.c_snobal.snobal",
        source_files,
        # libraries=["snobal"],
        include_dirs=[
            numpy.get_include(),
            'pysnobal/c_snobal',
            'pysnobal/c_snobal/h',
        ],
        # runtime_library_dirs=['{}'.format(os.path.join(cwd,'pysnobal'))],
        extra_compile_args=extra_cc_args,
        extra_link_args=extra_cc_args,
    )
]

setup(
    name='pysnobal',
    version='0.2.0',
    description="Python wrapper of the Snobal mass and energy balance snow model",
    long_description=readme + '\n\n' + history,
    author="<NAME>",
    author_email='<EMAIL>',
    url='https://github.com/USDA-ARS-NWRC/pysnobal',
    packages=find_packages(exclude=["tests"]),
    include_package_data=True,
    install_requires=requirements,
    license="CC0 1.0",
    zip_safe=False,
    keywords='pysnobal',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: CC0 1.0',
        'Natural Language :: English',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6'
    ],
    test_suite='tests',
    tests_require=test_requirements,
    cmdclass={
        'build_ext': build_ext
    },
    ext_modules=extensions,
)
| StarcoderdataPython |
6580180 | <reponame>renaudll/maya-mock<filename>tests/unit_tests/base_tests/test_node.py<gh_stars>10-100
"""
Test cases for MockedNode
"""
def test_dagpath(session):
    """A node with a unique name resolves to a root-level dagpath."""
    transform = session.create_node("transform")
    assert transform.dagpath == "|transform1"
def test_dagpath_hierarchy(session):
    """Parented nodes resolve to dagpaths that include their ancestors."""
    top, middle, bottom = (session.create_node("transform") for _ in range(3))
    middle.set_parent(top)
    bottom.set_parent(middle)
    assert top.dagpath == "|transform1"
    assert middle.dagpath == "|transform1|transform2"
    assert bottom.dagpath == "|transform1|transform2|transform3"
def test_node_melobject(session):
    """A uniquely named node is represented in MEL by its short name."""
    named = session.create_node("transform", name="A")
    assert named.__melobject__() == "A"
def test_node_shape_transform_melobject(session):
    """Creating a shape implicitly creates an appropriately named transform."""
    mesh = session.create_node("mesh", name="A")
    assert mesh.parent.__melobject__() == "polySurface1"
def test_node_melobject_clashing_rootnode(session):
    """A root node whose name clashes with a nested node's name gets a
    MEL representation that starts with ``|``."""
    root = session.create_node("transform", name="A")
    parent = session.create_node("transform", name="parent")
    session.create_node("transform", name="A", parent=parent)
    assert root.__melobject__() == "|A"
def test_node_melobject_clashing(session):
    """A nested node whose name clashes with another node's name gets a
    partially qualified, unique MEL representation."""
    session.create_node("transform", name="A")
    parent = session.create_node("transform", name="parent")
    child = session.create_node("transform", name="A", parent=parent)
    assert child.__melobject__() == "parent|A"
| StarcoderdataPython |
3392139 | # 3rd party lib
import torch
import torch.nn as nn
import torch.nn.functional as F
# mm lib
from openselfsup.models import HEADS
@HEADS.register_module
class DynamicResslHead(nn.Module):
    """Head for ReSSL-style relational contrastive learning.

    The loss is the cross entropy ``-p log q`` between the (sharpened)
    teacher relation distribution derived from ``logits_k`` and the
    student distribution derived from ``logits_q``.

    Args:
        temperature (float): Temperature of the teacher distribution
            computed from ``logits_k``; smaller values sharpen the
            target. Default: 0.07.
        student_temperature (float): Temperature of the student
            distribution computed from ``logits_q``. Default: 0.1
            (matches the value that was previously hard-coded).
    """

    def __init__(self, temperature=0.07, student_temperature=0.1):
        super(DynamicResslHead, self).__init__()
        self.temperature = temperature
        self.student_temperature = student_temperature

    def forward(self, logits_q, logits_k):
        """Forward head.

        Args:
            logits_q (Tensor): Nxk student relation logits.
            logits_k (Tensor): Nxk teacher relation logits.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        losses = dict()
        # Cross entropy -p log q; the teacher target is detached so no
        # gradient flows through the key branch. (Open question kept from
        # the original author: is there an essential difference between
        # -q log p and -p log q here?)
        target = F.softmax(logits_k.detach() / self.temperature, dim=1)
        log_pred = F.log_softmax(logits_q / self.student_temperature, dim=1)
        losses['loss_contra'] = -torch.sum(target * log_pred, dim=1).mean()
        return losses
5198190 | <reponame>agupta54/ulca
# Shared request context for the USER-MANAGEMENT module; ``userID`` is a
# placeholder expected to be populated per request before use.
MODULE_CONTEXT = {'metadata': {'module': 'USER-MANAGEMENT'},'userID':None}
| StarcoderdataPython |
3496346 | <filename>spaghetti/analysis.py
import numpy as np
class NetworkBase(object):
    """Base object for performing network analysis on a
    ``spaghetti.Network`` object.

    Parameters
    ----------
    ntw : spaghetti.Network
        spaghetti Network object.
    pointpattern : spaghetti.network.PointPattern
        A spaghetti point pattern object.
    nsteps : int
        The number of steps at which the count of the nearest
        neighbors is computed.
    permutations : int
        The number of permutations to perform. Default 99.
    threshold : float
        The level at which significance is computed.
        (0.5 would be 97.5% and 2.5%).
    distribution : str
        The distribution from which random points are sampled
        Either ``"uniform"`` or ``"poisson"``.
    lowerbound : float
        The lower bound at which the G-function is computed.
        Default 0.
    upperbound : float
        The upper bound at which the G-function is computed.
        Defaults to the maximum observed nearest neighbor distance.

    Attributes
    ----------
    sim : numpy.ndarray
        simulated distance matrix
    npts : int
        pointpattern.npoints
    xaxis : numpy.ndarray
        observed x-axis of values
    observed : numpy.ndarray
        observed y-axis of values

    """

    def __init__(
        self,
        ntw,
        pointpattern,
        nsteps=10,
        permutations=99,
        threshold=0.5,
        distribution="poisson",
        lowerbound=None,
        upperbound=None,
    ):
        # set initial class attributes
        self.ntw = ntw
        self.pointpattern = pointpattern
        self.nsteps = nsteps
        self.permutations = permutations
        self.threshold = threshold
        # set and validate the distribution
        self.distribution = distribution
        self.validatedistribution()
        # create an empty array to store the simulated points
        self.sim = np.empty((permutations, nsteps))
        self.npts = self.pointpattern.npoints
        # set the lower and upper bounds (lower only for G)
        self.lowerbound = lowerbound
        self.upperbound = upperbound
        # compute the statistic (F, G, or K) -- template methods
        # implemented by the NetworkF/G/K subclasses; computeobserved()
        # must run first because it sets the bounds used by
        # computepermutations().
        self.computeobserved()
        self.computepermutations()
        # compute the envelope vectors
        self.computeenvelope()

    def validatedistribution(self):
        """Ensure the requested statistical distribution is supported."""
        valid_distributions = ["uniform", "poisson"]
        assert (
            self.distribution in valid_distributions
        ), "Distribution not in {}".format(valid_distributions)

    def computeenvelope(self):
        """Compute upper and lower bounds of the simulation envelope."""
        # e.g. threshold=0.5 gives scaling factors of 0.75 and 0.25
        upper = 1.0 - self.threshold / 2.0
        lower = self.threshold / 2.0
        self.upperenvelope = np.nanmax(self.sim, axis=0) * upper
        self.lowerenvelope = np.nanmin(self.sim, axis=0) * lower

    def setbounds(self, nearest):
        """Set default lower/upper bounds when the caller supplied none."""
        if self.lowerbound is None:
            self.lowerbound = 0
        if self.upperbound is None:
            self.upperbound = np.nanmax(nearest)
class NetworkG(NetworkBase):
    """Compute a network constrained G statistic. This requires the
    capability to compute a distance matrix between two point patterns.
    In this case one will be observed and one will be simulated.

    """

    def computeobserved(self):
        """Compute the G-function of the observed nearest neighbor distances."""
        # find nearest point that is not NaN
        nearest = np.nanmin(self.ntw.allneighbordistances(self.pointpattern), axis=1)
        self.setbounds(nearest)
        # compute a G-Function
        observedx, observedy = gfunction(
            nearest, self.lowerbound, self.upperbound, nsteps=self.nsteps
        )
        # set observed values
        self.observed = observedy
        self.xaxis = observedx

    def computepermutations(self):
        """Compute G-functions for simulated point patterns."""
        # for each round of permutations
        for p in range(self.permutations):
            # simulate a point pattern with the same number of points
            sim = self.ntw.simulate_observations(
                self.npts, distribution=self.distribution
            )
            # find nearest observation
            nearest = np.nanmin(self.ntw.allneighbordistances(sim), axis=1)
            # compute a G-Function
            simx, simy = gfunction(
                nearest, self.lowerbound, self.upperbound, nsteps=self.nsteps
            )
            # label the permutation
            self.sim[p] = simy
class NetworkK(NetworkBase):
    """Compute a network constrained K statistic. This requires the
    capability to compute a distance matrix between two point patterns.
    In this case one will be observed and one will be simulated.

    Attributes
    ----------
    lam : float
        ``lambda`` value (point intensity per unit network length)

    Notes
    -----
    Based on :cite:`Okabe2001`.

    """

    def computeobserved(self):
        """Compute the K-function of the observed distance matrix."""
        # the full distance matrix is used here (no nanmin reduction)
        nearest = self.ntw.allneighbordistances(self.pointpattern)
        self.setbounds(nearest)
        # set the intensity (lambda): points per unit of network length
        self.lam = self.npts / sum(self.ntw.arc_lengths.values())
        # compute a K-Function
        observedx, observedy = kfunction(
            nearest, self.upperbound, self.lam, nsteps=self.nsteps
        )
        # set observed values
        self.observed = observedy
        self.xaxis = observedx

    def computepermutations(self):
        """Compute K-functions for simulated point patterns."""
        # for each round of permutations
        for p in range(self.permutations):
            # simulate a point pattern
            sim = self.ntw.simulate_observations(
                self.npts, distribution=self.distribution
            )
            # distance matrix for the simulated pattern
            nearest = self.ntw.allneighbordistances(sim)
            # compute a K-Function
            simx, simy = kfunction(
                nearest, self.upperbound, self.lam, nsteps=self.nsteps
            )
            # label the permutation
            self.sim[p] = simy
class NetworkF(NetworkBase):
    """Compute a network constrained F statistic. This requires the
    capability to compute a distance matrix between two point patterns.
    In this case one will be observed and one will be simulated.

    Attributes
    ----------
    fsim : spaghetti.network.SimulatedPointPattern
        simulated point pattern of ``self.npts`` points

    """

    def computeobserved(self):
        """Compute the F-function between a reference simulated pattern
        and the observed pattern."""
        # create an initial simulated point pattern (reused by
        # computepermutations as the fixed reference pattern)
        self.fsim = self.ntw.simulate_observations(self.npts)
        # find nearest neighbor distances from
        # the simulated to the observed
        nearest = np.nanmin(
            self.ntw.allneighbordistances(self.fsim, self.pointpattern), axis=1
        )
        self.setbounds(nearest)
        # compute an F-function
        observedx, observedy = ffunction(
            nearest,
            self.lowerbound,
            self.upperbound,
            nsteps=self.nsteps,
            npts=self.npts,
        )
        # set observed values
        self.observed = observedy
        self.xaxis = observedx

    def computepermutations(self):
        """Compute F-functions for simulated point patterns."""
        # for each round of permutations
        for p in range(self.permutations):
            # simulate a point pattern
            sim = self.ntw.simulate_observations(
                self.npts, distribution=self.distribution
            )
            # nearest distances from the simulated pattern to fsim
            nearest = np.nanmin(self.ntw.allneighbordistances(sim, self.fsim), axis=1)
            # compute an F-function
            simx, simy = ffunction(
                nearest, self.lowerbound, self.upperbound, self.npts, nsteps=self.nsteps
            )
            # label the permutation
            self.sim[p] = simy
def gfunction(nearest, lowerbound, upperbound, nsteps=10):
    """Compute a G-function: the cumulative distribution of nearest
    neighbor distances over ``nsteps`` evenly spaced distance bands.

    Parameters
    ----------
    nearest : numpy.ndarray
        A vector of nearest neighbor distances.
    lowerbound : int or float
        The starting value of the sequence.
    upperbound : int or float
        The end value of the sequence.
    nsteps : int
        The number of distance bands. Default is 10. Must be
        non-negative.

    Returns
    -------
    x : numpy.ndarray
        x-axis of values
    y : numpy.ndarray
        y-axis of values

    """
    nobs = len(nearest)
    # evenly spaced distance radii for the x-axis
    radii = np.linspace(lowerbound, upperbound, nsteps)
    distances = np.sort(nearest)
    y = np.empty(len(radii))
    for idx, radius in enumerate(radii):
        # fraction of observations whose nearest neighbor lies within radius
        within = int((distances <= radius).sum())
        y[idx] = within / float(nobs) if within > 0 else 0
    return radii, y
def kfunction(nearest, upperbound, intensity, nsteps=10):
    """Compute an empirical K-function.

    Parameters
    ----------
    nearest : numpy.ndarray
        A vector of nearest neighbor distances.
    upperbound : int or float
        The end value of the sequence (the sequence starts at 0).
    intensity : float
        The point-process intensity (lambda); cumulative counts are
        scaled by 1/lambda.
    nsteps : int
        The number of distance bands. Default is 10. Must be
        non-negative.

    Returns
    -------
    x : numpy.ndarray
        x-axis of values (distance bands)
    y : numpy.ndarray
        y-axis of values (scaled cumulative neighbor counts)
    """
    # create interval for the x-axis
    x = np.linspace(0, upperbound, nsteps)
    # cumulative count of neighbors within each radius
    # (removed an unused `nobs = len(nearest)` local from the original)
    y = np.empty(len(x))
    for i, r in enumerate(x):
        y[i] = len(nearest[nearest <= r])
    # scale counts by the inverse of the intensity
    y *= intensity ** -1
    return x, y
def ffunction(nearest, lowerbound, upperbound, npts, nsteps=10):
    """Compute an empirical F-function (empty-space function).

    Parameters
    ----------
    nearest : numpy.ndarray
        A vector of nearest neighbor distances.
    lowerbound : int or float
        The starting value of the sequence.
    upperbound : int or float
        The end value of the sequence.
    npts : int
        ``pointpattern.npoints``; used as the denominator of the
        cumulative proportion.
    nsteps : int
        The number of distance bands. Default is 10. Must be
        non-negative.

    Returns
    -------
    x : numpy.ndarray
        x-axis of values (distance bands)
    y : numpy.ndarray
        y-axis of values (cumulative proportions)
    """
    # distance bands for the x-axis
    # (removed an unused `nobs = len(nearest)` local from the original;
    # this function normalizes by `npts`, not by len(nearest))
    x = np.linspace(lowerbound, upperbound, nsteps)
    # sort nearest neighbor distances
    nearest = np.sort(nearest)
    # empirical CDF values
    y = np.empty(len(x))
    for i, r in enumerate(x):
        # count of distances falling within radius r
        cnt = len(nearest[nearest <= r])
        # proportion of points covered within r; 0 when none
        y[i] = cnt / float(npts) if cnt > 0 else 0.0
    return x, y
| StarcoderdataPython |
5070306 | <reponame>marklogg/mini_demo
import asyncio
from mini import mini_sdk as MiniSdk
from mini.apis.api_content import LanType
from mini.apis.api_content import QueryWiKi, WikiResponse
from mini.apis.api_content import StartTranslate, TranslateResponse
from mini.apis.base_api import MiniApiResultType
from mini.dns.dns_browser import WiFiDevice
from test.test_connect import test_get_device_by_name
# Test: wiki query
async def test_query_wiki():
    """Wiki query demo.

    Queries the encyclopedia for the keyword '优必选', waits for the
    result, and the robot announces the answer.

    #WikiResponse.isSuccess : whether the call succeeded
    #WikiResponse.resultCode : result code
    """
    # query: the keyword to look up
    block: QueryWiKi = QueryWiKi(query='优必选')
    # response : WikiResponse
    (resultType, response) = await block.execute()
    print(f'test_query_wiki result: {response}')
    # fixed typo in the failure message ("timetout" -> "timeout")
    assert resultType == MiniApiResultType.Success, 'test_query_wiki timeout'
    assert response is not None and isinstance(response, WikiResponse), 'test_query_wiki result unavailable'
    assert response.isSuccess, 'query_wiki failed'
# Test: translation API
async def test_start_translate():
    """Translation demo.

    Uses Baidu translation to translate '张学友' from Chinese to English,
    waits for the result, and the robot announces the translation.

    #TranslateResponse.isSuccess : whether the call succeeded
    #TranslateResponse.resultCode : result code

    # query: text to translate
    # from_lan: source language
    # to_lan: target language
    # platform: BAIDU, GOOGLE, TENCENT
    """
    block: StartTranslate = StartTranslate(query="张学友", from_lan=LanType.CN, to_lan=LanType.EN)
    # response: TranslateResponse
    (resultType, response) = await block.execute()
    print(f'test_start_translate result: {response}')
    # fixed typo in the failure message ("timetout" -> "timeout")
    assert resultType == MiniApiResultType.Success, 'test_start_translate timeout'
    assert response is not None and isinstance(response, TranslateResponse), 'test_start_translate result unavailable'
    assert response.isSuccess, 'start_translate failed'
async def main():
    """Connect to the first discovered robot, run the content-API demos,
    then shut the SDK down cleanly."""
    device: WiFiDevice = await test_get_device_by_name()
    if device:
        await MiniSdk.connect(device)
        # enter programming mode before issuing content-API commands
        await MiniSdk.enter_program()
        await test_query_wiki()
        await test_start_translate()
        # leave programming mode and release SDK resources
        await MiniSdk.quit_program()
        await MiniSdk.release()
if __name__ == '__main__':
    asyncio.run(main())
| StarcoderdataPython |
9736606 | # -*- mode: python; coding: utf-8 -*-
#
# Copyright (C) 1990 - 2016 CONTACT Software GmbH
# All rights reserved.
# http://www.contact.de/
__docformat__ = "restructuredtext en"
__revision__ = "$Id: main.py 142800 2016-06-17 12:53:51Z js $"
import os
from cdb import rte
from cdb import sig
from cs.platform.web import static
from cs.platform.web.root import Root
from cs.web.components.configurable_ui import ConfigurableUIApp
from cs.web.components.configurable_ui import SinglePageModel
class TicketApp(ConfigurableUIApp):
    """Configurable-UI web application serving the ticket front end."""
    pass
@Root.mount(app=TicketApp, path="/cstraining-web")
def _mount_app():
    """Mount the ticket application under ``/cstraining-web``."""
    return TicketApp()
class TicketModel(SinglePageModel):
    """Single-page model backing the ticket UI."""
    # name of the single page bundle served by TicketApp
    page_name = "cstraining-web"
@TicketApp.path(path="", model=TicketModel)
def _get_model():
    """Resolve the application root path to the single-page model."""
    return TicketModel()
@TicketApp.view(model=SinglePageModel, name="document_title", internal=True)
def default_document_title(self, request):
    """Return the browser document title for the single page."""
    return "Tickets"
@sig.connect(rte.APPLICATIONS_LOADED_HOOK)
def _register_libraries():
    """Register the bundled JavaScript assets once all applications load."""
    lib = static.Library("cstraining-web", "0.0.1",
                         os.path.join(os.path.dirname(__file__), 'js', 'build'))
    # built bundle plus its source map
    lib.add_file("cstraining-web.js")
    lib.add_file("cstraining-web.js.map")
    static.Registry().add(lib)
| StarcoderdataPython |
3251826 | # -*- coding: utf-8 -*-
"""
zine.importers.wordpress
~~~~~~~~~~~~~~~~~~~~~~~~
Implements an importer for WordPress extended RSS feeds.
:copyright: (c) 2010 by the Zine Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
from time import strptime
from datetime import datetime
from lxml import etree
from zine.forms import WordPressImportForm
from zine.importers import Importer, Blog, Tag, Category, Author, Post, Comment
from zine.i18n import lazy_gettext, _
from zine.utils import log
from zine.utils.admin import flash
from zine.utils.xml import Namespace, html_entities, escape
from zine.utils.zeml import parse_html, inject_implicit_paragraphs
from zine.utils.http import redirect_to
from zine.utils.net import open_url
from zine.utils.text import gen_timestamped_slug
from zine.models import COMMENT_UNMODERATED, COMMENT_MODERATED, \
STATUS_DRAFT, STATUS_PUBLISHED
# XML namespaces that appear in a WordPress WXR export
CONTENT = Namespace('http://purl.org/rss/1.0/modules/content/')
DC_METADATA = Namespace('http://purl.org/dc/elements/1.1/')
WORDPRESS = Namespace('http://wordpress.org/export/1.0/')
# matches the leading <?xml ...?> declaration (DOTALL via inline (?s) flag)
_xml_decl_re = re.compile(r'<\?xml.*?\?>(?s)')
# matches <wp:postmeta> value sections so their content can be XML-escaped
_meta_value_re = re.compile(r'(<wp:postmeta>.*?<wp:meta_value>)(.*?)'
                            r'(</wp:meta_value>.*?</wp:postmeta>)(?s)')
# matches <wp:comment> content sections for the same escaping fix
_comment_re = re.compile(r'(<wp:comment>.*?<wp:comment_content>)(.*?)'
                         r'(</wp:comment_content>.*?</wp:comment>)(?s)')
# matches CDATA-wrapped post bodies so literal "]]>" can be re-escaped
_content_encoded_re = re.compile(r'(<content:encoded>)<!\[CDATA\['
                                 r'(.*?)\]\]>(</content:encoded>)(?s)')
def _wordpress_to_html(markup):
    """Convert WordPress-HTML into real HTML."""
    # WordPress stores text with implicit (blank-line separated) paragraphs;
    # inject explicit paragraph tags before serializing back to HTML.
    return inject_implicit_paragraphs(parse_html(markup)).to_html()
def parse_broken_wxr(fd):
    """This method reads from a file descriptor and parses a WXR file as
    created by current WordPress versions. This method also injects a
    custom DTD to not bark on HTML entities and fixes some problems with
    regular expressions before parsing. It's not my fault, wordpress is
    that crazy :-/

    :param fd: readable file-like object yielding the raw WXR export.
    :return: the ``<channel>`` element of the embedded RSS tree.
    """
    # fix one: add inline doctype that defines the HTML entities so that
    # the parser doesn't bark on them, wordpress adds such entities to some
    # sections from time to time
    inline_doctype = '<!DOCTYPE wordpress [ %s ]>' % ' '.join(
        '<!ENTITY %s "&#%d;">' % (name, codepoint)
        for name, codepoint in html_entities.iteritems()
    )
    # fix two: wordpress 2.6 uses "excerpt:encoded" where excerpt is an
    # undeclared namespace. What they did makes no sense whatsoever but
    # who cares. We're not treating that element anyways but the XML
    # parser freaks out. To fix that problem we're wrapping the whole
    # thing in another root element
    extra = '<wxrfix xmlns:excerpt="ignore:me">'
    code = fd.read()
    xml_decl = _xml_decl_re.search(code)
    if xml_decl is not None:
        code = code[:xml_decl.end()] + inline_doctype + extra + \
               code[xml_decl.end():]
    else:
        code = inline_doctype + extra + code
    # fix three: find comment sections and escape them. Especially trackbacks
    # tend to break the XML structure. same applies to wp:meta_value stuff.
    # this is especially necessary for older wordpress dumps, 2.7 fixes some
    # of these problems.
    def escape_if_good_idea(match):
        # only escape payloads that are not already CDATA-wrapped
        before, content, after = match.groups()
        if not content.lstrip().startswith('<![CDATA['):
            content = escape(content)
        return before + content + after
    code = _meta_value_re.sub(escape_if_good_idea, code)
    code = _comment_re.sub(escape_if_good_idea, code)
    # close the wrapper root element opened in fix two
    code += '</wxrfix>'
    # fix four: WordPress uses CDATA sections for content. Because it's very
    # likely ]]> appears in the text as literal the XML parser totally freaks
    # out there. We've had at least one dump that does not import without
    # this hack.
    def reescape_escaped_content(match):
        # drop the CDATA wrapper and XML-escape the body instead
        before, content, after = match.groups()
        return before + escape(content) + after
    code = _content_encoded_re.sub(reescape_escaped_content, code)
    return etree.fromstring(code).find('rss').find('channel')
def parse_wordpress_date(value):
    """Parse a WordPress date string (``YYYY-MM-DD HH:MM:SS``).

    Returns a :class:`datetime.datetime` on success or `None` when the
    value is missing or malformed.
    """
    try:
        # strptime returns a struct_time; only the first six fields
        # (year..second) are positional datetime arguments.  The original
        # sliced [:7], which passed tm_wday (the weekday) as microseconds.
        return datetime(*strptime(value, '%Y-%m-%d %H:%M:%S')[:6])
    except (ValueError, TypeError):
        # malformed string or non-string (e.g. None from findtext) ->
        # signal "unknown date"; the bare `except:` was narrowed so real
        # errors are no longer swallowed.
        return None
def parse_feed(fd):
    """Parse an extended WordPress RSS feed into a structure the general
    importer system can handle. The return value is a `Blog` object.
    """
    tree = parse_broken_wxr(fd)
    authors = {}
    def get_author(name):
        # memoized Author lookup; returns None for empty/missing names
        if name:
            author = authors.get(name)
            if author is None:
                author = authors[name] = Author(name, None)
            return author
    # collect tag and category definitions keyed by display name
    tags = {}
    for item in tree.findall(WORDPRESS.tag):
        tag = Tag(item.findtext(WORDPRESS.tag_slug),
                  item.findtext(WORDPRESS.tag_name))
        tags[tag.name] = tag
    categories = {}
    for item in tree.findall(WORDPRESS.category):
        category = Category(item.findtext(WORDPRESS.category_nicename),
                            item.findtext(WORDPRESS.cat_name))
        categories[category.name] = category
    posts = []
    # removes empty elements (e.g. <p></p>) left over after conversion
    clean_empty_tags = re.compile("\<(?P<tag>\w+?)\>[\r\n]?\</(?P=tag)\>")
    for item in tree.findall('item'):
        # anything not explicitly marked draft is treated as published
        status = {
            'draft': STATUS_DRAFT
        }.get(item.findtext(WORDPRESS.status), STATUS_PUBLISHED)
        post_name = item.findtext(WORDPRESS.post_name)
        pub_date = parse_wordpress_date(item.findtext(WORDPRESS.post_date_gmt))
        content_type={'post': 'entry', 'page': 'page'}.get(
            item.findtext(WORDPRESS.post_type), 'entry')
        slug = None
        # without a name or date we cannot build a slug -> demote to draft
        if pub_date is None or post_name is None:
            status = STATUS_DRAFT
        if status == STATUS_PUBLISHED:
            slug = gen_timestamped_slug(post_name, content_type, pub_date)
        # Store WordPress comment ids mapped to Comment objects
        comments = {}
        for x in item.findall(WORDPRESS.comment):
            if x.findtext(WORDPRESS.comment_approved) == 'spam':
                continue
            commentobj = Comment(
                x.findtext(WORDPRESS.comment_author),
                x.findtext(WORDPRESS.comment_content),
                x.findtext(WORDPRESS.comment_author_email),
                x.findtext(WORDPRESS.comment_author_url),
                comments.get(x.findtext(WORDPRESS.comment_parent), None),
                parse_wordpress_date(x.findtext(
                    WORDPRESS.comment_date_gmt)),
                x.findtext(WORDPRESS.comment_author_ip),
                'html',
                x.findtext(WORDPRESS.comment_type) in ('pingback',
                                                       'traceback'),
                (COMMENT_UNMODERATED, COMMENT_MODERATED)
                    [x.findtext(WORDPRESS.comment_approved) == '1']
            )
            comments[x.findtext(WORDPRESS.comment_id)] = commentobj
        post_body = item.findtext(CONTENT.encoded)
        post_intro = item.findtext('description')
        # only a description present -> use it as the body
        if post_intro and not post_body:
            post_body = post_intro
            post_intro = None
        elif post_body:
            # a <!--more--> marker splits intro from the remaining body
            find_more_results = re.split('<!--more ?.*?-->', post_body)
            if len(find_more_results) > 1:
                post_intro = clean_empty_tags.sub('',
                    _wordpress_to_html(find_more_results[0]))
                post_body = find_more_results[1]
        else:
            # hmm. nothing to process. skip that entry
            continue
        post_body = clean_empty_tags.sub('', _wordpress_to_html(post_body))
        post = Post(
            slug,
            item.findtext('title'),
            item.findtext('link'),
            pub_date,
            get_author(item.findtext(DC_METADATA.creator)),
            post_intro,
            post_body,
            [tags[x.text] for x in item.findall('tag')
             if x.text in tags],
            [categories[x.text] for x in item.findall('category')
             if x.text in categories],
            comments.values(),
            item.findtext('comment_status') != 'closed',
            item.findtext('ping_status') != 'closed',
            parser='html',
            content_type=content_type
        )
        posts.append(post)
    return Blog(
        tree.findtext('title'),
        tree.findtext('link'),
        tree.findtext('description') or '',
        tree.findtext('language') or 'en',
        tags.values(),
        categories.values(),
        posts,
        authors.values()
    )
class WordPressImporter(Importer):
name = 'wordpress'
title = 'WordPress'
description = lazy_gettext(u'Handles import of WordPress "extended RSS" '
u' feeds.')
def configure(self, request):
form = WordPressImportForm()
if request.method == 'POST' and form.validate(request.form):
dump = request.files.get('dump')
if form.data['download_url']:
try:
dump = open_url(form.data['download_url']).stream
except Exception, e:
log.exception(_('Error downloading feed'))
flash(_(u'Error downloading from URL: %s') % e, 'error')
if not dump:
return redirect_to('import/wordpress')
try:
blog = parse_feed(dump)
except Exception, e:
raise
log.exception(_(u'Error parsing uploaded file'))
flash(_(u'Error parsing uploaded file: %s') % e, 'error')
else:
self.enqueue_dump(blog)
flash(_(u'Added imported items to queue.'))
return redirect_to('admin/import')
return self.render_admin_page('admin/import_wordpress.html',
form=form.as_widget())
| StarcoderdataPython |
3334622 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import six
from functools import wraps, WRAPPER_ASSIGNMENTS
import tensorflow as tf
class TfTemplate(object):
    """This decorator wraps a method with `tf.make_template`. For example,
    Examples:
    ```python
    >>> @tf_template('scope_name')
    ... my_method():
    ... # Creates variables
    ```
    """
    def __init__(self, scope):
        # variable scope name passed to tf.make_template
        self.scope = scope
    @staticmethod
    def available_attrs(fn):
        """
        Return the list of functools-wrappable attributes on a callable.
        This is required as a workaround for http://bugs.python.org/issue3445
        under Python 2.
        """
        if six.PY3:
            return WRAPPER_ASSIGNMENTS
        else:
            return tuple(a for a in WRAPPER_ASSIGNMENTS if hasattr(fn, a))
    def __call__(self, func):
        this = self
        # the template is created once, at decoration time, so every call
        # of the wrapped function reuses the same variable scope
        templated_func = tf.make_template(this.scope, func)
        @wraps(func, assigned=TfTemplate.available_attrs(func))
        def inner(*args, **kwargs):
            return templated_func(*args, **kwargs)
        return inner
# lowercase alias so the class can be used directly as a decorator
tf_template = TfTemplate
| StarcoderdataPython |
12804448 | <filename>src/oci/database_migration/models/update_agent_details.py
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class UpdateAgentDetails(object):
    """
    ODMS Agent Details
    """
    def __init__(self, **kwargs):
        """
        Initializes a new UpdateAgentDetails object with values from keyword arguments.
        The following keyword arguments are supported (corresponding to the getters/setters of this class):
        :param display_name:
            The value to assign to the display_name property of this UpdateAgentDetails.
        :type display_name: str
        :param stream_id:
            The value to assign to the stream_id property of this UpdateAgentDetails.
        :type stream_id: str
        :param public_key:
            The value to assign to the public_key property of this UpdateAgentDetails.
        :type public_key: str
        :param version:
            The value to assign to the version property of this UpdateAgentDetails.
        :type version: str
        :param freeform_tags:
            The value to assign to the freeform_tags property of this UpdateAgentDetails.
        :type freeform_tags: dict(str, str)
        :param defined_tags:
            The value to assign to the defined_tags property of this UpdateAgentDetails.
        :type defined_tags: dict(str, dict(str, object))
        """
        # maps model attribute name -> declared swagger type (consumed by
        # the generic serialization helpers in oci.util)
        self.swagger_types = {
            'display_name': 'str',
            'stream_id': 'str',
            'public_key': 'str',
            'version': 'str',
            'freeform_tags': 'dict(str, str)',
            'defined_tags': 'dict(str, dict(str, object))'
        }
        # maps model attribute name -> JSON field name on the wire
        self.attribute_map = {
            'display_name': 'displayName',
            'stream_id': 'streamId',
            'public_key': 'publicKey',
            'version': 'version',
            'freeform_tags': 'freeformTags',
            'defined_tags': 'definedTags'
        }
        self._display_name = None
        self._stream_id = None
        self._public_key = None
        self._version = None
        self._freeform_tags = None
        self._defined_tags = None
    @property
    def display_name(self):
        """
        Gets the display_name of this UpdateAgentDetails.
        ODMS Agent name
        :return: The display_name of this UpdateAgentDetails.
        :rtype: str
        """
        return self._display_name
    @display_name.setter
    def display_name(self, display_name):
        """
        Sets the display_name of this UpdateAgentDetails.
        ODMS Agent name
        :param display_name: The display_name of this UpdateAgentDetails.
        :type: str
        """
        self._display_name = display_name
    @property
    def stream_id(self):
        """
        Gets the stream_id of this UpdateAgentDetails.
        The OCID of the Stream
        :return: The stream_id of this UpdateAgentDetails.
        :rtype: str
        """
        return self._stream_id
    @stream_id.setter
    def stream_id(self, stream_id):
        """
        Sets the stream_id of this UpdateAgentDetails.
        The OCID of the Stream
        :param stream_id: The stream_id of this UpdateAgentDetails.
        :type: str
        """
        self._stream_id = stream_id
    @property
    def public_key(self):
        """
        Gets the public_key of this UpdateAgentDetails.
        ODMS Agent public key.
        :return: The public_key of this UpdateAgentDetails.
        :rtype: str
        """
        return self._public_key
    @public_key.setter
    def public_key(self, public_key):
        """
        Sets the public_key of this UpdateAgentDetails.
        ODMS Agent public key.
        :param public_key: The public_key of this UpdateAgentDetails.
        :type: str
        """
        self._public_key = public_key
    @property
    def version(self):
        """
        Gets the version of this UpdateAgentDetails.
        ODMS Agent version
        :return: The version of this UpdateAgentDetails.
        :rtype: str
        """
        return self._version
    @version.setter
    def version(self, version):
        """
        Sets the version of this UpdateAgentDetails.
        ODMS Agent version
        :param version: The version of this UpdateAgentDetails.
        :type: str
        """
        self._version = version
    @property
    def freeform_tags(self):
        """
        Gets the freeform_tags of this UpdateAgentDetails.
        Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.
        Example: `{\"bar-key\": \"value\"}`
        :return: The freeform_tags of this UpdateAgentDetails.
        :rtype: dict(str, str)
        """
        return self._freeform_tags
    @freeform_tags.setter
    def freeform_tags(self, freeform_tags):
        """
        Sets the freeform_tags of this UpdateAgentDetails.
        Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.
        Example: `{\"bar-key\": \"value\"}`
        :param freeform_tags: The freeform_tags of this UpdateAgentDetails.
        :type: dict(str, str)
        """
        self._freeform_tags = freeform_tags
    @property
    def defined_tags(self):
        """
        Gets the defined_tags of this UpdateAgentDetails.
        Defined tags for this resource. Each key is predefined and scoped to a namespace.
        Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`
        :return: The defined_tags of this UpdateAgentDetails.
        :rtype: dict(str, dict(str, object))
        """
        return self._defined_tags
    @defined_tags.setter
    def defined_tags(self, defined_tags):
        """
        Sets the defined_tags of this UpdateAgentDetails.
        Defined tags for this resource. Each key is predefined and scoped to a namespace.
        Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`
        :param defined_tags: The defined_tags of this UpdateAgentDetails.
        :type: dict(str, dict(str, object))
        """
        self._defined_tags = defined_tags
    def __repr__(self):
        # human-readable dump of all model fields
        return formatted_flat_dict(self)
    def __eq__(self, other):
        # generated models compare by full attribute dict
        if other is None:
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not self == other
| StarcoderdataPython |
1684803 | <reponame>peerchemist/py-v-sdk
"""
account contains account-related resources
"""
from __future__ import annotations
import os
from typing import Any, Dict, TYPE_CHECKING, Type, Union
from loguru import logger
# https://stackoverflow.com/a/39757388
if TYPE_CHECKING:
from py_v_sdk import chain as ch
from py_v_sdk import api
from py_v_sdk import model as md
from py_v_sdk import tx_req as tx
from py_v_sdk import dbput as dp
from py_v_sdk.utils.crypto import hashes as hs
from py_v_sdk.utils.crypto import curve_25519 as curve
from py_v_sdk import words as wd
class Wallet:
    """
    Wallet is a collection of accounts.
    """
    def __init__(self, seed: md.Seed) -> None:
        """
        Args:
            seed (md.Seed): The seed of the wallet.
        """
        self._seed = seed
    @property
    def seed(self) -> md.Seed:
        """
        seed returns the seed of the wallet.
        Returns:
            md.Seed: The seed of the wallet.
        """
        return self._seed
    @classmethod
    def from_seed_str(cls, s: str) -> Wallet:
        """
        from_seed_str creates a wallet from a seed string.
        Args:
            s (str): The seed string.
        Returns:
            Wallet: The wallet.
        """
        return cls(md.Seed(s))
    @classmethod
    def register(cls) -> Wallet:
        """
        register creates a new wallet with a newly generated seed.
        Returns:
            Wallet: The wallet.
        """
        return cls(cls.new_seed())
    def get_account(self, chain: ch.Chain, nonce: int = 0) -> Account:
        """
        get_account gets the account of the nonce of the wallet on the given chain.
        Args:
            chain (ch.Chain): The chain that the account is on.
            nonce (int, optional): The nonce of the account. Defaults to 0.
        Returns:
            Account: The account.
        """
        return Account(
            chain=chain,
            wallet=self,
            nonce=nonce,
        )
    @staticmethod
    def new_seed() -> md.Seed:
        """
        new_seed generates a seed for a wallet
        Returns:
            md.Seed: The generated seed.
        """
        word_cnt = 2048
        words = []
        # 5 rounds x 3 words = a 15-word mnemonic seed
        for _ in range(5):
            # derive three word indices from a single 32-bit random value
            # (bytes combined big-endian)
            r = os.urandom(4)
            x = r[3] + (r[2] << 8) + (r[1] << 16) + (r[0] << 24)
            w1 = x % word_cnt
            w2 = (x // word_cnt + w1) % word_cnt
            w3 = (x // word_cnt // word_cnt + w2) % word_cnt
            words.append(wd.WORDS[w1])
            words.append(wd.WORDS[w2])
            words.append(wd.WORDS[w3])
        s = " ".join(words)
        return md.Seed(s)
    @staticmethod
    def get_key_pair(acnt_seed_hash: bytes) -> md.KeyPair:
        """
        get_key_pair generates a key pair based on the given account seed hash.
        Args:
            acnt_seed_hash (bytes): The account seed hash.
        Returns:
            md.KeyPair: The generated key pair.
        """
        # curve25519 private key derived from the seed hash, public key
        # derived from the private key
        pri_key = curve.gen_pri_key(acnt_seed_hash)
        pub_key = curve.gen_pub_key(pri_key)
        return md.KeyPair(
            pub=md.PubKey.from_bytes(pub_key),
            pri=md.PriKey.from_bytes(pri_key),
        )
    @staticmethod
    def get_addr(pub_key: bytes, addr_ver: int, chain_id: ch.ChainID) -> md.Bytes:
        """
        get_addr generates the address based on the given data.
        Args:
            pub_key (bytes): The public key.
            addr_ver (int): The address version.
            chain_id (ch.ChainID): The chain ID.
        Returns:
            md.Bytes: The generated address.
        """
        def ke_bla_hash(b: bytes) -> bytes:
            # keccak256 over blake2b: the double hash used by the protocol
            return hs.keccak256_hash(hs.blake2b_hash(b))
        # address body = version byte + chain id + first 20 bytes of the
        # double-hashed public key
        raw_addr: str = (
            chr(addr_ver) + chain_id.value + ke_bla_hash(pub_key).decode("latin-1")[:20]
        )
        # checksum = first 4 bytes of the double hash of the address body
        checksum: str = ke_bla_hash(raw_addr.encode("latin-1")).decode("latin-1")[:4]
        b = bytes((raw_addr + checksum).encode("latin-1"))
        return md.Bytes(b)
    @staticmethod
    def get_acnt_seed_hash(seed: str, nonce: int) -> md.Bytes:
        """
        get_acnt_seed_hash generates account seed hash based on the given seed & nonce.
        Args:
            seed (str): The account seed.
            nonce (int): The account nonce.
        Returns:
            md.Bytes: The generated account seed hash.
        """
        # sha256(keccak256(blake2b("{nonce}{seed}")))
        b = hs.sha256_hash(
            hs.keccak256_hash(hs.blake2b_hash(f"{nonce}{seed}".encode("latin-1")))
        )
        return md.Bytes(b)
class Account:
    """
    Account is a class for an account on the chain.
    """
    # address format version byte used when deriving the account address
    ADDR_VER = 5
    def __init__(self, chain: ch.Chain, wallet: Wallet, nonce: int = 0) -> Account:
        """
        Args:
            chain (ch.Chain): The chain that the account is on.
            wallet (Wallet): The wallet that owns the account.
            nonce (int, optional): The nonce of the account. Defaults to 0.
        """
        self._chain = chain
        self._wallet = wallet
        self._nonce = md.Nonce(nonce)
        # keys and address are all deterministically derived from
        # seed + nonce via the wallet helpers
        self._acnt_seed_hash = wallet.get_acnt_seed_hash(wallet.seed.data, nonce)
        self._key_pair = wallet.get_key_pair(self._acnt_seed_hash.data)
        self._addr = wallet.get_addr(
            self.key_pair.pub.bytes, self.ADDR_VER, self.chain.chain_id
        )
    @property
    def chain(self) -> ch.Chain:
        """
        chain returns the chain that the account is on.
        Returns:
            ch.Chain: The chain that the account is on.
        """
        return self._chain
    @property
    def api(self) -> api.NodeAPI:
        """
        api returns the NodeAPI object that the account's chain uses.
        Returns:
            api.NodeAPI: The NodeAPI object that the account's chain uses.
        """
        return self._chain.api
    @property
    def wallet(self) -> Wallet:
        """
        wallet returns the Wallet object for the wallet the account belongs to.
        Returns:
            Wallet: The wallet object.
        """
        return self._wallet
    @property
    def nonce(self) -> md.Nonce:
        """
        nonce returns the account's nonce.
        Returns:
            md.Nonce: The account's nonce.
        """
        return self._nonce
    @property
    def acnt_seed_hash(self) -> md.Bytes:
        """
        acnt_seed_hash returns the account's account seed hash.
        Returns:
            md.Bytes: The account's account seed hash.
        """
        return self._acnt_seed_hash
    @property
    def key_pair(self) -> md.KeyPair:
        """
        key_pair returns the account's key pair.
        Returns:
            md.KeyPair: The account's key pair.
        """
        return self._key_pair
    @property
    def addr(self) -> md.Bytes:
        """
        addr returns the account's address.
        Returns:
            md.Bytes: The account's address.
        """
        return self._addr
    @property
    async def balance(self) -> int:
        """
        balance returns the account's balance.
        Returns:
            int: The account's balance.
        """
        resp = await self.api.addr.get_balance(self.addr.b58_str)
        return resp["balance"]
    @property
    async def effective_balance(self) -> int:
        """
        effective_balance returns the account's effective balance(i.e. The balance that can be spent).
        Returns:
            int: The account's effective balance.
        """
        resp = await self.api.addr.get_effective_balance(self.addr.b58_str)
        return resp["balance"]
    async def _pay(self, req: tx.PaymentTxReq) -> Dict[str, Any]:
        """
        _pay sends a payment transaction request on behalf of the account.
        Args:
            req (tx.PaymentTxReq): The payment transaction request.
        Returns:
            Dict[str, Any]: The response returned by the Node API.
        """
        return await self.api.vsys.broadcast_payment(
            req.to_broadcast_payment_payload(self.key_pair)
        )
    async def pay(
        self,
        recipient: str,
        amount: Union[int, float],
        attachment: str = "",
        fee: int = md.PaymentFee.DEFAULT,
    ) -> Dict[str, Any]:
        """
        pay pays the VSYS coins from the action taker to the recipient.
        Args:
            recipient (str): The account address of the recipient.
            amount (Union[int, float]): The amount of VSYS coins to send.
            attachment (str, optional): The attachment of the action. Defaults to "".
            fee (int, optional): The fee to pay for this action. Defaults to md.PaymentFee.DEFAULT.
        Returns:
            Dict[str, Any]: The response returned by the Node API.
        """
        rcpt_md = md.Addr(recipient)
        # reject recipients that belong to a different chain
        rcpt_md.must_on(self.chain)
        data = await self._pay(
            tx.PaymentTxReq(
                recipient=rcpt_md,
                amount=md.VSYS.for_amount(amount),
                timestamp=md.VSYSTimestamp.now(),
                attachment=md.Str(attachment),
                fee=md.PaymentFee(fee),
            )
        )
        logger.debug(data)
        return data
    async def _lease(self, req: tx.LeaseTxReq) -> Dict[str, Any]:
        """
        _lease sends a leasing transaction request on behalf of the account.
        Args:
            req (tx.LeaseTxReq): The leasing transaction request.
        Returns:
            Dict[str, Any]: The response returned by the Node API.
        """
        return await self.api.leasing.broadcast_lease(
            req.to_broadcast_leasing_payload(self.key_pair)
        )
    async def lease(
        self,
        supernode_addr: str,
        amount: Union[int, float],
        fee: int = md.LeasingFee.DEFAULT,
    ) -> Dict[str, Any]:
        """
        lease leases the VSYS coins from the action taker to the recipient(a supernode).
        Args:
            supernode_addr (str): The account address of the supernode to lease to.
            amount (Union[int, float]): The amount of VSYS coins to send.
            fee (int, optional): The fee to pay for this action. Defaults to md.LeasingFee.DEFAULT.
        Returns:
            Dict[str, Any]: The response returned by the Node API.
        """
        addr_md = md.Addr(supernode_addr)
        # reject supernodes that belong to a different chain
        addr_md.must_on(self.chain)
        data = await self._lease(
            tx.LeaseTxReq(
                supernode_addr=addr_md,
                amount=md.VSYS.for_amount(amount),
                timestamp=md.VSYSTimestamp.now(),
                fee=md.LeasingFee(fee),
            )
        )
        logger.debug(data)
        return data
    async def _cancel_lease(self, req: tx.LeaseCancelTxReq) -> Dict[str, Any]:
        """
        _cancel_lease sends a leasing cancel transaction request on behalf of the account.
        Args:
            req (tx.LeaseCancelTxReq): The leasing cancel transaction request.
        Returns:
            Dict[str, Any]: The response returned by the Node API.
        """
        return await self.api.leasing.broadcast_cancel(
            req.to_broadcast_cancel_payload(self.key_pair)
        )
    async def cancel_lease(
        self,
        leasing_tx_id: str,
        fee: int = md.LeasingCancelFee.DEFAULT,
    ) -> Dict[str, Any]:
        """
        cancel_lease cancels the leasing.
        Args:
            leasing_tx_id (str): The transaction ID of the leasing.
            fee (int, optional): The fee to pay for this action. Defaults to md.LeasingCancelFee.DEFAULT.
        Returns:
            Dict[str, Any]: The response returned by the Node API.
        """
        data = await self._cancel_lease(
            tx.LeaseCancelTxReq(
                leasing_tx_id=md.TXID(leasing_tx_id),
                timestamp=md.VSYSTimestamp.now(),
                fee=md.LeasingCancelFee(fee),
            )
        )
        logger.debug(data)
        return data
    async def _register_contract(self, req: tx.RegCtrtTxReq) -> Dict[str, Any]:
        """
        _register_contract sends a register contract transaction on behalf of the account.
        Args:
            req (tx.RegCtrtTxReq): The register contract transaction request.
        Returns:
            Dict[str, Any]: The response returned by the Node API.
        """
        return await self.api.ctrt.broadcast_register(
            req.to_broadcast_register_payload(self.key_pair)
        )
    async def _execute_contract(self, req: tx.ExecCtrtFuncTxReq) -> Dict[str, Any]:
        """
        _execute_contract sends an execute contract transaction on behalf of the account.
        Args:
            req (tx.ExecCtrtFuncTxReq): The execute contract transaction request.
        Returns:
            Dict[str, Any]: The response returned by the Node API.
        """
        return await self.api.ctrt.broadcast_execute(
            req.to_broadcast_execute_payload(self.key_pair)
        )
    async def _db_put(self, req: tx.DBPutTxReq) -> Dict[str, Any]:
        """
        _db_put sends a DB Put transaction on behalf of the account.
        Args:
            req (tx.DBPutTxReq): The DB Put transaction request.
        Returns:
            Dict[str, Any]: The response returned by the Node API.
        """
        return await self.api.db.broadcasts_put(
            req.to_broadcast_put_payload(self.key_pair)
        )
    async def db_put(
        self,
        db_key: str,
        data: str,
        data_type: Type[dp.DBPutData] = dp.ByteArray,
        fee: int = md.DBPutFee.DEFAULT,
    ) -> Dict[str, Any]:
        """
        db_put stores the data under the key onto the chain.
        Args:
            db_key (str): The db key of the data.
            data (str): The data to put.
            data_type (Type[dp.DBPutData], optional): The type of the data(i.e. how should the string be parsed).
                Defaults to dp.ByteArray.
            fee (int, optional): The fee to pay for this action. Defaults to md.DBPutFee.DEFAULT.
        Returns:
            Dict[str, Any]: The response returned by the Node API.
        """
        # NOTE(review): the `data` parameter is rebound here to the API
        # response, shadowing the original payload string.
        data = await self._db_put(
            tx.DBPutTxReq(
                db_key=dp.DBPutKey.from_str(db_key),
                data=dp.DBPutData.new(data, data_type),
                timestamp=md.VSYSTimestamp.now(),
                fee=md.DBPutFee(fee),
            )
        )
        logger.debug(data)
        return data
| StarcoderdataPython |
272971 | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dcgm_field_helpers import FieldValueEncoder
from DcgmReader import DcgmReader
import abc
import dcgm_fields
import json
import dcgm_structs
import time
import logger
# prometheus_client is an optional dependency: keep the module importable
# without it, but tell the operator how to install it.  The warning used to
# execute at module level, so it fired even when the package WAS installed;
# it now only runs in the ImportError branch.
try:
    from prometheus_client import start_http_server, Gauge
except ImportError:
    logger.warning("prometheus_client not installed, please run: \"pip install prometheus_client\"")
ignore_List = [dcgm_fields.DCGM_FI_DEV_PCI_BUSID, dcgm_fields.DCGM_FI_DEV_UUID]
publishFieldIds = [
dcgm_fields.DCGM_FI_DEV_PCI_BUSID, #Needed for plugin_instance
dcgm_fields.DCGM_FI_DEV_POWER_USAGE,
dcgm_fields.DCGM_FI_DEV_GPU_TEMP,
dcgm_fields.DCGM_FI_DEV_SM_CLOCK,
dcgm_fields.DCGM_FI_DEV_GPU_UTIL,
dcgm_fields.DCGM_FI_DEV_RETIRED_PENDING,
dcgm_fields.DCGM_FI_DEV_RETIRED_SBE,
dcgm_fields.DCGM_FI_DEV_RETIRED_DBE,
dcgm_fields.DCGM_FI_DEV_ECC_SBE_AGG_TOTAL,
dcgm_fields.DCGM_FI_DEV_ECC_DBE_AGG_TOTAL,
dcgm_fields.DCGM_FI_DEV_FB_TOTAL,
dcgm_fields.DCGM_FI_DEV_FB_FREE,
dcgm_fields.DCGM_FI_DEV_FB_USED,
dcgm_fields.DCGM_FI_DEV_PCIE_REPLAY_COUNTER,
dcgm_fields.DCGM_FI_DEV_UUID
]
class DcgmJson(DcgmReader):
    """Collects DCGM field values and renders them as a list of JSON strings.

    Usage:
        obj = DcgmJson()
        jsons = obj.CreateJson()
        obj.Shutdown()
    """

    def __init__(self):
        # Collect only the published field IDs; ignored fields are still
        # fetched (UUID/bus ID are needed as metadata) but never emitted.
        DcgmReader.__init__(self, fieldIds=publishFieldIds, ignoreList=ignore_List)
        self.m_jsonData = {}  # most recently built per-field JSON record
        self.m_list = []      # accumulated JSON strings, one per (gpu, field)

    ###########################################################################
    def CustomDataHandler(self, fvs):
        """Build one JSON document per (gpuId, fieldId) pair and append each
        to ``self.m_list``.

        Called back by ``DcgmReader.Process()``.

        Args:
            fvs: mapping of gpuId -> {fieldId: [field values]} with info about
                the values of field IDs for each gpuId.
        """
        # Iterate .items() directly instead of materializing list(keys()) and
        # re-indexing — same order, fewer lookups.
        for gpuId, gpuFv in fvs.items():
            typeInstance = str(gpuId)
            for fieldId, fieldValues in gpuFv.items():
                if fieldId in self.m_dcgmIgnoreFields:
                    continue
                self.m_jsonData = {
                    "GpuId": typeInstance,
                    "UUID": (gpuFv[dcgm_fields.DCGM_FI_DEV_UUID][-1]).value,
                    "FieldTag": self.m_fieldIdToInfo[fieldId].tag,
                    "FieldValues": json.dumps(fieldValues, cls=FieldValueEncoder),
                }
                self.m_list.append(json.dumps(self.m_jsonData))

    ###########################################################################
    def CreateJson(self, data=None):
        """Run one collection pass and return the accumulated JSON strings.

        Args:
            data: unused; kept for backward compatibility with existing
                callers.

        Returns:
            list: one JSON document string per (gpu, field) collected so far.
                NOTE(review): the list accumulates across calls — it is never
                cleared between invocations.
        """
        self.Process()
        return self.m_list
    ###########################################################################
###########################################################################
###############################################################################
# Usage: #
# #
# obj = DcgmJson() #
# #
# obj.createJson() #
# #
# obj.shutdown() #
###############################################################################
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.