id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
172538 | <filename>Python/Tests/TestData/RemoveImport/EmptyFuncDef2.py
def f():
import fob
import oar | StarcoderdataPython |
def bubble_sort(lista):
    """Sort *lista* in place using bubble sort and return it.

    :param lista: list of mutually comparable items.
    :returns: the same list object, sorted in ascending order.
    """
    n = len(lista)
    for i in range(n):
        # After pass i, the last i elements are already in final position.
        swapped = False
        for j in range(0, n - i - 1):
            if lista[j] > lista[j + 1]:
                # Pythonic tuple swap instead of a temp variable.
                lista[j], lista[j + 1] = lista[j + 1], lista[j]
                swapped = True
        if not swapped:
            # Early exit: a pass with no swaps means the list is sorted.
            break
    return lista


bubble_sort([12, 31, 5, 3, 0, 43, 99, 78, 32, 9, 7])
9796998 | <filename>messaging/migrations/0005_slacklog_type.py
# Generated by Django 3.1.13 on 2021-11-06 12:21
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: redeclares SlackLog.type with a fixed set of
    # integer choices. Depends on the migration that added the channel field.
    dependencies = [
        ("messaging", "0004_slacklog_channel"),
    ]
    operations = [
        # AlterField re-declares the column; the integer choices are enforced
        # at the validation layer, not as a database constraint, so existing
        # rows keep whatever value they already store.
        migrations.AlterField(
            model_name="slacklog",
            name="type",
            field=models.PositiveSmallIntegerField(
                choices=[(1, "ARTICLE"), (2, "EVENT"), (3, "WARNING"), (4, "JOB")],
                default=1,
            ),
        ),
    ]
| StarcoderdataPython |
74510 | <filename>website/migrations/0006_auto_20181006_2147.py
# Generated by Django 2.1.2 on 2018-10-06 20:47
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: fixes the admin-facing plural names of four
    # models. AlterModelOptions only touches Django metadata, so this
    # migration performs no database operations.
    dependencies = [
        ('website', '0005_committeerolemember_role_short_name'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='committeerolemember',
            options={'verbose_name_plural': 'committee roles members'},
        ),
        migrations.AlterModelOptions(
            name='society',
            options={'verbose_name_plural': 'societies'},
        ),
        migrations.AlterModelOptions(
            name='societylink',
            options={'verbose_name_plural': 'societies links'},
        ),
        migrations.AlterModelOptions(
            name='sponsorlink',
            options={'verbose_name_plural': 'sponsors links'},
        ),
    ]
| StarcoderdataPython |
8171847 | #
# Copyright 2019-2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC, abstractmethod
from typing import Dict, Mapping, Optional
from overrides import overrides
from typing_extensions import Final, final
from .._util.json_ import JsonSerializable
from ..tweet.tweet_stream import TweetStream
DEFAULT_MAX_TWEETS: Final = 100
DEFAULT_BATCH_SIZE: Final = 20
class Request(ABC, JsonSerializable):
    """Abstract base class for a Twitter crawl request.

    Concrete subclasses (Search, Replies, Thread) share the max_tweets /
    batch_size configuration and the JSON (de)serialization protocol
    defined here.
    """

    def __init__(self, *, max_tweets: Optional[int], batch_size: int):
        """Construct a new request.

        :param max_tweets: Stop retrieving Tweets after this many tweets have
            been found. Set to None in order to receive as many Tweets as
            possible. Note that this can return quite a lot of tweets,
            especially if using Search, Filter.LATEST and no date range.
        :param batch_size: The batch size in which Tweets should be retrieved.
            The normal web interface always queries 20 Tweets per batch. Twitter
            interprets this parameter more as a guideline and can either return
            more or less then the requested amount. This does not indicate that
            no more matching Tweets exist after this batch.
            Note that by setting anything unequal to 20 here, we make ourselves
            easily distinguishable from a normal web browser. Additionally,
            advanced queries like using AND or OR seem to no longer work as
            intended. For Thread and Reply, increasing the batch_size is likely
            to also increase the number of results (no idea why Twitter is doing
            this).
            This parameter can be used to speed up the retrieval performance, by
            reducing the HTTP overhead as less requests have to be performed per
            returned Tweet. If you want to do this, we identified 100 to be a
            good value because increasing it further does seem not return more
            Tweets per request.
        """
        if max_tweets is not None and max_tweets <= 0:
            raise ValueError("If max_tweets is given, it must be positive.")
        if batch_size <= 0:
            raise ValueError("batch_size must be positive.")
        self.max_tweets: Final = max_tweets
        self.batch_size: Final = batch_size
    @final
    @overrides
    def __eq__(self, other: object) -> bool:
        # Strict same-type equality: a Search and a Thread with identical
        # attributes deliberately compare unequal.
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable — confirm Requests are never used as dict keys/set members.
        return type(self) == type(other) and self.__dict__ == other.__dict__
    @abstractmethod
    @overrides
    def to_json(self) -> Mapping[str, object]:
        # Base serialization: subclasses extend via super().to_json().
        # Only values differing from the defaults are written out, keeping
        # the serialized form minimal.
        obj: Dict[str, object] = {
            "type": type(self).__name__,
        }
        if self.max_tweets != DEFAULT_MAX_TWEETS:
            obj["max_tweets"] = self.max_tweets
        if self.batch_size != DEFAULT_BATCH_SIZE:
            obj["batch_size"] = self.batch_size
        return obj
    @classmethod
    @abstractmethod
    @overrides
    def from_json(cls, obj: Mapping[str, object]) -> "Request":
        # Dispatch on the serialized "type" field to the concrete subclass.
        # Imports are local to avoid a circular import with the subclasses.
        from .replies import Replies
        from .search import Search
        from .thread import Thread
        if obj["type"] == Search.__name__:
            return Search.from_json(obj)
        elif obj["type"] == Replies.__name__:
            return Replies.from_json(obj)
        elif obj["type"] == Thread.__name__:
            return Thread.from_json(obj)
        raise RuntimeError("Unknown request type: '{}'.".format(obj["type"]))
    @abstractmethod
    def request(self) -> TweetStream:
        """Execute this request and return a stream of matching Tweets."""
        raise NotImplementedError()
| StarcoderdataPython |
3404832 | <filename>evol/utils.py
from inspect import signature
from typing import List, Callable, Union, Sequence, Any, Generator
from evol import Individual
def offspring_generator(parents: List[Individual],
                        parent_picker: Callable[..., Union[Individual, Sequence]],
                        combiner: Callable[..., Any],
                        **kwargs) -> Generator[Individual, None, None]:
    """Endlessly yield offspring built from the given parent pool.

    Each iteration picks parents with *parent_picker*, extracts their
    chromosomes, and combines them with *combiner*. A combiner may yield
    several children (generator) or return a single chromosome; either way
    every child is wrapped in a fresh ``Individual``.

    :param parents: List of parents.
    :param parent_picker: Function that selects parents. Must accept a sequence
        of individuals and return a single individual or a sequence of
        individuals. Must accept all kwargs passed (i.e. must be decorated by
        select_arguments).
    :param combiner: Function that combines chromosomes. Must accept a tuple of
        chromosomes and either return a single chromosome or yield multiple
        chromosomes. Must accept all kwargs passed (i.e. must be decorated by
        select_arguments).
    :param kwargs: Arguments
    :returns: Children
    """
    while True:
        picked = parent_picker(parents, **kwargs)
        # A single Individual and a sequence of them are both allowed;
        # normalise to a tuple of chromosomes.
        if isinstance(picked, Individual):
            genomes = (picked.chromosome,)
        else:
            genomes = tuple(parent.chromosome for parent in picked)
        offspring = combiner(*genomes, **kwargs)
        if isinstance(offspring, Generator):
            # Multiple children produced lazily by the combiner.
            yield from (Individual(chromosome=child) for child in offspring)
        else:
            yield Individual(chromosome=offspring)
def select_arguments(func: Callable) -> Callable:
    """Decorate *func* so the wrapper accepts (and filters) any keyword arguments.

    The returned wrapper accepts arbitrary keyword arguments but forwards only
    those that *func* can actually receive; if *func* declares a ``**kwargs``
    catch-all, every keyword argument is forwarded unchanged. This allows the
    same keyword arguments to be passed to multiple decorated functions, even
    if they do not all accept them.

    Unlike the previous try/except-retry approach, the signature is inspected
    once up front, so a ``TypeError`` raised *inside* ``func`` propagates
    instead of silently triggering a second call with filtered arguments
    (which could run side effects twice and mask real bugs).

    :param func: Function to decorate.
    :return: Callable
    """
    from inspect import Parameter  # local: file level only imports ``signature``

    try:
        params = signature(func).parameters
    except (TypeError, ValueError):
        # Some callables (e.g. certain builtins) have no introspectable
        # signature; pass them through unchanged.
        return func

    # A **kwargs catch-all means func accepts every keyword argument anyway.
    accepts_any_kwargs = any(
        p.kind == Parameter.VAR_KEYWORD for p in params.values()
    )

    def result(*args, **kwargs):
        if accepts_any_kwargs:
            return func(*args, **kwargs)
        return func(*args, **{k: v for k, v in kwargs.items() if k in params})
    return result
| StarcoderdataPython |
4983164 | <reponame>libaibuaidufu/flask-blog
#!/usr/bin/env python
#coding:utf-8
# Flask-Script management entry point for the blog application.
# NOTE(review): this is Python 2-only code — reload(sys) and
# sys.setdefaultencoding do not exist on Python 3, and Flask-Script is
# long deprecated in favour of the built-in `flask` CLI.
import os,sys
from app import create_app,db
from app.models import User,Role,Liuyan,Post,Follow,Fenlei
from flask_script import Manager,Shell
from flask_migrate import Migrate,MigrateCommand
# Force the process-wide default encoding to UTF-8 (Python 2 hack).
reload(sys)
sys.setdefaultencoding('utf-8')
# Build the app from the DFK_CONFIG environment variable, defaulting to the
# 'default' configuration.
app = create_app(os.getenv('DFK_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app,db)
def make_shell_context():
    # Pre-populate the `manage.py shell` namespace with the app, db handle
    # and all models so they don't have to be imported interactively.
    return dict(app=app, db=db, User=User, Role=Role,Liuyan=Liuyan,Fenlei=Fenlei,Post=Post,Follow=Follow)
manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
@manager.command
def test():
    """Run the unit tests."""
    # Discover and run everything under the tests/ directory.
    import unittest
    tests = unittest.TestLoader().discover('tests')
    unittest.TextTestRunner(verbosity=2).run(tests)
if __name__ == "__main__":
    manager.run() | StarcoderdataPython |
11318780 | <filename>classification_api/PlainTextParser.py
import codecs
from django.conf import settings
from rest_framework.exceptions import ParseError
from rest_framework.parsers import BaseParser
class PlainTextParser(BaseParser):
    """DRF parser for ``text/plain`` request bodies.

    Returns the decoded request body as a plain string instead of
    attempting any structured (JSON/form) parsing.
    """

    media_type = "text/plain"

    def parse(self, stream, media_type=None, parser_context=None):
        """
        Parses the incoming bytestream as Plain Text and returns the resulting data.
        """
        parser_context = parser_context or {}
        # Honour the request encoding negotiated upstream; fall back to the
        # project-wide default charset.
        encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
        try:
            decoded_stream = codecs.getreader(encoding)(stream)
            return decoded_stream.read()
        except ValueError as exc:
            # Chain the original decode error (previously `exc` was bound but
            # discarded) so the root cause shows up in tracebacks/logs.
            raise ParseError('Plain text parse error') from exc
| StarcoderdataPython |
9674419 | <reponame>tsroten/yweather<gh_stars>10-100
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2013 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""a Python module that provides an interface to the Yahoo! Weather RSS Feed.
Classes:
Client: interface with the Yahoo! Weather RSS Feed.
Constants:
    WOEID_LOOKUP_URL: the URL used to fetch a location's corresponding WOEID.
WEATHER_URL: the URL used to fetch a WOEID's weather.
LID_LOOKUP_URL: the URL used to fetch a location's corresponding LID.
LID_WEATHER_URL: the URL used to fetch a LID's weather.
WEATHER_NS: the XML namespace used in the weather RSS feed.
GEO_NS: the XML namespace used for the coordinates in the RSS feed.
CONDITION_IMAGE_URL: the URL of an image depicting the current conditions.
UNITS: a dict that maps data names to units.
"""
try:
from urllib.request import urlopen
from urllib.parse import quote
except ImportError:
from urllib2 import urlopen
from urllib import quote
import contextlib
import re
import xml.etree.ElementTree
# Yahoo! Weather endpoint templates. {0}/{1} placeholders are filled with
# the location id and the unit system ("c" metric / "f" imperial).
WOEID_LOOKUP_URL = ("http://locdrop.query.yahoo.com/v1/public/yql?"
                    "q=select%20woeid%20from%20locdrop.placefinder%20"
                    "where%20text='{0}'")
WEATHER_URL = "http://xml.weather.yahoo.com/forecastrss?w={0}&u={1}"
LID_LOOKUP_URL = WEATHER_URL
LID_WEATHER_URL = "http://xml.weather.yahoo.com/forecastrss/{0}_{1}.xml"
WEATHER_NS = "http://xml.weather.yahoo.com/ns/rss/1.0"
GEO_NS = "http://www.w3.org/2003/01/geo/wgs84_pos#"
CONDITION_IMAGE_URL = "http://l.yimg.com/a/i/us/we/52/{0}.gif"
# Unit labels for each feed field, keyed by unit system ("c"/"f"), then by
# feed section and field name.
UNITS = {
    "c": {
        "wind": {
            "chill": "°C",
            "direction": "°",
            "speed": "km/h"},
        "atmosphere": {
            "humidity": "%",
            "visibility": "km",
            "pressure": "hPa"},
        "condition": {
            "temp": "°C"},
        "forecast": {
            "low": "°C",
            "high": "°C"},
    },
    "f": {
        "wind": {
            "chill": "°F",
            "direction": "°",
            "speed": "mph"},
        "atmosphere": {
            "humidity": "%",
            "visibility": "mi",
            # NOTE(review): Yahoo's imperial pressure unit is usually quoted
            # as inches of mercury, not psi — confirm before relying on this.
            "pressure": "psi"},
        "condition": {
            "temp": "°F"},
        "forecast": {
            # Fixed: "low" previously used a garbled modifier letter ("ห")
            # instead of the degree sign used by every other entry.
            "low": "°F",
            "high": "°F"},
    },
}
class Client(object):
    """Interface with the Yahoo! Weather RSS feed.
    Provides methods to search for location data and fetch weather data.
    Methods:
        fetch_lid: fetch a location's LID.
        fetch_woeid: fetch a location's WOEID.
        fetch_weather: fetch a location's weather.
    """
    def fetch_lid(self, woeid):
        """Fetch a location's corresponding LID.
        Args:
            woeid: (string) the location's WOEID.
        Returns:
            a string containing the requested LID or None if the LID could
            not be found.
        Raises:
            urllib.error.URLError: urllib.request could not open the URL
                (Python 3).
            urllib2.URLError: urllib2 could not open the URL (Python 2).
            xml.etree.ElementTree.ParseError: xml.etree.ElementTree failed to
                parse the XML document.
        """
        rss = self._fetch_xml(LID_LOOKUP_URL.format(woeid, "f"))
        # We are pulling the LID from the permalink tag in the XML file
        # returned by Yahoo.
        try:
            link = rss.find("channel/link").text
        except AttributeError:
            return None
        # The regex assumes the format XXXXNNNN for the LID (string.split on
        # "/forecast/" would also work but is tied to the exact link layout).
        match = re.search("[A-Za-z]{4}[0-9]{4}", link)
        if match is None:
            # Fix: an unexpected link format previously raised AttributeError
            # instead of honouring the documented "None if not found".
            return None
        return match.group()
    def fetch_weather(self, id, metric=False):
        """Fetch a location's weather.
        *id* can be either a WOEID or LID. The weather data returned for each
        is identical except that the WOEID returns a 2-day forecast and the LID
        returns a 5-day forecast. The LID uses an undocumented API, so use it
        at your own risk.
        Args:
            id: (string) the location's WOEID or LID.
            metric: (bool) return metric data; defaults to False.
        Returns:
            a dict containing the location's weather data or None if
            the weather data couldn't be fetched.
        Raises:
            urllib.error.URLError: urllib.request could not open the URL
                (Python 3).
            urllib2.URLError: urllib2 could not open the URL (Python 2).
            xml.etree.ElementTree.ParseError: xml.etree.ElementTree failed to
                parse the XML document.
        """
        # (The parameter name *id* shadows the builtin but is kept for
        # backward compatibility with existing keyword callers.)
        units = "c" if metric else "f"
        # WOEID is a 32-bit integer, while LID is XXXXNNNN, where X is a letter
        # and N is a number. So, we pick the URL to use based on whether or not
        # the *id* begins with a letter.
        if re.match("^[A-Za-z]", id):
            url = LID_WEATHER_URL.format(id, units)
        else:
            url = WEATHER_URL.format(id, units)
        rss = self._fetch_xml(url)
        if rss.find("channel/item/title").text == "City not found":
            return None
        # xml_items details which tags should be read and what their
        # destination dict key should be. These tags don't appear
        # multiple times.
        # {XML tag: [ElementTree access method, dict key]}
        xml_items = {
            "channel/title": ["text", "title"],
            "channel/link": ["text", "link"],
            "channel/language": ["text", "language"],
            "channel/description": ["text", "description"],
            "channel/lastBuildDate": ["text", "lastBuildDate"],
            "channel/ttl": ["text", "ttl"],
            "channel/image/url": ["text", "logo"],
            "channel/item/guid": ["text", "guid"],
            "channel/{%s}location" % WEATHER_NS:
                ["attrib", "location"],
            # "channel/{%s}units" % WEATHER_NS:
            #     ["attrib", "units"],
            "channel/{%s}wind" % WEATHER_NS:
                ["attrib", "wind"],
            "channel/{%s}atmosphere" % WEATHER_NS:
                ["attrib", "atmosphere"],
            "channel/{%s}astronomy" % WEATHER_NS:
                ["attrib", "astronomy"],
            "channel/item/{%s}condition" % WEATHER_NS:
                ["attrib", "condition"],
        }
        weather = {}
        weather["units"] = UNITS[units]
        for (tag, meta) in xml_items.items():
            # Missing tags produce None values rather than raising.
            if meta[0] == "text":
                try:
                    weather[meta[1]] = rss.find(tag).text
                except AttributeError:
                    weather[meta[1]] = None
            elif meta[0] == "attrib":
                try:
                    weather[meta[1]] = rss.find(tag).attrib
                except AttributeError:
                    weather[meta[1]] = None
            else:
                weather[meta[1]] = None
        # Best-effort enrichments below: KeyError added to the caught
        # exceptions because a present-but-incomplete attrib dict (e.g. a
        # condition without "code") previously crashed the whole fetch.
        try:
            image_url = CONDITION_IMAGE_URL.format(weather["condition"]["code"])
            weather["condition"]["image"] = image_url
        except (AttributeError, TypeError, KeyError):
            pass
        try:
            state = weather["atmosphere"]["rising"]
            if state == "0":
                weather["atmosphere"]["state"] = "steady"
            elif state == "1":
                weather["atmosphere"]["state"] = "rising"
            elif state == "2":
                weather["atmosphere"]["state"] = "falling"
            else:
                weather["atmosphere"]["state"] = None
        except (AttributeError, TypeError, KeyError):
            pass
        weather["forecast"] = []
        try:
            for item in rss.findall(
                    "channel/item/{%s}forecast" % WEATHER_NS):
                weather["forecast"].append(item.attrib)
        except AttributeError:
            weather["forecast"] = None
        weather["geo"] = {}
        try:
            weather["geo"]["lat"] = rss.find(
                "channel/item/{%s}lat" % GEO_NS).text
            weather["geo"]["long"] = rss.find(
                "channel/item/{%s}long" % GEO_NS).text
        except AttributeError:
            weather["geo"] = None
        try:
            weather["wind"]["compass"] = self._degrees_to_direction(
                weather["wind"]["direction"])
        except (TypeError, KeyError):
            pass
        return weather
    def fetch_woeid(self, location):
        """Fetch a location's corresponding WOEID.
        Args:
            location: (string) a location (e.g. 23454 or Berlin, Germany).
        Returns:
            a string containing the location's corresponding WOEID or None if
            the WOEID could not be found.
        Raises:
            urllib.error.URLError: urllib.request could not open the URL
                (Python 3).
            urllib2.URLError: urllib2 could not open the URL (Python 2).
            xml.etree.ElementTree.ParseError: xml.etree.ElementTree failed to
                parse the XML document.
        """
        rss = self._fetch_xml(
            WOEID_LOOKUP_URL.format(quote(location)))
        try:
            woeid = rss.find("results/Result/woeid").text
        except AttributeError:
            return None
        return woeid
    def _degrees_to_direction(self, degrees):
        """Convert wind direction from degrees to a 16-point compass direction.
        Returns None for non-numeric input or values outside [0, 360].
        """
        try:
            degrees = float(degrees)
        except ValueError:
            return None
        if degrees < 0 or degrees > 360:
            return None
        # North wraps around: [0, 11.25] and (348.75, 360].
        # Fix: the old check used `degrees >= 348.76`, leaving a dead zone
        # (348.75, 348.76) that incorrectly fell through to None.
        if degrees <= 11.25 or degrees > 348.75:
            return "N"
        elif degrees <= 33.75:
            return "NNE"
        elif degrees <= 56.25:
            return "NE"
        elif degrees <= 78.75:
            return "ENE"
        elif degrees <= 101.25:
            return "E"
        elif degrees <= 123.75:
            return "ESE"
        elif degrees <= 146.25:
            return "SE"
        elif degrees <= 168.75:
            return "SSE"
        elif degrees <= 191.25:
            return "S"
        elif degrees <= 213.75:
            return "SSW"
        elif degrees <= 236.25:
            return "SW"
        elif degrees <= 258.75:
            return "WSW"
        elif degrees <= 281.25:
            return "W"
        elif degrees <= 303.75:
            return "WNW"
        elif degrees <= 326.25:
            return "NW"
        else:
            # 326.25 < degrees <= 348.75
            return "NNW"
    def _fetch_xml(self, url):
        """Fetch a url and parse the document's XML."""
        with contextlib.closing(urlopen(url)) as f:
            return xml.etree.ElementTree.parse(f).getroot()
| StarcoderdataPython |
168322 | import networkx
import numpy
import chainer
from chainer_chemistry.dataset.graph_dataset.base_graph_dataset import PaddingGraphDataset, SparseGraphDataset # NOQA
from chainer_chemistry.dataset.graph_dataset.base_graph_data import PaddingGraphData, SparseGraphData # NOQA
from chainer_chemistry.dataset.graph_dataset.feature_converters import batch_without_padding # NOQA
class BaseNetworkxPreprocessor(object):
    """Base class to preprocess a `Networkx::Graph` object.

    Supplies helpers that read node features (``x``) and labels (``y``)
    from either a graph-level attribute or the individual node attributes.
    """

    def __init__(self, *args, **kwargs):
        pass

    def get_x(self, graph):
        """Return the node feature matrix of *graph*."""
        # A precomputed graph-level 'x' takes precedence over per-node data.
        if 'x' in graph.graph:
            return graph.graph['x']
        # Stack the per-node feature vectors into one (n_nodes, dim) array.
        feature_dim, = graph.nodes[0]['x'].shape
        features = numpy.empty((graph.number_of_nodes(), feature_dim),
                               dtype=numpy.float32)
        for node, attrs in graph.nodes.data():
            features[node] = attrs['x']
        return features

    def get_y(self, graph):
        """Return the node label vector of *graph*."""
        if 'y' in graph.graph:
            return graph.graph['y']
        labels = numpy.empty(graph.number_of_nodes(), dtype=numpy.int32)
        for node, attrs in graph.nodes.data():
            labels[node] = attrs['y']
        return labels
class BasePaddingNetworkxPreprocessor(BaseNetworkxPreprocessor):
    """Base class to preprocess `Networkx::Graph` into `PaddingGraphDataset`
    """  # NOQA
    def __init__(self, use_coo=False, *args, **kwargs):
        # use_coo: represent the adjacency as a sparse COO matrix instead of
        # a dense numpy array.
        self.use_coo = use_coo
    def construct_data(self, graph):
        """Construct `PaddingGraphData` from `Networkx::Graph`
        Args:
            graph (Networkx::Graph): graph
        Returns:
            PaddingGraphData: graph data of padding pattern
        """
        if not self.use_coo:
            return PaddingGraphData(
                x=self.get_x(graph),
                adj=networkx.to_numpy_array(graph, dtype=numpy.float32),
                y=self.get_y(graph),
                label_num=graph.graph['label_num']
            )
        # COO construction: each undirected edge is stored in both directions.
        # Fix: ``dtype=int`` replaces ``numpy.int``, an alias removed in
        # NumPy 1.24 (it resolved to the builtin int anyway, so the dtype is
        # unchanged on older versions).
        n_edges = graph.number_of_edges() * 2
        row = numpy.empty((n_edges), dtype=int)
        col = numpy.empty((n_edges), dtype=int)
        data = numpy.ones((n_edges), dtype=numpy.float32)
        for i, edge in enumerate(graph.edges):
            row[2 * i] = edge[0]
            row[2 * i + 1] = edge[1]
            col[2 * i] = edge[1]
            col[2 * i + 1] = edge[0]
        # ensure row is sorted (required by CooMatrix with order='C')
        if not numpy.all(row[:-1] <= row[1:]):
            order = numpy.argsort(row)
            row = row[order]
            col = col[order]
        assert numpy.all(row[:-1] <= row[1:])
        adj = chainer.utils.CooMatrix(
            data=data, row=row, col=col,
            shape=(graph.number_of_nodes(), graph.number_of_nodes()),
            order='C')
        return PaddingGraphData(
            x=self.get_x(graph),
            adj=adj,
            y=self.get_y(graph),
            label_num=graph.graph['label_num']
        )
    def create_dataset(self, graph_list):
        """Create `PaddingGraphDataset` from list of `Networkx::Graph`
        Args:
            graph_list (list[Networkx::Graph]): list of graphs
        Returns:
            PaddingGraphDataset: graph dataset of padding pattern
        """
        data_list = [
            self.construct_data(graph) for graph in graph_list
        ]
        dataset = PaddingGraphDataset(data_list)
        dataset.register_feature('label_num', batch_without_padding)
        return dataset
class BaseSparseNetworkxPreprocessor(BaseNetworkxPreprocessor):
    """Base class to preprocess `Networkx::Graph` into `SparseGraphDataset`
    """
    def construct_data(self, graph):
        """Construct `SparseGraphData` from `Networkx::Graph`
        Args:
            graph (Networkx::Graph): graph
        Returns:
            SparseGraphData: graph data of sparse pattern
        """
        # Fix: ``dtype=int`` replaces ``numpy.int``, an alias removed in
        # NumPy 1.24 (it resolved to the builtin int anyway, so the dtype is
        # unchanged on older versions).
        edge_index = numpy.empty((2, graph.number_of_edges() * 2), dtype=int)
        # Store each undirected edge in both directions.
        for i, edge in enumerate(graph.edges):
            edge_index[0][2 * i] = edge[0]
            edge_index[0][2 * i + 1] = edge[1]
            edge_index[1][2 * i] = edge[1]
            edge_index[1][2 * i + 1] = edge[0]
        return SparseGraphData(
            x=self.get_x(graph),
            edge_index=numpy.array(edge_index, dtype=int),
            y=self.get_y(graph),
            label_num=graph.graph['label_num']
        )
    def add_self_loop(self, graph):
        """Add a self-loop edge to every node of *graph* and return it."""
        for v in range(graph.number_of_nodes()):
            graph.add_edge(v, v)
        return graph
    def create_dataset(self, graph_list):
        """Create `SparseGraphDataset` from list of `Networkx::Graph`
        Args:
            graph_list (list[Networkx::Graph]): list of graphs
        Returns:
            SparseGraphDataset: graph dataset of sparse pattern
        """
        data_list = [
            self.construct_data(graph) for graph in graph_list
        ]
        dataset = SparseGraphDataset(data_list)
        dataset.register_feature('label_num', batch_without_padding)
        return dataset
| StarcoderdataPython |
100053 | import platform
from pathlib import Path
import numpy as np
import torch
from spconv.pytorch import ops
from spconv.pytorch.conv import (SparseConv2d, SparseConv3d, SparseConvTranspose2d,
SparseConvTranspose3d, SparseInverseConv2d,
SparseInverseConv3d, SubMConv2d, SubMConv3d)
from spconv.pytorch.core import SparseConvTensor
from spconv.pytorch.identity import Identity
from spconv.pytorch.modules import SparseModule, SparseSequential
from spconv.pytorch.ops import ConvAlgo
from spconv.pytorch.pool import SparseMaxPool2d, SparseMaxPool3d
from spconv.pytorch.tables import AddTable, ConcatTable, JoinTable
class ToDense(SparseModule):
    """Convert a SparseConvTensor to an NCHW dense tensor.
    """
    def forward(self, x: SparseConvTensor):
        # Materialise the sparse tensor as a dense (N, C, H, W) array.
        return x.dense()
class RemoveGrid(SparseModule):
    """Remove the pre-allocated grid buffer from a SparseConvTensor.
    """
    def forward(self, x: SparseConvTensor):
        # Drop the buffer reference and pass the tensor through unchanged.
        x.grid = None
        return x
| StarcoderdataPython |
3316586 | import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.ext.mutable import MutableDict
from lms.db import BASE
class Assignment(BASE):
    """
    An assignment configuration.
    When an LMS doesn't support LTI content-item selection/deep linking (so it
    doesn't support storing an assignment's document URL in the LMS and passing
    it back to us in launch requests) then we store the document URL in the
    database instead.
    Each persisted Assignment object represents a DB-stored
    assignment configuration, with the
    ``(tool_consumer_instance_guid, resource_link_id)`` launch params
    identifying the LTI resource (module item or assignment) and the
    ``document_url`` being the URL of the document to be annotated.
    """

    # Historical table name kept for backward compatibility with existing data.
    __tablename__ = "module_item_configurations"
    __table_args__ = (
        sa.UniqueConstraint("resource_link_id", "tool_consumer_instance_guid"),
    )
    id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)
    resource_link_id = sa.Column(sa.Unicode, nullable=False)
    """The resource_link_id launch param of the assignment."""
    tool_consumer_instance_guid = sa.Column(sa.Unicode, nullable=False)
    """
    The tool_consumer_instance_guid launch param of the LMS.
    This is needed because resource_link_id's aren't guaranteed to be unique
    across different LMS's.
    """
    document_url = sa.Column(sa.Unicode, nullable=False)
    """The URL of the document to be annotated for this assignment."""
    # Free-form JSON blob for per-assignment settings; MutableDict makes
    # in-place mutations (as in set_canvas_mapped_file_id below) dirty the row.
    extra = sa.Column(
        "extra",
        MutableDict.as_mutable(JSONB),
        server_default=sa.text("'{}'::jsonb"),
        nullable=False,
    )
    def get_canvas_mapped_file_id(self, file_id):
        """Return the remapped Canvas file id for *file_id*, or *file_id* itself."""
        return self.extra.get("canvas_file_mappings", {}).get(file_id, file_id)
    def set_canvas_mapped_file_id(self, file_id, mapped_file_id):
        """Record that Canvas *file_id* should be replaced by *mapped_file_id*."""
        self.extra.setdefault("canvas_file_mappings", {})[file_id] = mapped_file_id
| StarcoderdataPython |
3565474 | <gh_stars>0
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django_paymentsos import signals
from django_paymentsos.enumerators import ResultStatus, EventType
from django_paymentsos.fields import JSONField
from django_paymentsos.utils import get_signature
class ProviderSpecificData(models.Model):
    """Abstract mixin holding provider-specific JSON payloads."""
    # Raw JSON blobs; schema varies per payment provider.
    device_fingerprint = JSONField(blank=True)
    additional_details = JSONField(blank=True)
    class Meta:
        abstract = True
class PaymentMethod(models.Model):
    """Abstract mixin with the payment-method (card/token) fields of a webhook.

    All fields are stored as free-form strings exactly as received; no
    normalization or validation is applied at the model level.
    """
    type = models.CharField(max_length=100, blank=True)
    token = models.CharField(max_length=100, blank=True)
    token_type = models.CharField(max_length=100, blank=True)
    holder_name = models.CharField(max_length=100, blank=True)
    expiration_date = models.CharField(max_length=100, blank=True)
    last_4_digits = models.CharField(max_length=100, blank=True)
    pass_luhn_validation = models.BooleanField(default=False)
    fingerprint = models.CharField(max_length=100, blank=True)
    bin_number = models.CharField(max_length=100, blank=True)
    vendor = models.CharField(max_length=100, blank=True)
    issuer = models.CharField(max_length=100, blank=True)
    card_type = models.CharField(max_length=100, blank=True)
    level = models.CharField(max_length=100, blank=True)
    country_code = models.CharField(max_length=100, blank=True)
    method_created = models.CharField(max_length=100, blank=True)
    billing_address = JSONField(blank=True)
    class Meta:
        abstract = True
class Result(models.Model):
    """Abstract mixin with the transaction result fields of a webhook."""
    result_status = models.CharField(max_length=100, blank=True)
    category = models.CharField(max_length=100, blank=True)
    sub_category = models.CharField(max_length=100, blank=True)
    result_description = models.CharField(max_length=100, blank=True)
    class Meta:
        abstract = True
    def get_status(self):
        """Return the status as a ResultStatus enum, or the raw string
        when the stored value is not a known enum member."""
        try:
            return ResultStatus(self.result_status)
        except ValueError:
            return self.result_status
    @property
    def is_result_status_succeed(self):
        return self.get_status() == ResultStatus.SUCCEED
    @property
    def is_result_status_failed(self):
        return self.get_status() == ResultStatus.FAILED
    @property
    def is_result_status_pending(self):
        return self.get_status() == ResultStatus.PENDING
class ProviderData(models.Model):
    """Abstract mixin with the upstream provider's response fields."""
    provider_name = models.CharField(max_length=100, blank=True)
    response_code = models.CharField(max_length=100, blank=True)
    provider_description = models.CharField(max_length=100, blank=True)
    # Full provider response kept verbatim for debugging/auditing.
    raw_response = JSONField(blank=True)
    transaction_id = models.CharField(max_length=100, blank=True)
    external_id = models.CharField(max_length=100, blank=True)
    class Meta:
        abstract = True
class Flag(models.Model):
    """Abstract mixin that marks a notification as suspicious.

    Concrete subclasses must also provide a ``webhook_id`` field (see
    ``PaymentsOSNotification``), which ``save`` uses for duplicate detection.
    """

    DUPLICATED_WEBHOOK = '1001'
    INVALID_SIGN = '1002'
    FLAG_CODES = (
        (DUPLICATED_WEBHOOK, 'Duplicated Webhook'),
        (INVALID_SIGN, 'Invalid Sign'),
    )
    flag = models.BooleanField(default=False)
    flag_code = models.CharField(max_length=4, blank=True, choices=FLAG_CODES)
    flag_info = models.CharField(max_length=100, blank=True)
    class Meta:
        abstract = True
    @property
    def is_flagged(self):
        return self.flag
    def save(self, *args, **kwargs):
        # Only probe for duplicates on the first insert. Previously the
        # existence query executed on *every* save (including plain updates)
        # because it was evaluated before the ``not self.id`` check.
        if not self.id and PaymentsOSNotification.objects.filter(
                webhook_id=self.webhook_id).exists():
            self.flag = True
            self.flag_code = self.DUPLICATED_WEBHOOK
            self.flag_info = 'Duplicate webhook_id.'
        super().save(*args, **kwargs)
class PaymentNotification(ProviderSpecificData, PaymentMethod, Result, ProviderData, Flag):
    """Abstract aggregate of all notification mixins plus payment-level fields."""
    data_id = models.CharField(max_length=100, blank=True)
    reconciliation_id = models.CharField(max_length=100, blank=True)
    amount = models.CharField(max_length=100, blank=True)
    notification_created = models.CharField(max_length=100, blank=True)
    # Fields below are only populated by payment.payment.create events.
    currency = models.CharField(max_length=100, blank=True)  # payment.payment.create
    modified = models.CharField(max_length=100, blank=True)  # payment.payment.create
    statement_soft_descriptor = models.CharField(max_length=100, blank=True)  # payment.payment.create
    status = models.CharField(max_length=100, blank=True)  # payment.payment.create
    class Meta:
        abstract = True
class PaymentsOSNotification(PaymentNotification):
    """Concrete webhook notification received from PaymentsOS."""
    webhook_id = models.CharField(max_length=100, blank=True)
    payment_id = models.CharField(max_length=100, blank=True)
    account_id = models.CharField(max_length=100, blank=True)
    app_id = models.CharField(max_length=100, blank=True)
    x_zooz_request_id = models.CharField(max_length=100, blank=True)
    x_payments_os_env = models.CharField(max_length=100, blank=True)
    version = models.CharField(max_length=100, blank=True)
    event_type = models.CharField(max_length=100, blank=True)
    signature = models.CharField(max_length=100, blank=True)
    created = models.DateTimeField()
    # Complete webhook body as received, for auditing/re-verification.
    raw = JSONField()
    date_modified = models.DateTimeField(auto_now=True)
    date_created = models.DateTimeField(auto_now_add=True)
    class Meta:
        db_table = 'paymentsos_notification'
        verbose_name = 'PaymentsOS Notification'
        verbose_name_plural = 'PaymentsOS Notifications'
    @property
    def is_event_type_payment_create(self):
        return self.get_event_type() == EventType.PAYMENT_CREATE
    @property
    def is_event_type_charge_create(self):
        return self.get_event_type() == EventType.CHARGE_CREATE
    @property
    def is_test(self):
        # PaymentsOS marks sandbox traffic with the "test" environment header.
        return True if self.x_payments_os_env == 'test' else False
    def get_event_type(self):
        """Return the event type as an EventType enum, or the raw string
        when the stored value is not a known enum member."""
        try:
            return EventType(self.event_type)
        except ValueError:
            return self.event_type
    def __str__(self):
        return self.payment_id
    def save(self, *args, **kwargs):
        # On first insert, recompute the expected signature and flag (rather
        # than reject) notifications whose signature does not match.
        if not self.id:
            signature = get_signature(
                self.event_type, self.webhook_id, self.account_id, self.payment_id, self.raw['created'], self.app_id,
                self.data_id, self.result_status, self.category, self.sub_category, self.response_code,
                self.reconciliation_id, self.amount, self.currency
            )
            # Strip the "sig1=" scheme prefix before comparing.
            # NOTE(review): assumes self.signature always starts with "sig1=";
            # also, `!=` is not a constant-time comparison — consider
            # hmac.compare_digest for attacker-supplied signatures.
            if self.signature[len('sig1='):] != signature:
                self.flag = True
                self.flag_code = Flag.INVALID_SIGN
                self.flag_info = 'Invalid signature.'
        super().save(*args, **kwargs)
@receiver(post_save, sender=PaymentsOSNotification)
def payment_notification_save(sender, instance, created, **kwargs):
    """Dispatch django_paymentsos signals after a notification is stored.

    Only reacts to freshly inserted rows (``created``), never to updates.
    """
    if created:
        if instance.is_flagged:
            # Flagged (duplicate/invalid-signature) notifications emit their
            # own pair of signals and stop here.
            signals.invalid_notification_received.send(sender=PaymentsOSNotification, instance=instance)
            signals.notification_flagged.send(sender=PaymentsOSNotification, instance=instance)
            return
        else:
            signals.valid_notification_received.send(sender=PaymentsOSNotification, instance=instance)
        if instance.is_event_type_payment_create:
            # payment.create events end here: no result-status signals fire.
            signals.notification_type_payment_create.send(sender=PaymentsOSNotification, instance=instance)
            return
        if instance.is_event_type_charge_create:
            signals.notification_type_charge_create.send(sender=PaymentsOSNotification, instance=instance)
        # NOTE: result-status signals below fire for charge.create AND any
        # other non-payment.create event type (there is no return above).
        if instance.is_result_status_succeed:
            signals.transaction_result_succeed.send(sender=PaymentsOSNotification, instance=instance)
        elif instance.is_result_status_failed:
            signals.transaction_result_failed.send(sender=PaymentsOSNotification, instance=instance)
        elif instance.is_result_status_pending:
            signals.transaction_result_pending.send(sender=PaymentsOSNotification, instance=instance)
| StarcoderdataPython |
1803987 | #!/usr/bin/env python
# encoding: utf-8
import sys
if sys.version_info[0] == 2 and sys.version_info[1] == 7: # do not run with tox in non-2.7 versions (fails because not building the C files)
import unittest
import os
from Naked.toolshed.c.shell import execute, execute_rb, execute_js, run, run_rb, run_js, muterun, muterun_rb, muterun_js
from Naked.toolshed.types import NakedObject
class NakedCShellTest(unittest.TestCase):
    """Tests for Naked's shell helpers (execute/run/muterun) against generic
    shell commands and against the Node.js / Ruby script runners.

    Fixes relative to the original:
      * the first of two duplicate ``test_run_bad_command_output`` methods
        (silently shadowed, and containing an unused variable) was removed;
      * ``test_run_rb_success_suppress_exitstatus_false`` now actually runs
        the Ruby success script instead of the Node.js one (copy/paste bug).
    """

    def setUp(self):
        self.good_command = "echo 'test command'"  # zero exit status, good command
        self.bad_command = "bogusapp -help"  # non-zero exit status, missing executable
        self.missing_option = "ls --random"  # non-zero exit status from an executable that is present
        self.node_success_path = os.path.join('testfiles', 'keep', 'js', 'node_success.js')
        self.node_fail_path = os.path.join('testfiles', 'keep', 'js', 'node_error.js')
        self.ruby_success_path = os.path.join('testfiles', 'keep', 'rb', 'ruby_success.rb')
        self.ruby_fail_path = os.path.join('testfiles', 'keep', 'rb', 'ruby_error.rb')

    def tearDown(self):
        pass

    #------------------------------------------------------------------------------
    # execute() method tests
    #------------------------------------------------------------------------------
    def test_execute_good_command(self):
        self.assertTrue(execute(self.good_command))

    def test_execute_bad_command(self):
        self.assertFalse(execute(self.bad_command))

    def test_execute_missing_option(self):
        self.assertFalse(execute(self.missing_option))

    #------------------------------------------------------------------------------
    # run() method tests
    #------------------------------------------------------------------------------
    def test_run_good_command(self):
        self.assertEqual(b"test command\n", run(self.good_command))

    def test_run_good_command_suppress_stdout(self):
        # still receive return value when stdout print suppressed
        self.assertEqual(b"test command\n", run(self.good_command, suppress_stdout=True))

    def test_run_bad_command(self):
        self.assertEqual(False, run(self.bad_command))

    def test_run_bad_command_output(self):
        # Only one definition is kept: a previous duplicate of this method was
        # shadowed and never executed.
        with self.assertRaises(SystemExit):  # raises SystemExit when suppress_exit_status_call = False
            self.assertEqual(False, run(self.bad_command, suppress_exit_status_call=False))

    def test_run_missing_option(self):
        self.assertEqual(False, run(self.missing_option))

    #------------------------------------------------------------------------------
    # muterun() tests
    #------------------------------------------------------------------------------
    def test_muterun_good_command_return_type(self):
        self.assertEqual(type(NakedObject()), type(muterun(self.good_command)))  # returns NakedObject on success

    def test_muterun_bad_command_return_type(self):
        self.assertEqual(type(NakedObject()), type(muterun(self.bad_command)))  # returns NakedObject on error

    def test_muterun_good_command_exitcode(self):
        out = muterun(self.good_command)
        self.assertEqual(0, out.exitcode)  # exit code = 0 = success

    def test_muterun_good_command_stdout(self):
        out = muterun(self.good_command)
        self.assertEqual(b"test command\n", out.stdout)  # stdout string is correct

    def test_muterun_good_command_stderr(self):
        out = muterun(self.good_command)
        self.assertEqual(b"", out.stderr)  # stderr empty string when successful command

    def test_muterun_bad_command_exitcode(self):
        out = muterun(self.bad_command)
        self.assertEqual(127, out.exitcode)  # returns 127 on absent executable

    def test_muterun_bad_command_stdout(self):
        out = muterun(self.bad_command)
        self.assertEqual(b"", out.stdout)  # stdout is empty string on failure

    def test_muterun_bad_command_stderr(self):
        out = muterun(self.bad_command)
        self.assertTrue(b'bogusapp: command not found' in out.stderr)  # has stderr message on failure

    def test_muterun_missing_option_exitcode(self):
        out = muterun(self.missing_option)
        self.assertEqual(1, out.exitcode)  # returns 1 on missing option to ls

    def test_muterun_missing_option_stderr(self):
        out = muterun(self.missing_option)
        self.assertTrue(len(out.stderr) > 0)  # there is a stderr message present

    def test_muterun_missing_option_stdout(self):
        out = muterun(self.missing_option)
        self.assertEqual(b"", out.stdout)  # stdout is empty string on failure

    #------------------------------------------------------------------------------
    # Node.js script execution tests
    #------------------------------------------------------------------------------
    def test_execute_node_success(self):
        self.assertTrue(execute_js(self.node_success_path))

    def test_execute_node_fail(self):
        self.assertFalse(execute_js(self.node_fail_path))

    def test_muterun_node_success(self):
        out = muterun_js(self.node_success_path)
        self.assertEqual(b'success\n', out.stdout)
        self.assertEqual(0, out.exitcode)
        self.assertEqual(b'', out.stderr)

    def test_muterun_node_fail(self):
        out = muterun_js(self.node_fail_path)
        self.assertEqual(b'error\n', out.stderr)
        self.assertEqual(b'', out.stdout)
        self.assertEqual(1, out.exitcode)

    def test_run_node_success(self):
        out = run_js(self.node_success_path)
        self.assertEqual(b'success\n', out)

    def test_run_node_success_suppress_stdout(self):
        out = run_js(self.node_success_path, suppress_stdout=True)
        self.assertEqual(b'success\n', out)  # still returns a value, does not print to stdout

    def test_run_node_fail_suppress_stderr(self):
        out = run_js(self.node_fail_path, suppress_stderr=True)
        self.assertEqual(False, out)  # returns False

    def test_run_node_fail_suppress_exitstatus_false(self):
        with self.assertRaises(SystemExit):
            run_js(self.node_fail_path, suppress_exit_status_call=False)  # SystemExit propagates on failure

    def test_run_node_success_suppress_exitstatus_false(self):
        out = run_js(self.node_success_path, suppress_exit_status_call=False)  # when command succeeds SystemExit is not raised
        self.assertEqual(b'success\n', out)

    #------------------------------------------------------------------------------
    # Ruby script execution tests
    #------------------------------------------------------------------------------
    def test_execute_rb_success(self):
        self.assertTrue(execute_rb(self.ruby_success_path))

    def test_execute_rb_fail(self):
        self.assertFalse(execute_rb(self.ruby_fail_path))

    def test_muterun_rb_success(self):
        out = muterun_rb(self.ruby_success_path)
        self.assertEqual(b'success\n', out.stdout)
        self.assertEqual(0, out.exitcode)
        self.assertEqual(b'', out.stderr)

    def test_muterun_rb_fail(self):
        out = muterun_rb(self.ruby_fail_path)
        self.assertEqual(b'error\n', out.stderr)
        self.assertEqual(b'', out.stdout)
        self.assertEqual(1, out.exitcode)

    def test_run_rb_success(self):
        out = run_rb(self.ruby_success_path)
        self.assertEqual(b'success\n', out)

    def test_run_rb_success_suppress_stdout(self):
        out = run_rb(self.ruby_success_path, suppress_stdout=True)
        self.assertEqual(b'success\n', out)  # still returns a value, does not print to stdout

    def test_run_rb_fail_suppress_stderr(self):
        out = run_rb(self.ruby_fail_path, suppress_stderr=True)
        self.assertEqual(False, out)  # returns False

    def test_run_rb_fail_suppress_exitstatus_false(self):
        with self.assertRaises(SystemExit):
            run_rb(self.ruby_fail_path, suppress_exit_status_call=False)

    def test_run_rb_success_suppress_exitstatus_false(self):
        # Fixed copy/paste error: this previously ran the *Node.js* success
        # script, leaving this Ruby code path untested.
        out = run_rb(self.ruby_success_path, suppress_exit_status_call=False)  # success does not raise SystemExit
        self.assertEqual(b'success\n', out)
| StarcoderdataPython |
9600360 | <filename>src/experiment/process_results/average_inverse_load.py<gh_stars>1-10
import experiment.process_results.result_handling_utils as result_handling
def get_num_actions(e_runs):
    """Return ``(num_actions, agent_name)`` for the first agent in ``e_runs``.

    The number of actions is the column count of the aggregated
    'action_to_rate_ratio' mean array; it is used to size plot legends.
    """
    agent_name = next(iter(e_runs))
    mean_ratios = e_runs[agent_name]['action_to_rate_ratio']['mean']
    return mean_ratios.shape[1], agent_name
def plot_action_to_activity_rate_cumulative_ratio():
    """Plot confidence intervals of the average actual-actions to
    activity-rates cumulative ratios across experiment runs.

    Command-line parameters:
        --path: mandatory path to the directory with all runs.
        --uncertainty_mode: optional, 'std_err' (default, standard error) or
            'variance' (population variance).
        --save_fig: optional flag; when given, figures are written into the
            data path instead of being shown on screen.

    Example:
        python experiments/process_results/average_action_to_activity_cum_ratio.py
            --save_fig --path path_to_data_folder
    """
    args = result_handling.parse_args()
    path = args.path
    save_fig = path + '/action_to_rate_cum_ratio.pdf' if args.save_fig else None
    uncertainty_mode = args.uncertainty_mode

    data_keys = ['action', 'zeta_star']
    data_keys_to_remove = ['cost', 'action', 'added', 'arrivals', 'drained', 'processing', 'state']

    # Read runs, computing the cumulative action-to-rate ratio on the fly.
    e_runs, _ = result_handling.read_experiment_runs(
        path, data_keys,
        function_pre_process=result_handling.compute_action_to_rate_cum_ratio,
        function_pre_process_param=data_keys_to_remove)
    # Stack per-run results as rows of a single NumPy array.
    e_runs, _ = result_handling.stack_exp_runs(e_runs, ['action_to_rate_ratio'])
    # Aggregate mean/uncertainty statistics across the runs.
    e_runs = result_handling.aggregate_statistics(e_runs, ['action_to_rate_ratio'])

    num_actions, agent_name = get_num_actions(e_runs)
    legend = [f"{agent_name} action {i}" for i in range(num_actions)]
    result_handling.plot_aggregated_results_vs_time(
        e_runs, 'Action to rate cumulative ratio', uncertainty_mode, save_fig,
        'action_to_rate_ratio', new_legend=legend)


if __name__ == "__main__":
    plot_action_to_activity_rate_cumulative_ratio()
| StarcoderdataPython |
11312902 | ## Exercรญcio 62 do livro Python 3 - Conceitos e Aplicaรงรตes - Uma Abordagem Didรกtica
""" Faรงa uma pesquisa sobre o Algoritmo de Ordenaรงรฃo Quicksort. Implemente uma funรงรฃo recursiva que use algoritmo para
organizar a lista L de forma crescente. Escreva um programa para testar a funรงรฃo. """
import random
import time
def quicksort(v, p, r):
    """Sort v[p:r + 1] in place (ascending) via recursive quicksort."""
    if p < r:
        pivot_pos = particionar(v, p, r)
        quicksort(v, p, pivot_pos - 1)  # elements smaller than the pivot
        quicksort(v, pivot_pos + 1, r)  # elements larger than the pivot


def particionar(v, p, r):
    """Partition v[p:r + 1] around its first element; return the pivot's final index."""
    pivot = v[p]
    boundary = p  # last index of the "smaller than pivot" region
    for scan in range(p + 1, r + 1):
        if v[scan] < pivot:
            boundary += 1
            trocar(v, boundary, scan)
    trocar(v, p, boundary)  # move the pivot between the two regions
    return boundary


def trocar(v, n, m):
    """Swap v[n] and v[m] in place."""
    v[n], v[m] = v[m], v[n]
# Build a list of 0..9999 and shuffle it so the sort gets realistic input.
L = list(range(0, 10000))
random.shuffle(L)
antes = time.time()  # wall-clock timestamp before sorting
quicksort(L, 0, len(L)-1)
depois = time.time()  # timestamp after sorting
total = (depois-antes)*1000  # elapsed time in milliseconds
print(L)
print("\nO tempo total foi de %0.2f ms" % total)
| StarcoderdataPython |
1732890 | # -*- coding: utf-8 -*-
from django import template
from django.template.defaultfilters import stringfilter, escape
from django.utils.safestring import mark_safe
from django.conf import settings
import re
register = template.Library()
@register.tag(name='get_googlecharts_url')
def do_get_googlecharts_url(parser, token):
"""
"""
try:
contents = token.split_contents()
instance_name = contents[1]
except (ValueError, IndexError):
raise template.TemplateSyntaxError, "%r tag requires two or three arguments" % token.contents.split()[0]
return GoogleChartsNode(instance_name)
class GoogleChartsNode(template.Node):
    """Template node that renders a Google Charts pie-chart URL for a feature."""

    def __init__(self, feature):
        # Resolved lazily against the template context at render time.
        self.feature = template.Variable(feature)

    def render(self, context):
        feature = self.feature.resolve(context)
        url = "http://chart.apis.google.com/chart?"
        params = []
        params.append(feature.url_parameters)
        params.append("chf=bg,s,65432100")  # chart background fill parameter
        # Total used to normalise slice percentages; when display_rest is off,
        # ranks belonging to neither a politician nor a party are excluded.
        if feature.display_rest:
            addup = sum(map(lambda x: x.value, feature.featureranks))
        else:
            addup = sum(map(lambda x: x.value, [featurerank for featurerank in feature.featureranks if (featurerank.politician is not None or featurerank.party is not None)]))
        datapoints = []
        labels = []
        colors = []  # NOTE(review): never populated or used -- looks vestigial
        # Largest slices first.
        featureranks = sorted(feature.featureranks, key=lambda x: x.value, reverse=True)
        for featurerank in featureranks:
            if not feature.display_rest and (featurerank.politician is None and featurerank.party is None):
                continue
            # Percentage share rounded to one decimal place.
            datapoints.append(unicode(round(float(featurerank.value)/addup * 100, 1)))
            labels.append("%s [%d]" % (unicode(featurerank), featurerank.value))
        params.append("chd=t:"+",".join(datapoints))
        params.append("chl="+"|".join(labels))
        url += "&".join(params)
        return url
5082529 | <gh_stars>0
from .audio import *
from .graphics import *
from .input import *
from .other import *
from .graphics import _SizedInternalFormat, _CompressedInternalFormat, _TextureFormat | StarcoderdataPython |
9637333 | import datajoint as dj
#### LOAD DATABASE #########################################
from .dj_conn import *
imhotte = dj.schema(horst_imaging_db)
@imhotte
class AnatomicalMaskParams(dj.Lookup):
    # Lookup table mapping the integer labels of a Napari-drawn anatomical
    # mask to named brain regions (NULL label = region absent from the mask).
    definition = """
    # LUT for anatomical masks drawn in Napari to identify subregions in FOV
    timestamp_mask_lookup: timestamp # Auto created timestamp
    ---
    mec_label : tinyint # Medial entorhinal cortex (MEC) label name (integer)
    pas_label = NULL : tinyint # Parasubiculum label name (integer)
    rsa_label = NULL : tinyint # Retrosplenial agranular cortex (integer)
    prh_label = NULL : tinyint # Perirhinal cortex
    """
@imhotte
class AnatomicalMask(dj.Manual):
    # One label image per corrected projection; pixel values index into
    # AnatomicalMaskParams to name the region at each location.
    definition = """
    # Anatomical mask identifying anatomical regions in FOV
    -> ProjectionCorr
    ---
    -> AnatomicalMaskParams
    anat_mask : blob@imgstore # Anatomical mask for regions in FOV
    """
@imhotte
class RoisCorrBrainLoc(dj.Computed):
    """Assign each corrected ROI to the anatomical region its centre lies in."""

    definition = """
    # Cell IDs and anatomical location
    -> AnatomicalMask
    -> RoisCorr
    ---
    """

    class MEC(dj.Part):
        definition = """
        # Cells in MEC
        -> master
        ---
        """

    class PAS(dj.Part):
        definition = """
        # Cells in Parasubiculum
        -> master
        ---
        """

    class RSA(dj.Part):
        definition = """
        # Cells in Retrosplenial / Agranular cortex
        -> master
        ---
        """

    class PRH(dj.Part):
        definition = """
        # Cells in perirhinal cortex
        -> master
        ---
        """

    class Undefined(dj.Part):
        # Has label "0" in anatomical mask
        definition = """
        # Cells elsewhere (not defined by any anatomical label)
        -> master
        ---
        """

    def make(self, key):
        """Populate the part table matching the mask label under the ROI centre."""
        anatomical_mask = (AnatomicalMask & key).fetch1('anat_mask')
        anatomical_lut = (AnatomicalMaskParams & key).fetch1()

        # Sanity check - every non-zero label present in the mask must be
        # declared in the LUT (0 is reserved for "undefined").
        unique_labels_map = set(anatomical_mask.astype(int).flatten())
        unique_labels_map.discard(0)
        unique_labels_lut = set(value for value in anatomical_lut.values() if isinstance(value, int))
        if not all(elem in unique_labels_lut for elem in unique_labels_map):
            raise KeyError('Labels found in anatomical map not found in LUT')

        self.insert1(key)
        cell = (RoisCorr.proj('center_x_corr', 'center_y_corr') & key).fetch1()
        x = int(cell['center_x_corr'])
        y = int(cell['center_y_corr'])

        # ROI centres outside the mask cannot be classified.  The original
        # used ">" which let an index equal to the array extent through and
        # crashed with IndexError below; ">=" fixes that off-by-one.
        if cell['center_x_corr'] >= anatomical_mask.shape[1] or cell['center_y_corr'] >= anatomical_mask.shape[0]:
            self.Undefined.insert1({**key, **cell}, ignore_extra_fields=True)
            return

        label = anatomical_mask[y, x]  # look up the mask once instead of per branch
        if label == anatomical_lut['mec_label']:
            self.MEC.insert1({**key, **cell}, ignore_extra_fields=True)
        elif label == anatomical_lut['pas_label']:
            self.PAS.insert1({**key, **cell}, ignore_extra_fields=True)
        elif label == anatomical_lut['rsa_label']:
            self.RSA.insert1({**key, **cell}, ignore_extra_fields=True)
        elif label == anatomical_lut['prh_label']:
            self.PRH.insert1({**key, **cell}, ignore_extra_fields=True)
        elif label == 0:
            self.Undefined.insert1({**key, **cell}, ignore_extra_fields=True)
        else:
            raise KeyError(f'Label {label} not known')
| StarcoderdataPython |
1641699 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sas_definition_bundle import SasDefinitionBundle
class DeletedSasDefinitionBundle(SasDefinitionBundle):
    """A deleted SAS definition bundle consisting of its previous id, attributes
    and its tags, as well as information on when it will be purged.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: The SAS definition id.
    :vartype id: str
    :ivar secret_id: Storage account SAS definition secret id.
    :vartype secret_id: str
    :ivar template_uri: The SAS definition token template signed with an
     arbitrary key.  Tokens created according to the SAS definition will have
     the same properties as the template.
    :vartype template_uri: str
    :ivar sas_type: The type of SAS token the SAS definition will create.
     Possible values include: 'account', 'service'
    :vartype sas_type: str or ~azure.keyvault.models.SasTokenType
    :ivar validity_period: The validity period of SAS tokens created according
     to the SAS definition.
    :vartype validity_period: str
    :ivar attributes: The SAS definition attributes.
    :vartype attributes: ~azure.keyvault.models.SasDefinitionAttributes
    :ivar tags: Application specific metadata in the form of key-value pairs
    :vartype tags: dict[str, str]
    :param recovery_id: The url of the recovery object, used to identify and
     recover the deleted SAS definition.
    :type recovery_id: str
    :ivar scheduled_purge_date: The time when the SAS definition is scheduled
     to be purged, in UTC
    :vartype scheduled_purge_date: datetime
    :ivar deleted_date: The time when the SAS definition was deleted, in UTC
    :vartype deleted_date: datetime
    """

    _validation = {
        'id': {'readonly': True},
        'secret_id': {'readonly': True},
        'template_uri': {'readonly': True},
        'sas_type': {'readonly': True},
        'validity_period': {'readonly': True},
        'attributes': {'readonly': True},
        'tags': {'readonly': True},
        'scheduled_purge_date': {'readonly': True},
        'deleted_date': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'secret_id': {'key': 'sid', 'type': 'str'},
        'template_uri': {'key': 'templateUri', 'type': 'str'},
        'sas_type': {'key': 'sasType', 'type': 'str'},
        'validity_period': {'key': 'validityPeriod', 'type': 'str'},
        'attributes': {'key': 'attributes', 'type': 'SasDefinitionAttributes'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'recovery_id': {'key': 'recoveryId', 'type': 'str'},
        'scheduled_purge_date': {'key': 'scheduledPurgeDate', 'type': 'unix-time'},
        'deleted_date': {'key': 'deletedDate', 'type': 'unix-time'},
    }

    def __init__(self, *, recovery_id: str=None, **kwargs) -> None:
        # Fixed: the original called `super(...).__init__(, **kwargs)` -- a
        # SyntaxError that made this module unimportable.
        super(DeletedSasDefinitionBundle, self).__init__(**kwargs)
        self.recovery_id = recovery_id
        self.scheduled_purge_date = None
        self.deleted_date = None
| StarcoderdataPython |
3517626 | import click
import os
import subprocess
from collections import namedtuple
from jinja2 import Environment, FileSystemLoader
# fqdn: public web URL of the server; git: ssh remote base URL for pushes.
Urls = namedtuple('Urls', ['fqdn', 'git'])

NGINX_CONFIG_PATH = '/etc/nginx/apps.d'   # per-app nginx config fragments
GIT_PATH = '/opt/serve'                   # bare git repositories
APP_PATH = '/home/serve/apps'             # deployed application checkouts
AUTHORIZED_KEYS = '/home/serve/.ssh/authorized_keys'

try:
    with open('/home/serve/.urls', 'r') as f:
        # Expected format: three whitespace-separated fields,
        # "<fqdn> <git-host> <git-port>".  NOTE(review): only IOError is
        # caught -- a file with fewer than three fields raises IndexError.
        urls = f.read().strip().split()
    URLS = Urls(urls[0], "ssh://serve@{}:{}".format(urls[1], urls[2]))
except IOError:
    # No config written yet: fall back to local development defaults.
    URLS = Urls('localhost:8080', 'ssh://serve@localhost:2222')

env = Environment(loader=FileSystemLoader('/home/serve/serve/serve/templates'))
def write_nginx_config(app):
    """
    Write the NGINX config for the app.
    """
    # `docker port <app>` output ends with the mapped host port; the last five
    # characters are assumed to be that port -- TODO confirm this holds for
    # ports with fewer than five digits.
    docker_port = subprocess.check_output(['docker', 'port', app]).strip()[-5:]
    template = env.get_template('nginx_py.conf')
    with open(os.path.join(NGINX_CONFIG_PATH, "{}.conf".format(app)), 'w') as f:
        f.write(template.render(app=app, docker_port=docker_port))
def configure_git_hooks(app, git_path):
    """Render the post-receive hook that builds the app's docker image on push."""
    hook_template = env.get_template('post-receive')
    rendered = hook_template.render(app=app, git_path=git_path)
    return rendered
@click.group()
def serve():
    # Root CLI group; `set-url`, `user` and `app` subcommands attach to it.
    pass
@serve.command(name='set-url', help="Set the url and port of the server.")
@click.argument('fqdn')
@click.option('--port', default=22, help="The port to use for SSH.")
def seturls(fqdn, port):
    # Persist "<fqdn> <git-host> <git-port>" (whitespace-separated), which is
    # the format the module-level reader parses into URLS.  The previous
    # comma-joined two-field format ("fqdn,port") made the reader raise
    # IndexError on the next start.  The fqdn doubles as the git host.
    with open('/home/serve/.urls', 'w') as f:
        f.write("{} {} {}".format(fqdn, fqdn, port))
@serve.group(help="Commands for user management.")
def user():
    # Container group for `serve user ...` subcommands.
    pass
@user.command(name='list', help="List users")
def list_users():
    """
    List all users that have permission to manage Serve
    """
    click.echo("Users:")
    with open(AUTHORIZED_KEYS, 'r') as f:
        for line in f:
            # An authorized_keys entry ends with "...== <comment>".  The
            # original used line.index(), which raised ValueError and crashed
            # the command on blank or malformed lines; skip those instead.
            marker = line.find('== ')
            if marker == -1:
                continue
            click.echo(line[marker + 3:])
@user.command(help="Add a new user's SSH key to Serve.")
@click.argument('key')
def add(key):
    """
    Add a new user's SSH key to Serve
    """
    if key.startswith('ssh-rsa'):
        user = key[key.find('== ') + 3:]
        with open(AUTHORIZED_KEYS, 'a') as f:
            # Ensure a trailing newline: appending raw keys used to glue
            # successive entries onto one line, corrupting authorized_keys.
            f.write(key if key.endswith('\n') else key + '\n')
        click.echo("Added SSH key for {} to authorized keys.".format(user))
    else:
        click.echo("The provided key does not appear to be valid.")
@user.command(help="Remove a user's access to Serve.")
@click.argument('user')
def remove(user):
    """
    Remove a user's access to Serve.
    """
    click.echo("Removing key for {}".format(user))
    # NOTE(review): `user` is spliced straight into a sed address -- a value
    # containing "/" or regex metacharacters will break the command or delete
    # unintended lines.  Consider validating or escaping the input.
    subprocess.call(["sed", "--in-place", '/{}/d'.format(user), AUTHORIZED_KEYS])
@serve.group(help="Commands for app management.")
def app():
    # Container group for `serve app ...` subcommands.
    pass
@app.command(name='list', help="List all apps.")
def list_apps():
    """
    List all apps that have been created in Serve.
    """
    click.echo("All apps:")
    # Each bare repo is named "<app>.git"; strip the suffix for display.
    names = [entry[:-4] for entry in os.listdir(GIT_PATH)]
    click.echo("\n".join(names))
@app.command(help="Create a new app to Serve.")
@click.argument('app')
def create(app):
    """
    Create a new app.
    """
    click.echo("Creating the {} app.".format(app))
    app_git_path = os.path.join(GIT_PATH, '{}.git'.format(app))
    os.mkdir(app_git_path)
    # Use cwd=/absolute paths instead of os.chdir(): the original mutated the
    # process-wide working directory, which leaked into any code running after
    # this command.
    subprocess.call(['git', 'init', '--bare'], cwd=app_git_path)
    hook_path = os.path.join(app_git_path, 'hooks', 'post-receive')
    with open(hook_path, 'w') as f:
        f.write(configure_git_hooks(app, app_git_path))
    subprocess.call(['chmod', '+x', hook_path])
    click.echo("Created {} app with remote url {}{}".format(app, URLS.git,
                                                            app_git_path))
@app.command(help="Delete a Serve app.")
@click.argument('app')
def delete(app):
    """
    Delete an app.
    """
    click.echo("Deleting the {} app.".format(app))
    # subprocess.call() never raises CalledProcessError (that is check_call),
    # so the original try/except blocks were dead code and the "skipping"
    # messages could never be printed.  Inspect the return codes instead.
    git_dir = os.path.join(GIT_PATH, "{}.git".format(app))
    if subprocess.call(['rm', '-rf', git_dir]) != 0:
        click.echo("{} does not exist, skipping.".format(git_dir))
    app_dir = os.path.join(APP_PATH, app)
    if subprocess.call(['rm', '-rf', app_dir]) != 0:
        click.echo("{} does not exist, skipping.".format(app_dir))
    conf_path = os.path.join(NGINX_CONFIG_PATH, "{}.conf".format(app))
    if subprocess.call(['rm', conf_path]) != 0:
        click.echo("{} does not exist, skipping.".format(conf_path))
    # Only attempt `docker rm` if the container could be stopped.
    if subprocess.call(['docker', 'stop', app]) != 0 or subprocess.call(['docker', 'rm', app]) != 0:
        click.echo("Docker app {} does not exist, skipping...".format(app))
    if subprocess.call(['docker', 'rmi', "{}-image".format(app)]) != 0:
        click.echo("Docker image {}-image does not exist, skipping...".format(app))
@app.command(help="Start an app.")
@click.argument('app')
def start(app):
    """
    Start the container for the app.
    """
    click.echo("Starting the {} container...".format(app))
    subprocess.call(['docker', 'start', app])
    # Rewrite the nginx config to update the port
    # (docker may map a different host port on each start).
    write_nginx_config(app)
    subprocess.call(['sudo', '/etc/init.d/nginx', 'reload'])
@app.command(help="Stop an app.")
@click.argument('app')
def stop(app):
    """
    Stop the container for the app.
    """
    # Fixed a stray '"' that the original docstring leaked into `--help`.
    click.echo("Stopping the {} container...".format(app))
    subprocess.call(['docker', 'stop', app])
@app.command(help="Display information about a running application.")
@click.argument('app')
def info(app):
    """
    Display information about a running application.
    """
    app_git_path = os.path.join(GIT_PATH, '{}.git'.format(app))
    docker_port = None
    # NOTE(review): check_output returns bytes on Python 3, so `app in ...`
    # would raise TypeError there; this code appears to target Python 2.
    running = app in subprocess.check_output(['docker', 'ps'])
    if running:
        click.echo("Info for {} [running]:".format(app))
        docker_port = subprocess.check_output(['docker', 'port', app]).strip()
    else:
        click.echo("Info for {} [stopped]:".format(app))
    click.echo("Git URL: {}{}".format(URLS.git, app_git_path))
    click.echo("URL: {}/{}".format(URLS.fqdn, app))
    if docker_port:
        click.echo("Mapped Ports: {}".format(docker_port))
| StarcoderdataPython |
5134931 | #crie um programa que tenha uma lista chamada numeros e duas funcoes chamadas sorteia() e somaPar(). A
# primeira funcao vai sortear 5 numeros e vai coloca-los dentro de uma lista e a segunda funcao vai mostrar
#a soma entre todos os valores PARES sorteados pela funcao anterior.
from random import randint
from time import sleep
def sorteia(lista):
    """Append five random integers between 1 and 10 to ``lista``, echoing each."""
    print('Sorteando 5 valores da lista: ', end='')
    for _ in range(5):
        sorteado = randint(1, 10)
        lista.append(sorteado)
        print(f'{sorteado} ', end='')
        sleep(0.3)  # small pause for a "drawing" effect
    print('PRONTO!')
def somaPar(lista):
    """Print and return the sum of the even values in ``lista``.

    Returning the sum (the original only printed it) makes the function
    reusable and testable; existing callers that ignore the return value
    are unaffected.
    """
    soma = 0
    for valor in lista:
        if valor % 2 == 0:
            soma += valor
    print(f'Somando os valore PARES de {lista}, temos {soma}')
    return soma
# Main program
numeros = list()  # step 1: create an empty list for the drawn numbers
sorteia(numeros)  # step 4: fill it with 5 random values
somaPar(numeros)  # step 10: print the sum of the even values drawn
8092595 | <reponame>capybara-translation/ooxmlreplacer
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os.path
import uuid
import zipfile
from lxml import etree
W_NS = 'http://schemas.openxmlformats.org/wordprocessingml/2006/main'
W = '{%s}' % W_NS
A_NS = 'http://schemas.openxmlformats.org/drawingml/2006/main'
A = '{%s}' % A_NS
S_NS = 'http://schemas.openxmlformats.org/spreadsheetml/2006/main'
S = '{%s}' % S_NS
XML_NS = 'http://www.w3.org/XML/1998/namespace'
XML = '{%s}' % XML_NS
TMX14_NS = 'http://www.lisa.org/tmx14'
TMX14 = '{%s}' % TMX14_NS
CT_NS = 'http://schemas.openxmlformats.org/package/2006/content-types'
CT = '{%s}' % CT_NS
REL_NS = 'http://schemas.openxmlformats.org/package/2006/relationships'
REL = '{%s}' % REL_NS
R_NS = 'http://schemas.openxmlformats.org/officeDocument/2006/relationships'
R = '{%s}' % R_NS
XLF_NS = 'urn:oasis:names:tc:xliff:document:1.2'
XLF = '{%s}' % XLF_NS
SDL_NS = 'http://sdl.com/FileTypes/SdlXliff/1.0'
SDL = '{%s}' % SDL_NS
MEM_NS = 'http://www.memsource.com/mxlf/2.0'
MEM = '{%s}' % MEM_NS
def extract_parts(filename, include_relationship_parts=False):
    """
    Extract parts from the file specified.

    :param filename: file from which to extract parts
    :type filename: str | unicode
    :param include_relationship_parts: True to include relationship parts
    :type include_relationship_parts: bool
    :return: list of dicts with part names and their extracted contents
    :rtype: list[dict[str, str]]
    """
    filename = to_unicode(filename)
    parts = []
    try:
        with zipfile.ZipFile(filename) as zf:
            # [Content_Types].xml enumerates the document parts via Override
            # elements; strip the leading "/" from each PartName.
            content_types = etree.fromstring(zf.read('[Content_Types].xml'))
            part_names = [elem.get('PartName')[1:] for elem in content_types.iter(CT + 'Override')]
            for part_name in part_names:
                parts.append({'name': part_name, 'content': zf.read(part_name)})
            if include_relationship_parts:
                for info in zf.infolist():
                    if info.filename.endswith('.rels'):
                        parts.append({'name': info.filename, 'content': zf.read(info.filename)})
    except zipfile.BadZipfile as e:
        # Best-effort: an unreadable archive yields an empty result.
        print(e)
    return parts
def save_parts(parts, filename, new_filename):
    """
    Save parts in a new file.

    Any parts in the old file (filename) that do not exist in the specified parts (parts) are also added
    into the new file (new_filename)

    :param parts: parts to be saved in new_filename
    :type parts: list[dict[str, str]]
    :param filename: file to be merged into new_filename
    :type filename: str | unicode
    :param new_filename: file to which the parts will be saved
    :type new_filename: str | unicode
    :return:
    """
    filename = to_unicode(filename)
    new_filename = to_unicode(new_filename)
    file_dir = os.path.split(filename)[0]
    # Assemble the archive under a random temp name in the same directory so
    # the final os.rename stays on one filesystem.
    temp_file = os.path.join(file_dir, str(uuid.uuid4()))
    try:
        with zipfile.ZipFile(filename, 'r') as old_file:
            with zipfile.ZipFile(temp_file, 'w', zipfile.ZIP_DEFLATED) as new_file:
                for item in old_file.infolist():
                    # Copy across only the entries that are not being replaced.
                    if len([pt for pt in parts if pt['name'] == item.filename]) == 0:
                        data = old_file.read(item.filename)
                        new_file.writestr(item, data)
                for part in parts:
                    new_file.writestr(part['name'], part['content'])
    except zipfile.BadZipfile as e:
        print(e)
    if os.path.exists(new_filename):
        os.remove(new_filename)
    os.rename(temp_file, new_filename)
def to_unicode(obj, encoding='utf-8'):
    u"""
    Convert a basestring object to a unicode object (Python 2 only).

    Objects that are already unicode, or that are not strings at all, are
    returned unchanged.

    :type obj: basestring
    :param obj: object to convert
    :type encoding: str | unicode
    :param encoding: encoding used for unicode conversion
    :rtype: unicode | unknown
    :return: unicoded object
    """
    if isinstance(obj, basestring) and not isinstance(obj, unicode):
        obj = unicode(obj, encoding)
    return obj
| StarcoderdataPython |
9683439 | <gh_stars>0
import sys
# Read a sentence and a single search character, then report every index at
# which the character occurs, or that it was not found at all.
s = input("Write sentences: ")
symb = input("Write search symbol: ")[0]
count = 0
# enumerate() replaces the original manual index/while loop; the unused `sl`
# list was removed.
for i, ch in enumerate(s):
    if ch == symb:
        print(f"Symbol '{symb}' found on {i} position")
        count += 1
if count == 0:
    print(f"Symbol '{symb}' not found")
| StarcoderdataPython |
392427 | from django.db import models
from .fields import LiveField
from .managers import LiveManager
class LiveModel(models.Model):
    """Model support for soft-deleting using LiveField

    LiveModel overrides Model.delete() to provide soft-deletion via a
    LiveField: `.delete()` sets `live` to False (the original docstring
    incorrectly said True).  Normal deletion can be performed using
    `Model.hard_delete()`.
    """
    live = LiveField()

    objects = LiveManager()
    all_objects = LiveManager(include_soft_deleted=True)

    class Meta:
        abstract = True

    def delete(self, *args, **kwargs):
        # Accept (and ignore) Django's `using`/`keep_parents` arguments so
        # callers that pass them do not crash; always soft-delete.
        self.soft_delete()

    def hard_delete(self):
        """Permanently remove the row, bypassing the soft-delete."""
        super(LiveModel, self).delete()

    def soft_delete(self):
        """Mark the row as deleted and persist the flag."""
        self.live = False
        self.save()
| StarcoderdataPython |
11236487 | <reponame>ESIPFed/SensorDat<gh_stars>1-10
import copy
import json
import numpy as np
import pandas as pd
import re
import influxdb_driver
def constant(value, series):
    """Return a Series holding ``value`` at every position of ``series``'s index.

    ``series`` may be a pandas Series (its index is reused) or an Index
    (used directly as the result's index); anything else raises ValueError.
    """
    if isinstance(series, pd.Series):
        index = series.index
    elif isinstance(series, pd.Index):
        index = series
    else:
        raise ValueError('Series must be a pandas Series or Index')
    return pd.Series(value, index=index)
class Cookbook():
    """Executes a declarative JSON "cookbook" data pipeline.

    A cookbook file names data ``sources`` and ``destinations`` (driver
    configurations), ``ingredients`` (initial queries that load data), a
    ``recipe`` (an ordered list of typed processing steps such as
    query/transformation/aggregation), and ``servings`` (writes of the
    final results).  Each step stores its result as an attribute on this
    object under its ``@as`` name; later steps refer to earlier results
    by dotted name (see :meth:`parse_datasource`).
    """
    def __init__(self, cookbook_path):
        """Build the operator lookup tables and load the cookbook JSON.

        :param cookbook_path: path to the cookbook JSON file
        """
        self._registry = {}
        # Binary comparison operators usable inside "where" clauses.
        self._comparisons = {'>' : np.greater,
                             '<' : np.less,
                             '>=' : np.greater_equal,
                             '<=' : np.less_equal,
                             '==' : np.equal,
                             '!=' : np.not_equal,
                             'in' : np.isin}
        # Boolean combinators for nesting "where" clauses; each takes a
        # list of boolean arrays and reduces it to one.
        self._logicals = {'and' : np.logical_and.reduce,
                          'or' : np.logical_or.reduce,
                          'not' : np.logical_not.reduce,
                          'xor' : np.logical_xor.reduce}
        # Element-wise operations available to "transformation" steps:
        # mostly NumPy ufuncs plus a few pandas Series helpers at the end.
        self._transforms = {'return' : lambda x : x,
                            '+' : np.add,
                            '-' : np.subtract,
                            '*' : np.multiply,
                            '/' : np.divide,
                            '//' : np.floor_divide,
                            '%' : np.mod,
                            '**' : np.power,
                            'add' : np.add,
                            'subtract' : np.subtract,
                            'multiply' : np.multiply,
                            'divide' : np.divide,
                            'logaddexp' : np.logaddexp,
                            'logaddexp2' : np.logaddexp2,
                            'true_divide' : np.true_divide,
                            'floor_divide' : np.floor_divide,
                            'negative' : np.negative,
                            'positive' : np.positive,
                            'power' : np.power,
                            'remainder' : np.remainder,
                            'mod' : np.mod,
                            'fmod' : np.fmod,
                            'divmod' : np.divmod,
                            'absolute' : np.absolute,
                            'fabs' : np.fabs,
                            'rint' : np.rint,
                            'sign' : np.sign,
                            'heaviside' : np.heaviside,
                            'conj' : np.conj,
                            'exp' : np.exp,
                            'exp2' : np.exp2,
                            'log' : np.log,
                            'log2' : np.log2,
                            'log10' : np.log10,
                            'expm1' : np.expm1,
                            'log1p' : np.log1p,
                            'sqrt' : np.sqrt,
                            'square' : np.square,
                            'cbrt' : np.cbrt,
                            'reciprocal' : np.reciprocal,
                            'sin' : np.sin,
                            'cos' : np.cos,
                            'tan' : np.tan,
                            'arcsin' : np.arcsin,
                            'arccos' : np.arccos,
                            'arctan' : np.arctan,
                            'arctan2' : np.arctan2,
                            'hypot' : np.hypot,
                            'sinh' : np.sinh,
                            'cosh' : np.cosh,
                            'tanh' : np.tanh,
                            'arcsinh' : np.arcsinh,
                            'arccosh' : np.arccosh,
                            'arctanh' : np.arctanh,
                            'deg2rad' : np.deg2rad,
                            'rad2deg' : np.rad2deg,
                            'bitwise_and' : np.bitwise_and,
                            'bitwise_or' : np.bitwise_or,
                            'bitwise_xor' : np.bitwise_xor,
                            'invert' : np.invert,
                            'left_shift' : np.left_shift,
                            'right_shift' : np.right_shift,
                            'greater' : np.greater,
                            'greater_equal' : np.greater_equal,
                            'less' : np.less,
                            'less_equal' : np.less_equal,
                            'not_equal' : np.not_equal,
                            'equal' : np.equal,
                            'logical_and' : np.logical_and,
                            'logical_or' : np.logical_or,
                            'logical_xor' : np.logical_xor,
                            'logical_not' : np.logical_not,
                            'maximum' : np.maximum,
                            'minimum' : np.minimum,
                            'fmax' : np.fmax,
                            'fmin' : np.fmin,
                            'isfinite' : np.isfinite,
                            'isinf' : np.isinf,
                            'isnan' : np.isnan,
                            'isnat' : np.isnat,
                            'signbit' : np.signbit,
                            'copysign' : np.copysign,
                            'nextafter' : np.nextafter,
                            'spacing' : np.spacing,
                            'modf' : np.modf,
                            'ldexp' : np.ldexp,
                            'frexp' : np.frexp,
                            # NOTE(review): 'fmod' is a duplicate key (also
                            # defined above with the same value); harmless
                            # but should be removed.
                            'fmod' : np.fmod,
                            'floor' : np.floor,
                            'ceil' : np.ceil,
                            'trunc' : np.trunc,
                            'diff' : pd.Series.diff,
                            'replace' : pd.Series.replace,
                            'constant' : constant,
                            'fillna' : pd.Series.fillna,
                            'mask' : pd.Series.mask,
                            'where' : pd.Series.where
                            }
        # Reductions available to "aggregation" steps.
        self._aggregators = {'sum' : np.sum,
                             'prod' : np.prod,
                             'mean' : np.mean,
                             'std' : np.std,
                             'var' : np.var,
                             'min' : np.min,
                             'max' : np.max,
                             'argmin' : np.argmin,
                             'argmax' : np.argmax,
                             'median' : np.median,
                             'percentile' : np.percentile,
                             'nansum' : np.nansum,
                             'nanprod' : np.nanprod,
                             'nanmean' : np.nanmean,
                             'nanstd' : np.nanstd,
                             'nanvar' : np.nanvar,
                             'nanmin' : np.nanmin,
                             'nanmax' : np.nanmax,
                             'nanargmin' : np.nanargmin,
                             'nanargmax' : np.nanargmax,
                             'nanmedian' : np.nanmedian,
                             'nanpercentile' : np.nanpercentile,
                             'any' : np.any,
                             'all' : np.all}
        # Driver factories keyed by the '@type' of sources/destinations.
        self.input_drivers = {'influxdb' : influxdb_driver.InfluxDBInput}
        self.output_drivers = {'influxdb' : influxdb_driver.InfluxDBOutput}
        # Heuristic used to decide whether a string argument looks like a
        # timestamp (contains a 4-digit year).
        # NOTE(review): non-raw string with '\d' escapes -- works today but
        # should be r'\d\d\d\d' to avoid DeprecationWarning.
        self.year_re = re.compile('\d\d\d\d')
        self.ingredient_types = {}
        # Annotations collected by parse_annotation, keyed by target name.
        self.annotations = {}
        # Dispatch table mapping a recipe step's '@type' to its handler.
        self.recipe_types = {'query' : self.parse_query,
                             'drop' : self.parse_drop,
                             'join' : self.parse_join,
                             'transformation' : self.parse_transformation,
                             'annotation' : self.parse_annotation,
                             'aggregation' : self.parse_aggregation,
                             'delete' : self.parse_delete,
                             'repeat' : self.parse_repeat,
                             'rolling' : self.parse_rolling}
        with open(cookbook_path) as cookbook:
            self.cookbook = json.load(cookbook)
    def parse_repeat(self, repeat_dict):
        """Run the nested ``tasks`` list ``iterations`` times."""
        _iterations = repeat_dict['iterations']
        _tasks = repeat_dict['tasks']
        for _ in range(_iterations):
            for task in _tasks:
                # TODO: Not ideal
                # (deepcopy because the handlers pop keys out of the dict,
                # which would corrupt the task spec on the next iteration)
                task_copy = copy.deepcopy(task)
                task_type = task_copy.pop('@type')
                self.recipe_types[task_type](task_copy)
    def parse_query(self, query_dict):
        """Evaluate ``select`` (optionally filtered by ``where``) and store
        the result as attribute ``@as``."""
        _select = query_dict['select']
        select = self.parse_datasource(_select)
        if 'where' in query_dict:
            _where = query_dict['where']
            where = self.parse_where(_where)
            obj = select[where]
        else:
            obj = select
        _as = query_dict['@as']
        setattr(self, _as, obj)
    def parse_drop(self, query_dict):
        """Like parse_query, but keeps the rows where ``where`` is False."""
        _select = query_dict['select']
        select = self.parse_datasource(_select)
        if 'where' in query_dict:
            _where = query_dict['where']
            where = self.parse_where(_where)
            obj = select[~where]
        else:
            obj = select
        _as = query_dict['@as']
        setattr(self, _as, obj)
    def parse_transformation(self, transformation_dict):
        """Apply the ``do`` operation, optionally masked by ``where``.

        With a ``where`` clause the result keeps ``do`` where the condition
        holds and falls back to ``else`` (or NaN) elsewhere; the combined
        result is stored as attribute ``@as``.
        """
        if 'where' in transformation_dict:
            _where = transformation_dict['where']
            where = self.parse_where(_where)
            _do = transformation_dict['do']
            do_result = self.parse_operation(_do, op_type='transformation')
            if not isinstance(do_result, pd.Series):
                # Broadcast a scalar result so Series.where can align it.
                do_result = pd.Series(do_result, index=where.index)
            if 'else' in transformation_dict:
                _else = transformation_dict['else']
                else_result = self.parse_operation(_else, op_type='transformation')
            else:
                else_result = np.nan
            obj = do_result.where(cond=where, other=else_result)
        else:
            _do = transformation_dict['do']
            do_result = self.parse_operation(_do, op_type='transformation')
            obj = do_result
        _as = transformation_dict['@as']
        setattr(self, _as, obj)
    def parse_operation(self, transform_dict, op_type):
        """Recursively evaluate an operation spec and return its result.

        Arguments may be nested operation dicts, dotted attribute paths
        into previously-stored results, date-like strings, or literals.
        NOTE(review): pops '@type' and inserts 'kwargs', i.e. mutates the
        caller's dict -- callers that reuse specs must deepcopy first
        (see parse_repeat).
        """
        operation = transform_dict.pop('@type')
        if op_type == 'transformation':
            transform = self._transforms[operation]
        elif op_type == 'aggregation':
            transform = self._aggregators[operation]
        else:
            raise ValueError
        args = transform_dict['args']
        kwargs = transform_dict.setdefault('kwargs', {})
        out_args = []
        # TODO: Assumes kwargs aren't nested
        out_kwargs = kwargs
        if not isinstance(args, list):
            args = [args]
        for arg in args:
            if isinstance(arg, dict):
                if '@type' in arg:
                    arg = self.parse_operation(arg, op_type)
            elif isinstance(arg, str):
                baseobj = arg.split('.')[0]
                if hasattr(self, baseobj):
                    arg = self.parse_datasource(arg)
                elif re.search(self.year_re, arg):
                    # Best-effort timestamp parsing: leave the string
                    # untouched if pandas cannot parse it.
                    try:
                        arg = pd.to_datetime(arg).tz_localize('UTC')#.asm8
                    except:
                        pass
            out_args.append(arg)
        return transform(*out_args, **out_kwargs)
    def parse_aggregation(self, aggregation_dict):
        """Evaluate the ``do`` reduction and store it as attribute ``@as``."""
        _do = aggregation_dict['do']
        obj = self.parse_operation(_do, op_type='aggregation')
        _as = aggregation_dict['@as']
        setattr(self, _as, obj)
    def parse_rolling(self, rolling_dict):
        """Apply a rolling-window aggregation over ``select``.

        The remaining keys of the dict (after popping aggregation/@as/
        select) are forwarded to pandas' ``rolling``.
        """
        _agg = rolling_dict.pop('aggregation')
        _as = rolling_dict.pop('@as')
        _select = rolling_dict.pop('select')
        select = self.parse_datasource(_select)
        roller = select.rolling(**rolling_dict)
        obj = getattr(roller, _agg)()
        setattr(self, _as, obj)
    def parse_where(self, where_dict):
        """Evaluate a single-key "where" clause into a boolean mask."""
        (key, values), = where_dict.items()
        if key in self._logicals:
            return self.parse_logical({key : values})
        elif key in self._comparisons:
            return self.parse_binary_comparison({key : values})
    def parse_logical(self, logical_dict):
        """Reduce a list of nested clauses with a logical operator."""
        assert (len(logical_dict) == 1)
        (key, values), = logical_dict.items()
        args = []
        assert isinstance(values, list)
        for value in values:
            assert isinstance(value, dict)
            for subkey, subvalue in value.items():
                # Avoid set lookups
                if subkey in self._logicals:
                    result = self.parse_logical(value)
                    args.append(result)
                elif subkey in self._comparisons:
                    result = self.parse_binary_comparison(value)
                    args.append(result)
        logical_result = self._logicals[key](args)
        return logical_result
    def parse_binary_comparison(self, comparison_dict):
        """Evaluate a two-argument comparison clause into a boolean mask."""
        # Kind of wonky
        assert (len(comparison_dict) == 1)
        (key, values), = comparison_dict.items()
        args = []
        assert (len(values) == 2)
        for arg in values:
            if isinstance(arg, str):
                baseobj = arg.split('.')[0]
                if hasattr(self, baseobj):
                    arg = self.parse_datasource(arg)
                elif re.search(self.year_re, arg):
                    # Best-effort timestamp parsing, as in parse_operation.
                    try:
                        arg = pd.to_datetime(arg).tz_localize('UTC')#.asm8
                    except:
                        pass
            args.append(arg)
        comparison_result = self._comparisons[key](*args)
        return comparison_result
    def parse_delete(self, delete_dict):
        """Remove previously stored results named in ``select``."""
        _select = delete_dict['select']
        for arg in _select:
            delattr(self, arg)
    def parse_join(self, join_dict):
        """Concatenate named results with pandas.concat and store as ``@as``.

        Leftover keys of the dict are forwarded to pandas.concat.
        """
        obj_names = join_dict.pop('select')
        objs = [getattr(self, name) for name in obj_names]
        _as = join_dict.pop('@as')
        obj = pd.concat(objs, **join_dict)
        setattr(self, _as, obj)
    def parse_datasource(self, name):
        """Resolve a dotted path (e.g. 'result.index') against self."""
        arg_list = name.split('.')
        obj = self
        for arg in arg_list:
            obj = getattr(obj, arg)
        return obj
    def parse_annotation(self, annotation_dict):
        """Record a named boolean annotation mask under target ``on``."""
        # TODO: Note that if index changes, annotation will be misaligned
        _where = annotation_dict.pop('where')
        _name = annotation_dict.pop('name')
        _on = annotation_dict.pop('on')
        where = self.parse_where(_where)
        result_d = {**annotation_dict}
        result_d.update({'where' : where})
        if not _on in self.annotations:
            self.annotations.update({_on : {}})
        self.annotations[_on].update({_name : {}})
        self.annotations[_on][_name].update(result_d)
    def set_datasources(self):
        """Instantiate an input driver per cookbook 'sources' entry."""
        datasources = self.cookbook['sources']
        for datasource in datasources:
            datasource_type = datasource.pop('@type')
            name = datasource['name']
            path = datasource['url']
            with open(path) as infile:
                driver_dict = json.load(infile)
            driver_context = driver_dict.pop('@context')
            driver_type = driver_dict.pop('@type')
            driver = self.input_drivers[driver_type](driver_dict)
            setattr(self, name, driver)
    def set_destinations(self):
        """Instantiate an output driver per cookbook 'destinations' entry."""
        destinations = self.cookbook['destinations']
        for destination in destinations:
            destination_type = destination.pop('@type')
            name = destination['name']
            path = destination['url']
            with open(path) as infile:
                driver_dict = json.load(infile)
            driver_context = driver_dict.pop('@context')
            driver_type = driver_dict.pop('@type')
            driver = self.output_drivers[driver_type](driver_dict)
            setattr(self, name, driver)
    def prepare_ingredients(self):
        """Run each 'ingredients' query and store its data under ``@as``."""
        ingredientlist = self.cookbook['ingredients']
        for ingredient in ingredientlist:
            driver_type = ingredient.pop('@type')
            driver_name = ingredient.pop('source')
            _as = ingredient.pop('@as')
            datasource = getattr(self, driver_name)
            data = datasource.run(ingredient)
            setattr(self, _as, data)
    def prepare_recipe(self):
        """Dispatch each recipe step to its handler, in order."""
        recipelist = self.cookbook['recipe']
        for recipe in recipelist:
            recipe_type = recipe.pop('@type')
            self.recipe_types[recipe_type](recipe)
    def prepare_serving(self):
        """Resolve field/tag names and write each serving to its driver."""
        servinglist = self.cookbook['servings']
        for serving in servinglist:
            driver_type = serving.pop('@type')
            driver_name = serving.pop('destination')
            dataclient = getattr(self, driver_name)
            # TODO: This needs to be moved into the driver
            if isinstance(serving['fields'], list):
                serving['fields'] = [getattr(self, field) for field in serving['fields']]
            elif isinstance(serving['fields'], str):
                serving['fields'] = getattr(self, serving['fields'])
            if 'tags' in serving:
                if isinstance(serving['tags'], list):
                    serving['tags'] = [getattr(self, tag) for tag in serving['tags']]
                elif isinstance(serving['tags'], str):
                    serving['tags'] = getattr(self, serving['tags'])
            data = dataclient.run(serving)
    def run(self):
        """Execute the whole pipeline: sources -> ingredients -> recipe -> servings."""
        self.set_datasources()
        self.set_destinations()
        self.prepare_ingredients()
        self.prepare_recipe()
        self.prepare_serving()
| StarcoderdataPython |
6604828 | # -*- coding: UTF-8 -*-
import os
from gettext import gettext as _
from sugar.activity import activity
# Capture modes; the TYPE_* names are aliases kept for callers that think
# in terms of media type rather than camera mode.
MODE_PHOTO = 0
MODE_VIDEO = 1
MODE_AUDIO = 2
TYPE_PHOTO = MODE_PHOTO
TYPE_VIDEO = MODE_VIDEO
TYPE_AUDIO = MODE_AUDIO

# Recorder state machine states.
STATE_INVISIBLE = 0
STATE_READY = 1
STATE_RECORDING = 2
STATE_PROCESSING = 3
STATE_DOWNLOADING = 4

# Per-media-type metadata: internal name, MIME type, file extension and
# localized display string.
MEDIA_INFO = {
    TYPE_PHOTO: {
        'name': 'photo',
        'mime': 'image/jpeg',
        'ext': 'jpg',
        'istr': _('Photo'),
    },
    TYPE_VIDEO: {
        'name': 'video',
        'mime': 'video/ogg',
        'ext': 'ogg',
        'istr': _('Video'),
    },
    TYPE_AUDIO: {
        'name': 'audio',
        'mime': 'audio/ogg',
        'ext': 'ogg',
        'istr': _('Audio'),
    },
}

# D-Bus identity of the Record activity service.
DBUS_SERVICE = "org.laptop.Record"
DBUS_IFACE = DBUS_SERVICE
DBUS_PATH = "/org/laptop/Record"

# Absolute path to the activity's bundled graphics directory.
GFX_PATH = os.path.join(activity.get_bundle_path(), "gfx")
| StarcoderdataPython |
6504523 | #! /usr/bin/python3.6
import logging
import json
import re
from collections import defaultdict
from autobridge.Opt.Slot import Slot
from autobridge.Device.DeviceManager import DeviceU250
U250_inst = DeviceU250()
class CreateResultJson:
  """Collects floorplanning/routing results and writes a front-end JSON.

  Pulls together the floorplan, wrapper creator, global router, board and
  HLS project information and serializes a single dict consumed by the
  back end.
  """
  def __init__(
      self,
      floorplan,
      wrapper_creater,
      global_router,
      board,
      hls_prj_manager,
      slot_manager,
      top_rtl_parser,
      new_top_rtl):
    self.floorplan = floorplan
    self.wrapper_creater = wrapper_creater
    self.global_router = global_router
    self.board = board
    self.hls_prj_manager = hls_prj_manager
    self.slot_manager = slot_manager
    self.top_rtl_parser = top_rtl_parser
    self.new_top_rtl = new_top_rtl
  def __getNeighborSection(self):
    """Map each active slot to its neighbor slot names per direction."""
    neighbors = defaultdict(dict)
    for slot in self.slot_manager.getActiveSlotsIncludeRouting():
      for dir in ['UP', 'DOWN', 'LEFT', 'RIGHT']:
        neighbor_slots = self.slot_manager.getNeighborSlotsIncludeRouting(slot, dir)
        neighbors[slot.getRTLModuleName()][dir] = [s.getRTLModuleName() for s in neighbor_slots]
    return neighbors
  def __getOppositeDirection(self, dir):
    """Map a direction to its opposite (UP<->DOWN, LEFT<->RIGHT)."""
    opposites = {'UP': 'DOWN', 'DOWN': 'UP', 'LEFT': 'RIGHT', 'RIGHT': 'LEFT'}
    assert dir in opposites, f'incorrect direction {dir}'
    return opposites[dir]
  def __getSharedAnchorSection(self, neighbors, path_planning_wire):
    """
    [obsolete] collect the shared anchors with immediate neighbors in each direction
    If we do not include passing edges to the wrapper, not all IOs are to immediate neighbors
    Right now each slot will only connect with direct neighbors
    """
    shared_anchors = defaultdict(dict)
    for slot_name in neighbors.keys():
      for dir in ['UP', 'DOWN', 'LEFT', 'RIGHT']:
        neighbor_slots = neighbors[slot_name][dir]
        reverse_dir = self.__getOppositeDirection(dir)
        # Outbound: wires leaving this slot that some neighbor receives.
        if f'{dir}_OUT' in path_planning_wire[slot_name]:
          wires_out_from_this_slot = path_planning_wire[slot_name][f'{dir}_OUT']
          wires_into_neighbor_slot = []
          for neighbor_slot in neighbor_slots:
            if f'{reverse_dir}_IN' in path_planning_wire[neighbor_slot]:
              wires_into_neighbor_slot.extend( \
                  path_planning_wire[neighbor_slot][f'{reverse_dir}_IN'])
          shared_anchors_outbound = [anchor for anchor in wires_out_from_this_slot if anchor in wires_into_neighbor_slot]
          shared_anchors[slot_name][f'{dir}_OUT'] = \
              {anchor : self.top_rtl_parser.getIntegerWidthOfRegOrWire(anchor) for anchor in shared_anchors_outbound}
        # Inbound: wires entering this slot that some neighbor drives.
        if f'{dir}_IN' in path_planning_wire[slot_name]:
          wires_into_this_slot = path_planning_wire[slot_name][f'{dir}_IN']
          wires_out_from_neighbor_slot = []
          for neighbor_slot in neighbor_slots:
            if f'{reverse_dir}_OUT' in path_planning_wire[neighbor_slot]:
              wires_out_from_neighbor_slot.extend( \
                  path_planning_wire[neighbor_slot][f'{reverse_dir}_OUT'])
          shared_anchors_inbound = [anchor for anchor in wires_into_this_slot if anchor in wires_out_from_neighbor_slot]
          shared_anchors[slot_name][f'{dir}_IN'] = \
              {anchor : self.top_rtl_parser.getIntegerWidthOfRegOrWire(anchor) for anchor in shared_anchors_inbound}
    return shared_anchors
  def __getSlotWrapperRTLSection(self):
    """Map each active slot name to its control-inclusive wrapper RTL."""
    slot_to_rtl = {}
    for slot in self.slot_manager.getActiveSlotsIncludeRouting():
      slot_to_rtl[slot.getRTLModuleName()] = self.wrapper_creater.getCtrlInclusiveWrapper(slot)
    return slot_to_rtl
  def __getSlotToDirToWireNum(self, slot_to_dir_to_wires, all_slot_pairs):
    """
    collect the total wire width at each boundary of slots
    """
    slot_to_dir_to_num = defaultdict(dict)
    def __getWireWidth(wire):
      """Width in bits of one wire entry; [X:Y] yields X-Y+1, else 1."""
      if len(wire) == 2:
        return 1
      assert len(wire) == 3
      rtl_width = wire[1] # [X:0]
      match = re.search(r'\[[ ]*(\d+)[ ]*:[ ]*(\d+)[ ]*\]', rtl_width)
      int_width = int(match.group(1)) - int(match.group(2)) + 1
      return int_width
    for slot, dir_to_wires in slot_to_dir_to_wires.items():
      for dir, wires in dir_to_wires.items():
        all_wire_num = sum(__getWireWidth(wire) for wire in wires)
        slot_to_dir_to_num[slot][dir] = all_wire_num
    # double check the results are consistent: facing boundaries of any
    # adjacent slot pair must report the same total width.
    for pair in all_slot_pairs:
      slot0_name = pair[0]
      slot1_name = pair[1]
      slot0 = Slot(U250_inst, slot0_name)
      slot1 = Slot(U250_inst, slot1_name)
      if slot0.isToTheLeftOf(slot1):
        if 'RIGHT' in slot_to_dir_to_num[slot0_name]:
          assert slot_to_dir_to_num[slot0_name]['RIGHT'] == slot_to_dir_to_num[slot1_name]['LEFT']
      elif slot0.isToTheRightOf(slot1):
        if 'LEFT' in slot_to_dir_to_num[slot0_name]:
          assert slot_to_dir_to_num[slot0_name]['LEFT'] == slot_to_dir_to_num[slot1_name]['RIGHT']
      elif slot0.isAbove(slot1):
        if 'DOWN' in slot_to_dir_to_num[slot0_name]:
          assert slot_to_dir_to_num[slot0_name]['DOWN'] == slot_to_dir_to_num[slot1_name]['UP']
      elif slot0.isBelow(slot1):
        if 'UP' in slot_to_dir_to_num[slot0_name]:
          assert slot_to_dir_to_num[slot0_name]['UP'] == slot_to_dir_to_num[slot1_name]['DOWN']
      else:
        assert False
    return slot_to_dir_to_num
  def createResultJson(self, file = 'front_end_result.json'):
    """Assemble the front-end result dict and write it to *file* as JSON."""
    result = {}
    result['CR_NUM_Y'] = self.board.CR_NUM_VERTICAL
    result['CR_NUM_X'] = self.board.CR_NUM_HORIZONTAL
    result['FPGA_PART_NAME'] = self.board.FPGA_PART_NAME
    result['ORIG_RTL_PATH'] = self.hls_prj_manager.getRTLDir()
    result['FloorplanVertex'] = self.floorplan.getSlotNameToVertexNames()
    # result['FloorplanEdge'] = self.floorplan.getSlotNameToEdgeNames()
    result['SlotIO'] = self.wrapper_creater.getSlotNameToIOList()
    result['SlotWrapperRTL'] = self.__getSlotWrapperRTLSection()
    result['TopIO'] = self.top_rtl_parser.getDirWidthNameOfAllIO()
    result['NewTopRTL'] = self.new_top_rtl
    result['PathPlanningWire'] = self.wrapper_creater.getSlotNameToDirToWires()
    # result['Utilization'] = self.floorplan.getUtilization()
    # result['Neighbors'] = self.__getNeighborSection()
    result['ComputeSlots'] = [ s.getRTLModuleName() for s in self.slot_manager.getComputeSlots() ]
    result['PureRoutingSlots'] = [ s.getRTLModuleName() for s in self.slot_manager.getPureRoutingSlots() ]
    result['AllSlotPairs'] = [[p[0].getRTLModuleName(), p[1].getRTLModuleName()] for p in self.slot_manager.getAllSlotPairs()]
    result['InSlotPipelineStyle'] = self.wrapper_creater.in_slot_pipeline_style
    result['SlotToDirToWireNum'] = self.__getSlotToDirToWireNum(result['PathPlanningWire'], result['AllSlotPairs'])
    # Context manager ensures the file is flushed and closed even if
    # serialization fails (the original left the handle open).
    with open(file, 'w') as f:
      f.write(json.dumps(result, indent=2))
8183792 | <gh_stars>10-100
import time
import uuid
from test_helper import get_fail_workflow_execution
import floto
import floto.api
import floto.decider
from floto.specs import DeciderSpec
from floto.specs.task import ActivityTask, ChildWorkflow
from floto.specs.retry_strategy import InstantRetry
def decider_spec_child_workflow():
    """Build the DeciderSpec run by the child-workflow decider.

    The single activity task is one that is known to fail, so the child
    workflow exercises the retry/failure path.
    """
    failing_task = ActivityTask(domain='floto_test', name='activity_fails_2', version='v2')
    return DeciderSpec(domain='floto_test',
                       task_list='child_workflow_task_list',
                       activity_tasks=[failing_task],
                       default_activity_task_list='floto_activities',
                       repeat_workflow=False)
def decider_spec_workflow():
    """Build the parent DeciderSpec whose only task is the child workflow.

    The child workflow gets an instant-retry strategy (3 retries) and the
    parent decider terminates once the workflow completes.  A fresh UUID
    task list isolates this run from concurrent tests.
    """
    retry = InstantRetry(retries=3)
    child = ChildWorkflow(domain='floto_test', workflow_type_name='test_child_workflow',
                          workflow_type_version='v2', retry_strategy=retry,
                          task_list='child_workflow_task_list')
    return DeciderSpec(domain='floto_test',
                       task_list=str(uuid4()),
                       activity_tasks=[child],
                       default_activity_task_list='floto_activities',
                       terminate_decider_after_completion=True)
def test_16():
    """End-to-end test: a workflow whose child workflow's activity fails.

    Starts a SWF workflow execution, runs the child-workflow decider in a
    separate process alongside the parent decider, then collects the
    failure result of the execution.  Statement order matters: the child
    decider must be running before the parent decider blocks in run().
    """
    decider_workflow = floto.decider.Decider(decider_spec=decider_spec_workflow())
    decider_child_workflow = floto.decider.Decider(decider_spec=decider_spec_child_workflow())
    workflow_args = {'domain': decider_workflow.domain,
                     'workflow_type_name': 'my_workflow_type',
                     'workflow_type_version': 'v1',
                     'task_list': decider_workflow.task_list,
                     'input':{'foo':'bar'}}
    response = floto.api.Swf().start_workflow_execution(**workflow_args)
    run_id = response['runId']
    workflow_id = 'my_workflow_type_v1'
    print(30 * '-' + ' Running Test 16 ' + 30 * '-')
    # Child decider runs concurrently; the parent decider call blocks
    # until the workflow finishes (terminate_decider_after_completion).
    decider_child_workflow.run(separate_process=True)
    decider_workflow.run()
    # Clean up the background decider process before inspecting results.
    decider_child_workflow._separate_process.terminate()
    result = get_fail_workflow_execution(decider_workflow.domain, run_id, workflow_id)
    print(30 * '-' + ' Done Test 16 ' + 30 * '-')
    return result
| StarcoderdataPython |
8138312 | # python3
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Base classes for Epistemic Neural Network design in JAX / Haiku."""
import abc
from typing import Dict, Iterator, NamedTuple, Optional, Tuple, Union
import dataclasses
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import typing_extensions
# Type aliases shared by the ENN interfaces below.
Array = Union[np.ndarray, jnp.DeviceArray]  # Any NumPy or JAX array.
DataIndex = Array  # Always integer
Index = Array  # Epistemic index, paired with network
RngKey = jnp.DeviceArray  # Integer pairs, see jax.random
class OutputWithPrior(NamedTuple):
  """Output wrapper for networks with prior functions."""
  train: Array  # Trainable part of the output; gradients flow through it.
  prior: Array = np.zeros(1)  # Additive prior; gradients are stopped in preds.
  # NOTE(review): this default dict is one shared object for every instance
  # that relies on the default -- treat it as read-only.
  extra: Dict[str, Array] = {}
  @property
  def preds(self) -> Array:
    """Combined prediction: train + prior, with no gradient into the prior."""
    return self.train + jax.lax.stop_gradient(self.prior)
# A network output is either a plain array or the train/prior pair above.
Output = Union[Array, OutputWithPrior]
class EpistemicModule(abc.ABC, hk.Module):
  """Epistemic neural network abstract base class as Haiku module."""
  @abc.abstractmethod
  def __call__(self, inputs: Array, index: Index) -> Output:
    """Forwards the epistemic network y = f(x, z).

    Args:
      inputs: batch of inputs x.
      index: epistemic index z (see EpistemicIndexer).

    Returns:
      Network output, either a plain array or an OutputWithPrior.
    """
class ApplyFn(typing_extensions.Protocol):
  """Applies the ENN at given parameters, inputs, index (structural type)."""
  def __call__(self, params: hk.Params, inputs: Array, index: Index) -> Output:
    """Runs the forward pass y = f(params, inputs, index)."""
class InitFn(typing_extensions.Protocol):
  """Initializes the ENN at given rng_key, inputs, index (structural type)."""
  def __call__(self, rng_key: RngKey, inputs: Array, index: Index) -> hk.Params:
    """Creates initial parameters from an RNG key and example inputs/index."""
class EpistemicIndexer(typing_extensions.Protocol):
  """Generates indices for the ENN from random keys (structural type)."""
  def __call__(self, key: RngKey) -> Index:
    """Samples a single epistemic index z for the network from *key*."""
@dataclasses.dataclass
class EpistemicNetwork:
  """Convenient pairing of Haiku transformed function and index sampler."""
  apply: ApplyFn  # Forward pass at given params/inputs/index.
  init: InitFn  # Parameter initializer.
  indexer: EpistemicIndexer  # Samples the epistemic index per forward pass.
class Batch(NamedTuple):
  """One batch of (optionally weighted, optionally indexed) training data."""
  x: Array  # Inputs
  y: Array  # Targets
  data_index: Optional[DataIndex] = None  # Integer identifiers for data
  weights: Optional[Array] = None  # None should default to weights = jnp.ones
  # NOTE(review): shared default dict -- treat as read-only.
  extra: Dict[str, Array] = {}  # You can put other optional stuff here
BatchIterator = Iterator[Batch]  # Equivalent to the dataset we loop through
LossMetrics = Dict[str, Array]  # Named scalar metrics emitted by a LossFn.
class LossFn(typing_extensions.Protocol):
  """Calculates a loss based on one batch of data per rng_key (structural type)."""
  def __call__(self,
               enn: EpistemicNetwork,
               params: hk.Params,
               batch: Batch,
               key: RngKey) -> Tuple[Array, LossMetrics]:
    """Returns (scalar loss, metrics dict) for one batch and RNG key."""
| StarcoderdataPython |
8150456 | <filename>inbm/diagnostic-agent/tests/unit/test_docker_bench_security_runner.py<gh_stars>1-10
from unittest import TestCase
from diagnostic.docker_bench_security_runner import DockerBenchRunner
from mock import patch
# Canned Docker-Bench-Security report with no warnings (all checks pass).
docker_bench_pass_output = "[INFO] 6 - Docker Security Operations \n" \
                           "[INFO] 6.1 - Avoid image sprawl \n" \
                           "[INFO] * There are currently: 4 images\n" \
                           "[INFO] 6.2 - Avoid container sprawl\n" \
                           "[INFO] * There are currently a total of 12 containers, " \
                           "with 2 of them currently running"
# Report with WARN lines naming a failing container ('abc') in checks 5.x.
docker_bench_fail_container_output = "[WARN] 5.25 - Ensure the container is restricted from " \
                                     "acquiring additional privileges\n" \
                                     "[WARN] * Privileges not restricted: abc\n" \
                                     "[WARN] 5.26 - Ensure container health is checked at runtime\n" \
                                     "[WARN] * Health check not set: abc\n" \
                                     "[INFO] 5.27 - Ensure docker commands always get the latest version of the image\n" \
                                     "[WARN] 5.28 - Ensure PIDs cgroup limit is used\n"
# Report with WARN lines naming failing images (a1..a3) in checks 4.x.
docker_bench_fail_image_output = "[WARN] 4.5 - Ensure Content trust for Docker is Enabled \n" \
                                 "[WARN] 4.6 - Ensure HEALTHCHECK instructions have been added to the " \
                                 "container image\n" \
                                 "[WARN] * No Healthcheck found: [a1]\n" \
                                 "[WARN] * No Healthcheck found: [a2]\n" \
                                 "[WARN] * No Healthcheck found: [a3]\n"
class TestDockerBenchSecurityRunner(TestCase):
    """Unit tests for DockerBenchRunner report parsing.

    Each test feeds a canned Docker-Bench-Security report into the runner
    (via the mocked trtl call) and checks the parsed pass/fail summary.
    Note: ``assertEquals`` was replaced with ``assertEqual`` -- the former
    is a deprecated alias removed in Python 3.12.
    """

    @patch('inbm_common_lib.shell_runner.PseudoShellRunner.get_process')
    @patch('inbm_lib.trtl.Trtl.run_docker_bench_security_test')
    def test_success_dbs_run(self, mocked_trtl, mock_shellrunner):
        """An all-pass report yields result=True and empty failure lists."""
        mocked_trtl.return_value = docker_bench_pass_output
        dbs = DockerBenchRunner()
        dbs.start()
        dbs.join()
        self.assertTrue(dbs.result)
        self.assertEqual("Test results: All Passed", dbs.result_string)
        self.assertEqual([], dbs.failed_container_list)
        self.assertEqual([], dbs.failed_image_list)

    @patch('inbm_common_lib.shell_runner.PseudoShellRunner.get_process')
    @patch('inbm_lib.trtl.Trtl.run_docker_bench_security_test')
    def test_fail_dbs_container_run(self, mocked_trtl, mock_shellrunner):
        """Container WARNs are summarized and the container name collected."""
        mocked_trtl.return_value = docker_bench_fail_container_output
        dbs = DockerBenchRunner()
        dbs.start()
        dbs.join()
        self.assertEqual(dbs.result, False)
        self.assertEqual(dbs.result_string, "Test results: Failures in: 5.25,,5.26,,5.28")
        self.assertEqual(dbs.failed_container_list, ['abc'])
        self.assertEqual(dbs.failed_image_list, [])

    @patch('inbm_common_lib.shell_runner.PseudoShellRunner.get_process')
    @patch('inbm_lib.trtl.Trtl.run_docker_bench_security_test')
    def test_fail_dbs_image_run(self, mocked_trtl, mock_shellrunner):
        """Image WARNs are summarized and all image names collected."""
        mocked_trtl.return_value = docker_bench_fail_image_output
        dbs = DockerBenchRunner()
        dbs.start()
        dbs.join()
        self.assertEqual(dbs.result, False)
        self.assertEqual(dbs.result_string, "Test results: Failures in: 4.5,4.6")
        self.assertEqual(dbs.failed_container_list, [])
        self.assertEqual(dbs.failed_image_list, ['a1', 'a2', 'a3'])

    @patch('inbm_common_lib.shell_runner.PseudoShellRunner.get_process')
    @patch('inbm_lib.trtl.Trtl.run_docker_bench_security_test')
    def test_fail_dbs_not_run(self, mocked_trtl, mock_shellrunner):
        """Empty trtl output means DBS never ran: all fields stay None."""
        mocked_trtl.return_value = ''
        dbs = DockerBenchRunner()
        dbs.start()
        dbs.join()
        self.assertIsNone(dbs.result)
        self.assertIsNone(dbs.result_string)
        self.assertIsNone(dbs.failed_container_list)
        self.assertIsNone(dbs.failed_image_list)
| StarcoderdataPython |
288429 | # Strings - this is how you type a comment in python
# Strings come in three quoting styles in Python:
'Hello world using single quotes'
"Hello world using double quotes"
"""Hello world using
triple quotes, also known as
multi-line strings"""
# To print an object to the screen, use the print function
print('Hello world') # This will print Hello world in the terminal
# To use an apostrophe ' inside a string, wrap the
# string in double quotes like below
print("I'm using an apostrophe in this statement")
# To use double quotes within your string, wrap the string in
# single quotes like below
print('This is a "quote from a book" which I like')
# Variables are names that reference objects in Python (they are
# labels bound to values, not memory boxes). They should be named
# using snake_case
message = "Hello! welcome to the Algorithm's course"
name = "Mashrur"
# You can use them in your print function to print to the screen
print(message)
print(name)
# We can pass several strings to one print call; the comma
# automatically results in a space in-between the
# printed values
print(message, name)
| StarcoderdataPython |
3476686 | from unittest import TestCase
from lie2me.fields import Boolean
from .common_tests import CommonTests
class BooleanTestCase(TestCase, CommonTests):
    """Exercise the Boolean field, plus the shared field contract tests."""

    def setUp(self):
        # CommonTests reads these to run the generic field checks.
        self.Field = Boolean
        self.valid_default = 'yes'

    def test_true_values(self):
        """Every recognised truthy spelling submits as True."""
        field = Boolean()
        for candidate in (True, 'true', 'True', 'yes', 'Yes', 1, '1', 'on', 'On'):
            submitted, _error = field.submit(candidate)
            self.assertEqual(submitted, True)

    def test_false_values(self):
        """Every recognised falsy spelling submits as False."""
        field = Boolean()
        for candidate in (False, 'false', 'False', 'no', 'No', 0, '0', 'off', 'Off'):
            submitted, _error = field.submit(candidate)
            self.assertEqual(submitted, False)

    def test_invalid_value(self):
        """A value outside the recognised set yields the invalid-boolean error."""
        _value, error = Boolean().submit(42)
        self.assertEqual(error, 'Invalid boolean.')
| StarcoderdataPython |
320380 | from retic import Void
from iri2uri import Iri2Uri
from Timer import Timer
import os
#bg
def main()->Void:
    """Smoke tests plus a stress run for Iri2Uri.iri2uri.

    NOTE(review): several literals ("COMET", "<EMAIL>", "BLACK LEFT
    POINTING INDEX") look like anonymized stand-ins for the original
    non-ASCII test characters -- confirm against the upstream httplib2
    iri2uri tests before relying on them.
    """
    iri2uri = Iri2Uri().iri2uri
    ### 1. test correctness on invariant iri
    # Pure-ASCII URIs from RFC 3986 must pass through unchanged.
    invariant = [
        "ftp://ftp.is.co.za/rfc/rfc1808.txt",
        "http://www.ietf.org/rfc/rfc2396.txt",
        "ldap://[2001:db8::7]/c=GB?objectClass?one",
        "mailto:<EMAIL>",
        "news:comp.infosystems.www.servers.unix",
        "tel:+1-816-555-1212",
        "telnet://192.0.2.16:80/",
        "urn:oasis:names:specification:docbook:dtd:xml:4.1.2" ]
    for uri in invariant:
        if not (uri == iri2uri(uri)):
            raise AssertionError("test 1")
    ### 2. test correctness on variant iri
    if not("http://Not-a-COMET.com/Not-a-COMET" == iri2uri("http://Not-a-COMET.com/Not-a-COMET")):
        raise AssertionError("test 2")
    if not("http://bitworking.org/?fred=another non_COMET" == iri2uri("http://bitworking.org/?fred=another non_COMET")):
        raise AssertionError("test 3")
    if not("http://bitworking.org/whats\"with\"all the COMET" == iri2uri("http://bitworking.org/whats\"with\"all the COMET")):
        raise AssertionError("test 4")
    if not("#acOMET" == iri2uri("#acOMET")):
        raise AssertionError("test 5")
    if not("/fred?bar=-BLACK LEFT POINTING INDEX#and-of-course-a-COMET" == iri2uri("/fred?bar=-BLACK LEFT POINTING INDEX#and-of-course-a-COMET")):
        raise AssertionError("test 6")
    # Idempotence: converting twice equals converting once.
    if not("/fred?bar=a#ynotCOMET" == iri2uri(iri2uri("/fred?bar=a#ynotCOMET"))):
        raise AssertionError("test 7")
    #assertNotEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri("/fred?bar=\N{BLACK LEFT POINTING INDEX}#COMET".encode('utf-8')))
    ### 3. stress test
    # Run the converter over every URL in the sample file (first CSV column).
    iri2uri = Iri2Uri().iri2uri
    testfile = os.path.join(os.path.dirname(__file__), "sample_urls.csv")
    with open(testfile) as fd:
        for ln in fd:
            url = ln.split(",", 1)[0]
            iri2uri(url)
    return
# Benchmark driver: run the full test suite 10 times under the Timer
# context manager (Timer is project-local; presumably reports elapsed
# time on exit -- confirm).
t = Timer()
with t:
    for i in range(10):
        main()
| StarcoderdataPython |
1681239 | from unittest import TestCase
from algorithms.minimax import Minimax
from utils.node import Node
from tests._tree_example import eval_side_effect
from tests._tree_example import terminal_side_effect
from tests._tree_example import actions_side_effect
from tests._tree_example import result_side_effect
class TestMinimax(TestCase):
    """Unit tests for the Minimax search over the shared example tree."""

    def setUp(self):
        self.minimax_search = Minimax()

    def _run_search(self, state, *extra_args):
        """Search the example tree starting at *state*; returns (value, move).

        extra_args may carry an optional depth limit, forwarded verbatim
        to Minimax.search after the start node.
        """
        return self.minimax_search.search(state, terminal_side_effect, result_side_effect,
                                          actions_side_effect, eval_side_effect,
                                          Node(state), *extra_args)

    def test_search_from_root(self):
        """From the root, minimax should pick move a1 with value 3."""
        value, move = self._run_search("a")
        self.assertEqual("a1", move, "This graph should determine a1 to be the optimal move")
        self.assertEqual(3, value, "Valuation of this move is 3")

    def test_search_near_terminal(self):
        """One level above the leaves, the maximum leaf (b2, value 12) wins."""
        value, move = self._run_search("b")
        self.assertEqual("b2", move,
                         "Starting at node b should just return the move to the maximum leaf node")
        self.assertEqual(12, value, "Valuation of this move is 12")

    def test_search_at_terminal(self):
        """A terminal state has no legal moves, so no move is returned."""
        _value, move = self._run_search("e")
        self.assertIsNone(move)

    def test_search_at_depth_limit(self):
        """With depth limit 1, only the immediate children are evaluated."""
        value, move = self._run_search("a", 1)
        self.assertEqual("a3", move,
                         "Depth limit means we should just end up picking our child max")
        self.assertEqual(4, value, "Max child node is b, with a value of 4")
| StarcoderdataPython |
9751341 | <filename>python/testData/completion/fStringLikeCompletionNotAvailableInStrFormatCalls.py
my_expr = 42
s = 'foo{my_e<caret>'.format(my_expr='spam') | StarcoderdataPython |
1963196 | """
This module contains the discord cog for managing channels
"""
import asyncio
from typing import Optional
from uuid import uuid4, UUID
import discord
from discord.ext import commands
from models import Guild, ChannelCategory, VoiceChannel, ChannelOwner
class ChannelCog(commands.Cog, name='Channel Commands'):
    """
    A command Cog that handles functionalities related to channels
    """
    # Guild-level permissions a member needs to invoke the management
    # commands below (applied via @commands.has_guild_permissions).
    command_permissions: dict = dict(manage_channels=True,
                                     manage_messages=True,
                                     move_members=True)
    # Seconds after which the bot's confirmation messages and the invoking
    # command message are auto-deleted, keeping text channels tidy.
    message_delete_delay: int = 10

    def __init__(self, bot: commands.Bot):
        self.bot = bot

    @commands.command(help='Add voice channel to the database')
    @commands.has_guild_permissions(**command_permissions)
    async def add_voice(self, ctx: commands.Context, channel: discord.VoiceChannel):
        """
        Add the voice channel to the database enabling the fragment ability.

        :param ctx: the command context
        :param channel: the voice Channel to add
        """
        guild_db, category_db, bot_init = _get_guild_and_category_db_or_false(ctx.guild, channel.category)
        if not bot_init:
            await ctx.send('you must initialize bot in the server first')
            return
        channel_db: VoiceChannel
        # get_or_create makes the command idempotent: `created` tells us
        # whether the channel was already registered.
        channel_db, created = VoiceChannel.get_or_create(discord_id=channel.id, defaults=dict(
            name=channel.name, guild=guild_db, category=category_db))
        if created:
            await ctx.send(f'Channel {channel.mention} is now a fragment channel',
                           delete_after=self.message_delete_delay)
            await ctx.message.delete(delay=self.message_delete_delay)
        else:
            await ctx.send(f'Channel {channel.mention} is already a fragment channel',
                           delete_after=self.message_delete_delay)
            await ctx.message.delete(delay=self.message_delete_delay)

    @commands.command(aliases=['delete_voice'], help='Remove a voice channel from the database')
    @commands.has_guild_permissions(**command_permissions)
    async def remove_voice(self, ctx: commands.Context, channel: discord.VoiceChannel):
        """
        Remove the voice channel from the database disabling its fragment ability

        :param ctx: the command context
        :param channel: the voice channel to remove
        """
        channel_db: VoiceChannel = VoiceChannel.get_or_none(discord_id=channel.id)
        if channel_db is None:
            await ctx.send(f'Channel {channel.mention} is not a fragment channel',
                           delete_after=self.message_delete_delay)
            await ctx.message.delete(delay=self.message_delete_delay)
            return
        channel_db.delete_instance()
        await ctx.send(f'Channel {channel.mention} is no longer a fragment channel',
                       delete_after=self.message_delete_delay)
        await ctx.message.delete(delay=self.message_delete_delay)

    @commands.command(help='add a channel category and the channels within it.')
    @commands.has_guild_permissions(**command_permissions)
    async def add_category(self, ctx: commands.Context, category: discord.CategoryChannel):
        """
        Add a channel category and the channels inside it to the database enabling the fragment ability.

        :param ctx: the command context
        :param category: the channel category to add
        """
        guild_db, category_db, bot_init = _get_guild_and_category_db_or_false(ctx.guild, category)
        if not bot_init:
            await ctx.send('you must initialize bot in the server first')
            return
        voice_channels: list[discord.VoiceChannel] = category.voice_channels
        if len(voice_channels) > 0:
            print('\t\tVoice channels:')
            for channel in voice_channels:
                print(f'\t\t\tChannel: {channel}')
                # get voice channel object from database if it exists otherwise insert into database
                voice_channel_db: VoiceChannel
                voice_channel_db, _ = VoiceChannel.get_or_create(discord_id=channel.id, defaults=dict(
                    name=channel.name, guild=guild_db, category=category_db))
                print(f'\t\t\tChannel db: {voice_channel_db}')
        await ctx.send(f'Channels in category {category.mention} are now fragment channels',
                       delete_after=self.message_delete_delay)
        await ctx.message.delete(delay=self.message_delete_delay)

    @commands.command(aliases=['delete_category'], help='remove a channel category and the channels within it.')
    @commands.has_guild_permissions(**command_permissions)
    async def remove_category(self, ctx: commands.Context, category: discord.CategoryChannel):
        """
        Remove a category from the database disabling its fragment ability of channels inside it

        :param ctx: the command context
        :param category: the channel category to remove
        """
        category_db: ChannelCategory = ChannelCategory.get_or_none(discord_id=category.id)
        if category_db is None:
            await ctx.send(f'Category {category.mention} is not is not a fragment category',
                           delete_after=self.message_delete_delay)
            await ctx.message.delete(delay=self.message_delete_delay)
            return
        # delete voice channels belonging to the category first, then the category row
        VoiceChannel.delete().where(VoiceChannel.category == category_db).execute()
        # delete category
        category_db.delete_instance()
        await ctx.send(f'Channels in category {category.mention} are no longer a fragment channels',
                       delete_after=self.message_delete_delay)
        await ctx.message.delete(delay=self.message_delete_delay)

    @commands.command(aliases=['display_voice', 'show_voice', 'list_fragment', 'voice_list'],
                      help='Display the voice channels with fragmentation enabled.')
    @commands.has_guild_permissions(**command_permissions)
    async def list_voice(self, ctx: commands.Context):
        """
        Displays a list of fragment enabled channels.

        :param ctx: the command context
        """
        guild_db: Guild = Guild.get_or_none(discord_id=ctx.guild.id)
        if guild_db is None:
            await ctx.send('you must initialize bot in the server first', delete_after=self.message_delete_delay)
            await ctx.message.delete(delay=self.message_delete_delay)
            return
        result_message: str = 'Fragment Channels:\n'
        # Walk categories registered for this guild and list their channels,
        # resolving each database row back to the live Discord object.
        categories_db: list[ChannelCategory] = list(ChannelCategory.select().where(ChannelCategory.guild == guild_db))
        for category_db in categories_db:
            category: discord.CategoryChannel = discord.utils.find(lambda c: c.id == category_db.discord_id,
                                                                   ctx.guild.categories)
            result_message += f'{category.mention}\n'
            voice_channels_db: list[VoiceChannel] = list(
                VoiceChannel.select().where(VoiceChannel.category == category_db))
            for voice_channel_db in voice_channels_db:
                voice_channel: discord.VoiceChannel = discord.utils.find(lambda c: c.id == voice_channel_db.discord_id,
                                                                         category.voice_channels)
                result_message += f'\t{voice_channel.mention}\n'
        await ctx.send(result_message)

    @commands.Cog.listener()
    async def on_voice_state_update(self, member: discord.Member,
                                    before: discord.VoiceState,
                                    after: discord.VoiceState):
        """
        This function runs when a users/members voice status changes
        (ie. enter/exit a voice channel). If the channel the user enters is in the database,
        a new temporary channel is created and the user is moved into it. Whenever the
        temporary channel gets empty it is deleted.

        :param member: the member whose voice status just changed
        :param before: the previous voice state of the member
        :param after: the current/new voice state of the member
        """
        if after.channel is not None:
            if after.channel == member.guild.afk_channel:
                print('Entered AFK Channel!')
                return
            print(f'Connected to voice Channel: {after.channel}')
            channel_db: VoiceChannel = VoiceChannel.get_or_none(discord_id=after.channel.id)
            if channel_db is not None:
                print(f'Channel is in the database: {channel_db}')
                guild: discord.Guild = after.channel.guild
                category: discord.CategoryChannel = after.channel.category
                # UUID in the name keeps concurrently spawned fragments unique.
                temp_channel_uuid: UUID = uuid4()
                temp_channel_name = after.channel.name + " fragment: " + str(temp_channel_uuid)
                temp_voice_channel: discord.VoiceChannel = await guild.create_voice_channel(name=temp_channel_name,
                                                                                            category=category)
                await member.move_to(temp_voice_channel)
                # Record the creator as the owner so voice_limit/voice_name work.
                channel_owner_db: ChannelOwner = ChannelOwner.create(discord_id=member.id,
                                                                     channel_id=temp_voice_channel.id)

                def wait_to_empty(m, b, a):
                    # Predicate for bot.wait_for: truthy return stops waiting.
                    # Stop once the fragment channel is empty.
                    if len(temp_voice_channel.members) == 0:
                        return True
                    # The original creator left but others remain: hand
                    # ownership to the first remaining member, keep waiting.
                    # NOTE(review): only the *original* creator's departure is
                    # detected here; a second-generation owner leaving does not
                    # transfer ownership again — confirm whether that's intended.
                    if m.id == member.id and a.channel != temp_voice_channel:
                        new_owner: discord.Member = temp_voice_channel.members[0]
                        channel_owner_db.discord_id = new_owner.id
                        channel_owner_db.save()
                    return False

                await self.bot.wait_for('voice_state_update', check=wait_to_empty)
                await temp_voice_channel.delete()
                channel_owner_db.delete_instance()
                # Brief pause before logging the deletion — presumably to let
                # Discord propagate the channel removal; TODO confirm.
                await asyncio.sleep(5)
                print('Channel Deleted...')
            else:
                print('Channel is not in the database')
        else:
            print('Disconnected from voice channels')

    @commands.command(help='Change voice channel limit')
    async def voice_limit(self, ctx: commands.Context, limit: int):
        """
        Changes the user limit of the voice channel owned by the member

        :param ctx: the command context
        :param limit: new channel user limit (0 means no limit)
        """
        channel: discord.VoiceChannel = await _check_member_owns_channel(ctx)
        if channel is None:
            return
        print(f'Changing {channel.name} limit to from: {channel.user_limit}, to: {limit}')
        await channel.edit(user_limit=limit)
        print(f'new limit: {channel.user_limit}')
        await ctx.send(f'{channel.mention} limit changed to {limit if limit > 0 else "no limit"}',
                       delete_after=self.message_delete_delay)
        await ctx.message.delete(delay=self.message_delete_delay)

    @commands.command(help='Change voice channel name')
    async def voice_name(self, ctx: commands.Context, new_name: str):
        """
        Changes the name of the voice channel owned by the member

        :param ctx: the command context
        :param new_name: new channel name
        """
        channel: discord.VoiceChannel = await _check_member_owns_channel(ctx)
        if channel is None:
            return
        print(f'Changing {channel.name} name to: {new_name}')
        await channel.edit(name=new_name)
        print(f'new name: {channel.name}')
        await ctx.send(f'{channel.mention} name changed', delete_after=self.message_delete_delay)
        await ctx.message.delete(delay=self.message_delete_delay)
def _get_guild_and_category_db_or_false(guild: discord.Guild, category: discord.CategoryChannel) \
        -> (Guild, ChannelCategory, bool):
    """
    Check whether the bot has been initialised for a guild (i.e. the guild
    exists in the database) and fetch-or-create the database row for the
    given channel category.

    :param guild: the discord guild object
    :param category: the discord channel category object
    :returns: (Guild: guild database object,
              ChannelCategory: category database object,
              bool: True when the bot has been initialised for this guild)
    """
    guild_row: Guild = Guild.get_or_none(discord_id=guild.id)
    if guild_row is None:
        # Bot never initialised in this guild: signal failure to the caller.
        return None, None, False
    category_defaults = dict(name=category.name, guild=guild_row)
    category_row: ChannelCategory
    category_row, _ = ChannelCategory.get_or_create(discord_id=category.id,
                                                    defaults=category_defaults)
    return guild_row, category_row, True
async def _check_member_owns_channel(ctx: commands.Context) -> Optional[discord.VoiceChannel]:
    """
    Check whether the invoking member currently owns a temporary voice channel.

    Sends a self-deleting error message and returns ``None`` when the member
    has no ownership record, or when the recorded channel no longer exists
    in the guild.

    :param ctx: the command context
    :return: the owned voice channel or None
    """
    channel_owner_db: ChannelOwner = ChannelOwner.get_or_none(discord_id=ctx.author.id)
    if channel_owner_db is None:
        print('no entry for member in database.')
        # f-prefix removed: the literal contains no placeholders (flake8 F541).
        await ctx.send('You do not control a channel.', delete_after=ChannelCog.message_delete_delay)
        await ctx.message.delete(delay=ChannelCog.message_delete_delay)
        return None
    # Resolve the stored channel id back to a live VoiceChannel object.
    owned_voice_channel: discord.VoiceChannel = discord.utils.find(
        lambda c: c.id == channel_owner_db.channel_id, ctx.guild.voice_channels)
    if owned_voice_channel is None:
        print('Channel not found in guild.')
        await ctx.send('You do not control a channel.', delete_after=ChannelCog.message_delete_delay)
        await ctx.message.delete(delay=ChannelCog.message_delete_delay)
        return None
    return owned_voice_channel
| StarcoderdataPython |
1678046 |
from rest_framework import pagination, viewsets
from rest_framework_filters import backends
from .filters import DFUserFilter, NoteFilter, UserFilter
from .models import Note, User
from .serializers import NoteSerializer, UserSerializer
class DFUserViewSet(viewsets.ModelViewSet):
    """User viewset used to test compatibility of the drf-filters backend
    with a standard django-filter FilterSet."""
    # used to test compatibility with the drf-filters backend
    # with standard django-filter FilterSets.
    queryset = User.objects.all()
    serializer_class = UserSerializer
    filter_backends = (backends.RestFrameworkFilterBackend, )
    filterset_class = DFUserFilter
class FilterClassUserViewSet(viewsets.ModelViewSet):
    """User viewset configured via an explicit ``filterset_class``."""
    queryset = User.objects.all()
    serializer_class = UserSerializer
    filter_backends = (backends.RestFrameworkFilterBackend, )
    filterset_class = UserFilter
class FilterFieldsUserViewSet(viewsets.ModelViewSet):
    """User viewset configured via ``filterset_fields`` (auto-generated
    FilterSet with all lookups enabled for ``username``)."""
    queryset = User.objects.all()
    serializer_class = UserSerializer
    filter_backends = (backends.RestFrameworkFilterBackend, )
    filterset_fields = {
        'username': '__all__',
    }
class UnfilteredUserViewSet(viewsets.ModelViewSet):
    """User viewset with the filter backend installed but no FilterSet
    configured (exercises the backend's no-op path)."""
    queryset = User.objects.all()
    serializer_class = UserSerializer
    filter_backends = (backends.RestFrameworkFilterBackend, )
class ComplexFilterFieldsUserViewSet(FilterFieldsUserViewSet):
    """User viewset exercising the ComplexFilterBackend; ordered by pk for
    deterministic pagination."""
    queryset = User.objects.order_by('pk')
    filter_backends = (backends.ComplexFilterBackend, )
    filterset_fields = {
        'id': '__all__',
        'username': '__all__',
        'email': '__all__',
    }

    class pagination_class(pagination.PageNumberPagination):
        """Inline paginator allowing clients to set the page size via the
        ``page_size`` query parameter."""
        page_size_query_param = 'page_size'
class UserViewSet(viewsets.ModelViewSet):
    """Default user viewset (same configuration as FilterClassUserViewSet)."""
    queryset = User.objects.all()
    serializer_class = UserSerializer
    filter_backends = (backends.RestFrameworkFilterBackend, )
    filterset_class = UserFilter
class NoteViewSet(viewsets.ModelViewSet):
    """Note viewset filtered by the NoteFilter FilterSet."""
    queryset = Note.objects.all()
    serializer_class = NoteSerializer
    filter_backends = (backends.RestFrameworkFilterBackend, )
    filterset_class = NoteFilter
| StarcoderdataPython |
6688486 | from django.contrib import admin
from django.urls import path
from .views import *
# URL namespace, e.g. reverse('products:main-products-view').
app_name = 'products'

urlpatterns = [
    # App root: renders the chart selection page.
    path('',chart_select_view, name='main-products-view'),
]
1838560 | <reponame>gratimax/jarvis
from multiprocessing import Pool
import os
import os.path
import pickle
import re
from pyquery import PyQuery as pq
import requests
from jarvis.model import *
# Matches a trailing middle initial such as ' J.' (whitespace, any character,
# literal dot). Raw string fixes the invalid '\s' escape in a plain literal,
# which raises a SyntaxWarning/DeprecationWarning on modern Python.
p = re.compile(r'\s.\.')

COURSES_SCHEDULE = 'https://www.deanza.edu/schedule/classes/index.html'
COURSES_SEARCH = 'https://www.deanza.edu/schedule/classes/schsearch.html'
RATING_SEARCH = 'http://www.ratemyprofessors.com/search.jsp'
RATING_SHOW = 'http://www.ratemyprofessors.com/ShowRatings.jsp'

# Season name -> single-letter quarter code used by the schedule search form.
# guessed, but probably true?
QUARTER_MAPPING = {
    'winter': 'W',
    'spring': 'S',
    'summer': 'M',
    'fall': 'F',
}

# Day code -> full weekday name; 'Th' is the only two-character code.
# definitely true
DAYS_MAPPING = [
    ('M', 'Monday'),
    ('T', 'Tuesday'),
    ('W', 'Wednesday'),
    ('Th', 'Thursday'),
    ('F', 'Friday'),
    ('S', 'Saturday'),
]

# Raw schedule type code -> display name. Codes observed so far.
TYPES_MAPPING = {
    'CLAS': 'Class',
    'LAB': 'Lab',
    'TBA': 'TBA',
    'LEC': 'Lecture',
}

YEAR = str(2018)
# Quarter code posted to the schedule search, e.g. '2018S' for spring 2018.
quarter = YEAR + QUARTER_MAPPING['spring']
def get_departments():
    """Scrape the schedule search page and return the set of Department
    objects (id + name), skipping the blank placeholder <option>."""
    print('Downloading departments list...')
    search_page = pq(requests.get(COURSES_SCHEDULE).text)
    department_options = search_page('#Uniq_Course_ID').items('option')
    departments = {
        Department(department_id=option.val(), name=option.text())
        for option in department_options if option.text() != ''
    }
    print('Found %s departments' % len(departments))
    return departments
def row_describes_course(row):
    """Truthy when the table row starts a new course entry: only course
    header rows have a link inside the third '.snews' cell."""
    return row('.snews').eq(2)('a')
def get_meeting_days(days_text):
    """
    Parse a schedule day string (e.g. 'MTWThF') into a set of weekday names.

    'Th' is the only two-character day code, so it is consumed greedily
    before a lone 'T' (Tuesday). The literal 'TBA' yields an empty set and
    unrecognised characters are ignored. (Replaces the previous hand-rolled
    index-juggling tokenizer, which mis-parsed unknown characters as 'T'.)

    :param days_text: raw day-column text from the schedule table
    :return: set of full weekday names, e.g. {'Monday', 'Wednesday'}
    """
    if days_text == 'TBA':
        return set()
    # Mapping inlined (mirrors module-level DAYS_MAPPING) so the parser is
    # self-contained; 'S' only ever means Saturday in this schedule.
    day_names = {
        'M': 'Monday',
        'T': 'Tuesday',
        'W': 'Wednesday',
        'Th': 'Thursday',
        'F': 'Friday',
        'S': 'Saturday',
    }
    days = set()
    i = 0
    while i < len(days_text):
        # Match the two-character 'Th' before the single-character 'T'.
        code = 'Th' if days_text.startswith('Th', i) else days_text[i]
        if code in day_names:
            days.add(day_names[code])
        i += len(code)
    return days
def get_meeting_instructor(instructor_text):
    """
    Parse an instructor cell of the form 'LASTNAME, FIRSTNAME [M.]' into an
    Instructor, dropping any trailing middle initial from the first name.

    Fixes: removes a leftover debug ``print`` of the regex match, and replaces
    the non-idiomatic ``not x == None`` comparison with ``is not None``.

    :param instructor_text: raw instructor-column text
    :return: Instructor with capitalised names and rating set to 'unknown'
    """
    comma_index = instructor_text.find(',')
    last_name = instructor_text[:comma_index].capitalize()
    first_name = instructor_text[comma_index + 1:].strip().capitalize()
    # Strip a middle initial such as ' J.' (matched by the module-level
    # pattern `p`) from the end of the first name.
    middle_match = p.search(first_name)
    if middle_match is not None:
        first_name = first_name[:middle_match.start()]
    return Instructor(first_name=first_name, last_name=last_name, rating='unknown')
def get_meeting_type(type_text):
    """Map a raw schedule type code (e.g. 'LEC') to its display name;
    raises KeyError for codes not yet listed in TYPES_MAPPING."""
    return TYPES_MAPPING[type_text]
def get_time(time_text):
    """Parse a clock string like '1:30 PM' into a 24-hour MeetingTime."""
    clock, period = time_text.split(' ')
    hours_text, minutes_text = clock.split(':')
    hours = int(hours_text)
    minutes = int(minutes_text)
    # Normalise 12-hour notation: 12 AM is midnight, PM adds twelve hours.
    if period == 'PM' and hours < 12:
        hours += 12
    elif period == 'AM' and hours == 12:
        hours = 0
    return MeetingTime(hours=hours, minutes=minutes)
def get_meeting_range(range_text):
    """Parse a 'start-end' time span into a MeetingRange, or 'TBA' when the
    schedule gives no concrete times."""
    if '-' not in range_text:
        return 'TBA'
    start_text, end_text = range_text.split('-')
    return MeetingRange(start=get_time(start_text), end=get_time(end_text))
def get_course_info(row):
    """Parse a course header row into a Course holding its first Meeting.

    The title cell looks like 'Course Title (TYPE)'; the trailing
    parenthesised token is the meeting type.
    """
    snews = row('.snews')
    crn = snews.eq(0).text()
    course = snews.eq(1).text()
    meeting_title = snews.eq(2).text()
    # rfind: the type code sits in the *last* pair of parentheses.
    opening_paren_index = meeting_title.rfind('(')
    closing_paren_index = meeting_title.rfind(')')
    title = meeting_title[:opening_paren_index].strip()
    meeting_time = get_meeting_range(snews.eq(3).text())
    meeting_days = get_meeting_days(snews.eq(4).text())
    meeting_instructor = get_meeting_instructor(snews.eq(5).text())
    meeting_location = snews.eq(6).text()
    meeting_type = get_meeting_type(meeting_title[opening_paren_index + 1:closing_paren_index])
    meeting = Meeting(time=meeting_time, days=meeting_days, instructor=meeting_instructor,
                      location=meeting_location, type=meeting_type)
    return Course(crn=crn, course=course, title=title, meetings=[meeting])
def get_meeting_info(row):
    """Parse a continuation row (an extra meeting of the previous course)
    into a Meeting; the type cell is wrapped in parentheses, hence [1:-1]."""
    snews = row('.snews')
    time = get_meeting_range(snews.eq(2).text())
    days = get_meeting_days(snews.eq(3).text())
    instructor = get_meeting_instructor(snews.eq(4).text())
    location = snews.eq(5).text()
    meeting_type = get_meeting_type(snews.eq(1).text()[1:-1])
    return Meeting(time=time, days=days, instructor=instructor, location=location, type=meeting_type)
def get_courses(department):
    """POST the schedule search form for one department and parse the result
    table into Course objects; continuation rows are folded into the meeting
    list of the most recently parsed course."""
    headers = {
        'Referer': COURSES_SCHEDULE
    }
    payload = {
        'Quarter': quarter,
        'Uniq_Course_ID': department.department_id,
    }
    courses_page = pq(requests.post(COURSES_SEARCH, headers=headers, data=payload).text)
    courses_table = courses_page('.anti_nav_print_adj').eq(2)
    # Skip separator rows (<hr>) and rows without data cells.
    courses_rows = [row for row in courses_table.items('tr') if not row('hr') and row('.snews')]
    courses_computed = []
    for row in courses_rows:
        if row_describes_course(row):
            courses_computed.append(get_course_info(row))
        else:
            # Continuation row: attach the extra meeting to the last course.
            courses_computed[-1].meetings.append(get_meeting_info(row))
    courses_count_computed = len(courses_computed)
    print('Found %s courses for department %s' % (courses_count_computed, department.name))
    return courses_computed
def find_instructor_rating(instructor):
    """Search RateMyProfessors for the instructor and, if a listing exists,
    return a copy of the instructor with the rating filled in; otherwise
    return the instructor unchanged."""
    human_name = '%s %s' % (instructor.first_name, instructor.last_name)
    # 'Staff, M' is the schedule's placeholder for an unassigned instructor.
    if instructor.first_name == 'M' and instructor.last_name == 'Staff':
        print('Skipping instructor %s' % human_name)
        return instructor
    search_query = '%s %s De Anza College ' % (instructor.first_name, instructor.last_name)
    payload = {
        'query': search_query
    }
    search_page = pq(requests.get(RATING_SEARCH, params=payload).text)
    results = [result.attr('href') for result in search_page('.listings').items('.listing a')]
    if len(results) == 0:
        print('No rating found for instructor %s' % human_name)
        return instructor
    else:
        print('Rating found for instructor %s' % human_name)
        # Take the first hit; its href ends in '...?tid=<rating id>'.
        result = results[0]
        rating_id = result[result.find('=') + 1:]
        return get_instructor_rating(instructor, rating_id)
def get_instructor_rating(instructor, rating_id):
    """Fetch an instructor's RateMyProfessors page and attach its overall
    score, or a Rating with score 'unknown' when the page shows no grade."""
    payload = {
        'tid': rating_id
    }
    rating_page = pq(requests.get(RATING_SHOW, params=payload).text)
    rating = rating_page('.breakdown-header .grade').eq(0)
    if rating:
        return instructor._replace(rating=Rating(score=float(rating.text()), rating_id=rating_id))
    else:
        return instructor._replace(rating=Rating(score="unknown", rating_id=rating_id))
def instructor_id(instructor):
    """Build a human-readable lookup key ('First Last') for an instructor."""
    return ' '.join((instructor.first_name, instructor.last_name))
def scrape():
    """Top-level pipeline: download departments, courses and instructor
    ratings in parallel, merge the ratings back into the courses, and
    pickle everything under data/."""
    departments = get_departments()
    print('Downloading course information...')
    p = Pool(8)  # NOTE: shadows the module-level regex `p` inside this function
    courses = [course for department_courses in p.map(get_courses, departments) for course in department_courses]
    # Deduplicate instructors across all meetings before hitting the network.
    instructors = set(
        meeting.instructor for course in courses for meeting in course.meetings
    )
    print('Found %s total courses' % len(courses))
    print('Downloading instructor rating information...')
    instructors = p.map(find_instructor_rating, instructors)
    print('Updating instructor data within courses..')
    # Rebuild every course so each meeting references the rated instructor.
    instructors_map = {instructor_id(instructor): instructor for instructor in instructors}
    courses = [
        course._replace(meetings=[
            meeting._replace(instructor=instructors_map[instructor_id(meeting.instructor)])
            for meeting in course.meetings
        ]) for course in courses
    ]
    if not os.path.exists('data/'):
        os.mkdir('data/')
    print('Pickling department data...')
    with open('data/departments.pickle', 'wb') as departments_file:
        pickle.dump(departments, departments_file)
    print('Pickling course data...')
    with open('data/courses.pickle', 'wb') as courses_file:
        pickle.dump(courses, courses_file)
    print('Pickling instructor data...')
    with open('data/instructors.pickle', 'wb') as instructors_file:
        pickle.dump(instructors, instructors_file)
    print('Done')
| StarcoderdataPython |
6563934 | import torch
from torch import nn
class MatMul(nn.Module):
    """Stateless module computing the matrix product of two tensors."""

    def forward(self, A, V):
        # The tensor `@` operator is equivalent to torch.matmul.
        product = A @ V
        return product
| StarcoderdataPython |
6523005 | <reponame>thekitchenscientist/pydotART<filename>paintmixing.py
# -*- coding: utf-8 -*-
"""
Created on Thurs Aug 05 13:07:36 2021
@author: Stephen
https://github.com/thekitchenscientist/pydotART
Sample programs for the 41935 Set
https://rebrickable.com/sets/41935-1/lots-of-dots/#parts
"""
import pydotART as da
import numpy as np
### Basic Configuration ###
# What tiles are available? [Colour,Shape,Amount]
palette = np.array([[14,1.,36],
[14,3.,36],
[14,4.,36],
[22,1.,36],
[22,3.,36],
[22,4.,36],
])
# Canvas Size and Colour
x=6
y=6
colour=15
# How wide should the untiled border around the edge be?
border=0
# What is the location and size of a central gap [x,y,x_length, y_length]
cutout=np.array([0,0,0,0])
# Where should the tiling start from?
canvas_seed=[0,0]
# Where should the next tile be placed relative to the previous?
translation=np.array([0,0])
colour_mode = 'alternating'
canvas = da.Canvas(x,y,colour,border,cutout)
def Paint_Mixing(canvas,mixing_border = 0.5, mixing_amount = 0.2, tile_ID = 1):
    """Fill a canvas with two tile colours and randomly 'mix' them across the
    boundary, imitating two paints bleeding into each other.

    :param canvas: canvas structure from ``da.Canvas`` — canvas[0]/canvas[1]
        hold the x/y dimensions and canvas[8] the (shape, colour) layers,
        judging by the access pattern below; TODO confirm against pydotART.
    :param mixing_border: fraction of the y axis where the colour changes over
    :param mixing_amount: fraction of each row allowed to flip colour
    :param tile_ID: colour id of the first paint (second paint is tile_ID+1)
    :return: the mutated canvas (modified in place and returned)
    """
    x = canvas[0]
    y = canvas[1]
    # Index of the last column that belongs to the first colour band.
    change_over = int(round(y*mixing_border)-1)
    #set up canvas: default every cell to shape 1 and split the columns
    # into two solid colour bands.
    canvas[8][0] = 1
    for i in range(0,y):
        if i<=change_over:
            canvas[8][1][:,i] = tile_ID
        else:
            canvas[8][1][:,i] = tile_ID+1
    #flip colours: randomly swap cells to the opposite colour; the flip
    # probability (trigger) and budget (flips_allowed) grow toward the border.
    flips_allowed = int(round(x*mixing_amount))
    for i in range(0,y):
        flips_taken = 0
        if i<=change_over:
            colour_ID = tile_ID+1
            trigger = (i+1)/y
            flips_allowed += 1
        elif i==change_over+1:
            colour_ID = tile_ID
            trigger = i/y
        else:
            colour_ID = tile_ID
            trigger = abs(i-y)/y
            flips_allowed -= 1
        for j in range(0,x):
            if np.random.random_sample() < trigger and flips_taken <= flips_allowed:
                canvas[8][1][j,i] = colour_ID
                flips_taken +=1
            # NOTE(review): reassigning a for-loop variable has no effect in
            # Python, so this never restarts the row as apparently intended
            # when too few flips happened — confirm whether that matters.
            if j == x-1 and flips_taken < flips_allowed:
                j=0
    #extend canvas for search: pad with copies of the border cells so the
    # neighbour lookups below never index out of bounds.
    extended_canvas = np.zeros((x+2,y+2))
    extended_canvas[1:x+1, 1:y+1] = canvas[8][1]
    extended_canvas[:,0] = extended_canvas[:,1]
    extended_canvas[:,y+1] = extended_canvas[:,y]
    extended_canvas[0,:] = extended_canvas[1,:]
    extended_canvas[x+1,:] = extended_canvas[x,:]
    #tidy shapes: choose each cell's tile shape id from the colours of its
    # four neighbours (values like 3.x/4.x presumably encode tile shape and
    # orientation — TODO confirm against pydotART's shape table).
    for i in range(1,y+1):
        for j in range(1,x+1):
            above = extended_canvas[j-1,i]
            infront = extended_canvas[j,i+1]
            below = extended_canvas[j+1,i]
            behind = extended_canvas[j,i-1]
            current = extended_canvas[j,i]
            if infront == behind != current and current == above == below:
                canvas[8][0][j-1,i-1] = 1.0
            elif infront == behind == above == below != current:
                if i<=change_over:
                    canvas[8][0][j-1,i-1] = 4.4
                else:
                    canvas[8][0][j-1,i-1] = 4.8
            elif infront == above != current and current == behind == below:
                canvas[8][0][j-1,i-1] = 3.4
            elif infront == below != current and current == behind == above:
                canvas[8][0][j-1,i-1] = 3.6
            elif behind == above != current and current == infront == below:
                canvas[8][0][j-1,i-1] = 3.2
            elif behind == below != current and current == infront == above:
                canvas[8][0][j-1,i-1] = 3.8
            elif infront == above == below != current and current == behind:
                canvas[8][0][j-1,i-1] = 4.8
            elif behind == above == below != current and current == infront:
                canvas[8][0][j-1,i-1] = 4.4
            elif infront == behind == above != current and current == below:
                canvas[8][0][j-1,i-1] = 4.2
            elif infront == behind == below != current and current == above:
                canvas[8][0][j-1,i-1] = 4.6
    return canvas
# Build a large canvas and tile it with x*y independently generated
# paint-mixing patterns arranged in a spiral, then export as an LDraw file.
canvas = da.Canvas(x*y,x*y,colour,border,cutout)
mixing_border = 0.5
mixing_amount = 0.2
tile_ID = 1
pattern_list = []
for i in range(0,x*y):
    paint_canvas = da.Canvas(x,y,colour,border,cutout)
    result = Paint_Mixing(paint_canvas,mixing_border, mixing_amount,tile_ID)
    # Each sub-pattern gets its own colour-id pair (tile_ID, tile_ID+1).
    tile_ID +=2
    pattern_list.append(result)
canvas = da.Spiral_Pattern_List(canvas,canvas_seed,pattern_list,direction=1,rotate=0,tile_ID = 1)
colour_pattern = da.Colour_Pattern(canvas,palette,colour_mode,tiles_check=False)
da.Ldraw_Pattern(canvas,"random paint mixing "+str(mixing_border)+ " " +str(mixing_amount)+ " " +str(x)+ "x" +str(y)+ " DOTS",add_steps=True)
# Earlier single-pattern experiment kept for reference (inert string literal):
"""for y in range(6,15):
    x=6
    mixing_border = 0.5
    mixing_amount = 0.2
    canvas = da.Canvas(x,y,colour,border,cutout)
    canvas = Paint_Mixing(canvas,mixing_border, mixing_amount)
    colour_pattern = da.Colour_Pattern(canvas,palette,colour_mode,tiles_check=False)
    da.Ldraw_Pattern(canvas,"random paint mixing "+str(mixing_border)+ " " +str(mixing_amount)+ " " +str(x)+ "x" +str(y)+ " DOTS",add_steps=True)"""
| StarcoderdataPython |
299035 | <gh_stars>0
import torch
import torchtext
import torchtext.data as data
import locations
# TODO subclass an abstract Dataset class.
# Perhaps also a TextDataset class.
class WikiText2(object):
    """Static accessor for the WikiText-2 language-modelling dataset.

    All state is class-level and lazily initialised by ``_init()``; the
    class is used as a namespace rather than instantiated.
    """
    # Some sensible defaults.
    name = "WikiText-2"
    default_model = "wlm_lstm_medium"
    location = None

    # These defaults only apply to models doing one particular task. If the
    # dataset is used in a different way, these may not be appropriate.
    default_lr = 20
    default_lr_steps = [(10, 0.25), (5, 0.25), (5, 0.25)]
    default_epochs = 25
    default_sequence_length = 35

    # Preprocessed state.
    # _text describes how to interpret the text in the dataset.
    # _train, _val and _test hold different pieces of the dataset.
    _text = None
    _train = None
    _val = None
    _test = None

    @staticmethod
    def num_tokens():
        """Return the vocabulary size (initialising the dataset if needed)."""
        WikiText2._init()
        return len(WikiText2._text.vocab)

    @staticmethod
    def word_to_token(word):
        """Convert a string to an identifying integer."""
        WikiText2._init()
        return WikiText2._text.vocab.stoi[word]

    @staticmethod
    def token_to_word(token):
        """
        Convert an identifying integer to a string.

        There are two special strings which may be encountered:
         * <eos> represents the end of stream
         * <unk> represents an unknown word
        """
        WikiText2._init()
        return WikiText2._text.vocab.itos[token]

    # Input channels and classes don't mean very much for text, but the
    # analogy for both of them is the number of words in the dictionary.
    @staticmethod
    def input_channels():
        return WikiText2.num_tokens()

    @staticmethod
    def num_classes():
        return WikiText2.num_tokens()

    @staticmethod
    def data_loaders(num_workers, batch_size, distributed=False):
        """Return train and validation data loaders for the WikiText-2 dataset."""
        return WikiText2.train_loader(num_workers, batch_size, distributed), \
            WikiText2.val_loader(num_workers, batch_size)

    @staticmethod
    def train_loader(num_workers, batch_size, distributed):
        """Build the training loader: shuffled BPTT batches of length
        ``default_sequence_length``."""
        # No support for distributed training yet.
        assert not distributed
        WikiText2._init()
        # Some weird notation because we have tuples of length 1.
        iterator, = data.BPTTIterator.splits(
            (WikiText2._train,), batch_size=batch_size, shuffle=True,
            bptt_len=WikiText2.default_sequence_length,
            sort_key=lambda x: len(x.text))
        return IteratorAdapter(iterator, num_workers=num_workers)

    @staticmethod
    def val_loader(num_workers, batch_size):
        """Build the (unshuffled) validation loader."""
        WikiText2._init()
        # Some weird notation because we have tuples of length 1.
        iterator, = data.BPTTIterator.splits(
            (WikiText2._val,), batch_size=batch_size,
            bptt_len=WikiText2.default_sequence_length,
            sort_key=lambda x: len(x.text))
        return IteratorAdapter(iterator, num_workers=num_workers)

    @staticmethod
    def test_loader(num_workers, batch_size):
        """Build the (unshuffled) test loader."""
        WikiText2._init()
        # Some weird notation because we have tuples of length 1.
        iterator, = data.BPTTIterator.splits(
            (WikiText2._test,), batch_size=batch_size,
            bptt_len=WikiText2.default_sequence_length,
            sort_key=lambda x: len(x.text))
        return IteratorAdapter(iterator, num_workers=num_workers)

    @staticmethod
    def _init():
        """Lazily download/tokenise the corpus and build the vocabulary."""
        if WikiText2._text is not None:
            return
        # Set up field: describe how text will be interpreted.
        WikiText2._text = data.Field(lower=True, batch_first=True)
        # Make splits for data.
        WikiText2._train, WikiText2._val, WikiText2._test = \
            torchtext.datasets.WikiText2.splits(WikiText2._text)
        # Build the vocabulary.
        WikiText2._text.build_vocab(WikiText2._train)
class IteratorAdapter(torch.utils.data.DataLoader):
    """
    Class which wraps torchtext's Iterator to create a DataLoader.
    """

    def __init__(self, iterator, num_workers):
        # TODO: pass more information to the superclass?
        # The iterator already handles shuffling and batches.
        super(IteratorAdapter, self).__init__(iterator.dataset,
                                              num_workers=num_workers,
                                              pin_memory=True)
        self.iterator = iterator

    def __len__(self):
        """Number of batches per epoch (delegates to the wrapped iterator)."""
        return len(self.iterator)

    def __iter__(self):
        """Yield (input batch, flattened target batch) pairs."""
        for batch in iter(self.iterator):
            yield (batch.text, batch.target.flatten())
| StarcoderdataPython |
3200377 | import wandb
from src.Data import Data
from src.configurations import Configuration, WandbLogs
from src.models.BestPreTrainedModelForAStation import BestPreTrainedModelForAStation
from src.models.PerStationModel import PerStationModel
from src.run_utils import LogKeys, train_predict_evaluate_log_for_model_and_data
def run(config: Configuration = Configuration()):
    """
    Train, evaluate and log the best pre-trained per-station model.

    Fix: the test-prediction guard previously checked
    ``configuration.run_test_predictions`` twice (once nested inside itself);
    the redundant inner check is removed.

    NOTE(review): the mutable ``Configuration()`` default is evaluated once
    and shared between calls; kept as-is for interface compatibility.

    :param config: base configuration (may be overridden by a wandb sweep)
    """
    wandb_run = wandb.init(project=config.wandb_project_name,
                           entity=config.wandb_entity,
                           mode=config.wandb_mode,
                           notes="Best trained model",
                           tags=['Best trained model', 'model per station'],
                           config=config.as_dict())
    # Reload the Configuration (to allow for sweeps)
    configuration = Configuration(**wandb.config)
    # Load training and validation data
    training_dev_data = Data(config.no_nan_in_bikes, config.development_data_path + config.dev_data_filename)
    val_data = Data(config.no_nan_in_bikes, config.development_data_path + config.val_data_filename)
    # Model
    per_station_model = PerStationModel(configuration, training_dev_data, BestPreTrainedModelForAStation, True)
    # Map internal metric keys to their wandb log names.
    log_keys = {LogKeys.mae_dev.value: WandbLogs.per_station_mae_dev.value,
                LogKeys.mae_val.value: WandbLogs.per_station_mae_val.value,
                LogKeys.mae_per_station_dev.value: WandbLogs.per_station_mae_per_station_dev.value,
                LogKeys.mae_per_station_val.value: WandbLogs.per_station_mae_per_station_val.value,
                LogKeys.predictions_dev.value: WandbLogs.per_station_predictions_dev.value,
                LogKeys.predictions_val.value: WandbLogs.per_station_predictions_val.value,
                }
    train_predict_evaluate_log_for_model_and_data(per_station_model, training_dev_data, val_data,
                                                  log_keys, wandb_run, configuration.log_predictions_to_wandb)
    # Write test predictions to csv
    if configuration.run_test_predictions:
        test_data = Data(config.no_nan_in_bikes, config.test_data_path)
        per_station_result_test = per_station_model.predict(test_data)
        per_station_result_test.write_to_csv('per_station_model_' + wandb_run.name, configuration)
def main():
    """CLI entry point: run the experiment with the default configuration."""
    run(Configuration())


if __name__ == "__main__":
    main()
| StarcoderdataPython |
4999227 | <gh_stars>1-10
import abc
import operator
from typing import Optional, Dict
import numpy as np
from ..state import StateKey
class Evaluator(abc.ABC):
    """Base class that defines the general 'evaluator' interface."""

    @abc.abstractmethod
    def is_active(self) -> bool:
        """Returns whether or not an evaluator contains unlocked state variables."""

    @abc.abstractmethod
    def evaluate(self, lhs: Optional[np.ndarray] = None):
        """Interface for the general 'evaluation', optionally with Jacobians when lhs is not None."""
class EvalTreeNode:
    """One node of a block-automatic evaluation tree.

    Holds the evaluated ``value`` at this node together with the list of
    child nodes it was computed from.
    """

    def __init__(self, value=None, *children):
        self.value = value
        # Materialize the varargs tuple as a mutable list of children.
        self.children = [child for child in children]
class AutoGradEvaluator(Evaluator):
    """Base class for evaluators that support block-automatic differentiation."""

    def evaluate(self, lhs: np.ndarray = None):
        """Run the forward pass; when ``lhs`` is given, run the backward pass too."""
        # Forward pass: build the full evaluation tree.
        tree = self.get_eval_tree()
        if lhs is None:
            return tree.value
        # Backward pass: accumulate Jacobians w.r.t. the state variables.
        return tree.value, self.compute_jacs(lhs, tree)

    @abc.abstractmethod
    def get_eval_tree(self) -> EvalTreeNode:
        """Return the tree of evaluations produced by the forward pass."""

    @abc.abstractmethod
    def compute_jacs(self, lhs, tree) -> Dict[StateKey, np.ndarray]:
        """Compute the Jacobians (backward pass)."""

    @staticmethod
    def merge_jacs(a, b, op=operator.add):
        """Merge two Jacobian dicts keyed by the same variables using ``op``."""
        # Symmetric difference first: keys present in exactly one of the dicts.
        merged = {key: a.get(key, b.get(key)) for key in a.keys() ^ b.keys()}
        # Then apply ``op`` to the values of keys present in both dicts.
        merged.update({key: op(a[key], b[key]) for key in a.keys() & b.keys()})
        return merged
| StarcoderdataPython |
1637475 | <gh_stars>0
# encoding=utf8
import logging
import numpy as np
from niapy.algorithms.algorithm import Algorithm
logging.basicConfig()
logger = logging.getLogger('niapy.algorithms.other')
logger.setLevel('INFO')
__all__ = ['SimulatedAnnealing', 'cool_delta', 'cool_linear']
def cool_delta(current_temperature, delta_temperature, **_kwargs):
    r"""Cool by subtracting a fixed temperature difference.

    Args:
        current_temperature (float): Current temperature.
        delta_temperature (float): Amount subtracted from the temperature.

    Returns:
        float: New temperature.

    """
    new_temperature = current_temperature - delta_temperature
    return new_temperature
def cool_linear(current_temperature, starting_temperature, max_evals, **_kwargs):
    r"""Cool linearly: subtract a fixed fraction of the starting temperature.

    Args:
        current_temperature (float): Current temperature.
        starting_temperature (float): Starting temperature.
        max_evals (int): Number of evaluations done.
        _kwargs (Dict[str, Any]): Additional arguments (ignored).

    Returns:
        float: New temperature.

    """
    step = starting_temperature / max_evals
    return current_temperature - step
class SimulatedAnnealing(Algorithm):
    r"""Implementation of Simulated Annealing Algorithm.

    Algorithm:
        Simulated Annealing Algorithm

    Date:
        2018

    Authors:
        <NAME> and <NAME>

    License:
        MIT

    Reference URL:

    Reference paper:

    Attributes:
        Name (List[str]): List of strings representing algorithm name.
        delta (float): Movement for neighbour search.
        starting_temperature (float): Starting temperature.
        delta_temperature (float): Change in temperature.
        cooling_method (Callable): Neighbourhood function.
        epsilon (float): Error value.

    See Also:
        * :class:`niapy.algorithms.Algorithm`

    """

    Name = ['SimulatedAnnealing', 'SA']

    @staticmethod
    def info():
        r"""Get basic information of algorithm.

        Returns:
            str: Basic information of algorithm.

        See Also:
            * :func:`niapy.algorithms.Algorithm.info`

        """
        return r"""None"""

    def __init__(self, delta=0.5, starting_temperature=2000, delta_temperature=0.8, cooling_method=cool_delta,
                 epsilon=1e-23, *args, **kwargs):
        """Initialize SimulatedAnnealing.

        Args:
            delta (Optional[float]): Movement for neighbour search.
            starting_temperature (Optional[float]): Starting temperature.
            delta_temperature (Optional[float]): Change in temperature.
            cooling_method (Optional[Callable]): Neighbourhood function.
            epsilon (Optional[float]): Error value.

        See Also:
            * :func:`niapy.algorithms.Algorithm.__init__`

        """
        # Simulated annealing tracks a single solution, so any user-supplied
        # population size is discarded and the population is fixed to 1.
        kwargs.pop('population_size', None)
        super().__init__(1, *args, **kwargs)
        self.delta = delta
        self.starting_temperature = starting_temperature
        self.delta_temperature = delta_temperature
        self.cooling_method = cooling_method
        self.epsilon = epsilon

    def set_parameters(self, delta=0.5, starting_temperature=2000, delta_temperature=0.8, cooling_method=cool_delta,
                       epsilon=1e-23, **kwargs):
        r"""Set the algorithm parameters/arguments.

        Args:
            delta (Optional[float]): Movement for neighbour search.
            starting_temperature (Optional[float]): Starting temperature.
            delta_temperature (Optional[float]): Change in temperature.
            cooling_method (Optional[Callable]): Neighbourhood function.
            epsilon (Optional[float]): Error value.

        See Also:
            * :func:`niapy.algorithms.Algorithm.set_parameters`

        """
        # Population size is fixed to 1 (single-solution algorithm).
        kwargs.pop('population_size', None)
        super().set_parameters(population_size=1, **kwargs)
        self.delta = delta
        self.starting_temperature = starting_temperature
        self.delta_temperature = delta_temperature
        self.cooling_method = cooling_method
        self.epsilon = epsilon

    def get_parameters(self):
        r"""Get algorithms parameters values.

        Returns:
            Dict[str, Any]:

        See Also:
            * :func:`niapy.algorithms.Algorithm.get_parameters`

        """
        d = Algorithm.get_parameters(self)
        d.update({
            'delta': self.delta,
            'delta_temperature': self.delta_temperature,
            'starting_temperature': self.starting_temperature,
            'epsilon': self.epsilon
        })
        return d

    def init_population(self, task):
        r"""Initialize the starting population.

        Args:
            task (Task): Optimization task.

        Returns:
            Tuple[numpy.ndarray, float, dict]:
                1. Initial solution
                2. Initial solutions fitness/objective value
                3. Additional arguments

        """
        # Uniform random point inside the task bounds.
        x = task.lower + task.range * self.random(task.dimension)
        current_temperature, x_fit = self.starting_temperature, task.eval(x)
        return x, x_fit, {'current_temperature': current_temperature}

    def run_iteration(self, task, x, x_fit, best_x, best_fitness, **params):
        r"""Core function of the algorithm.

        Args:
            task (Task):
            x (numpy.ndarray):
            x_fit (float):
            best_x (numpy.ndarray):
            best_fitness (float):
            **params (dict): Additional arguments.

        Returns:
            Tuple[numpy.ndarray, float, numpy.ndarray, float, dict]:
                1. New solution
                2. New solutions fitness/objective value
                3. New global best solution
                4. New global best solutions fitness/objective value
                5. Additional arguments

        """
        current_temperature = params.pop('current_temperature')
        # Candidate: uniform perturbation of width ``delta`` centred on x.
        c = task.repair(x - self.delta / 2 + self.random(task.dimension) * self.delta, rng=self.rng)
        c_fit = task.eval(c)
        delta_fit, r = c_fit - x_fit, self.random()
        # Metropolis acceptance criterion: always accept an improvement
        # (delta_fit < 0); accept a worse candidate with probability
        # exp(-delta_fit / T).  BUGFIX: the previous exp(+delta_fit / T) is
        # >= 1 whenever delta_fit > 0, so every worse candidate was accepted.
        if delta_fit < 0 or r < np.exp(-delta_fit / current_temperature):
            x, x_fit = c, c_fit
        current_temperature = self.cooling_method(current_temperature, starting_temperature=self.starting_temperature,
                                                  delta_temperature=self.delta_temperature, max_evals=task.max_evals)
        best_x, best_fitness = self.get_best(x, x_fit, best_x, best_fitness)
        return x, x_fit, best_x, best_fitness, {'current_temperature': current_temperature}
# vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3
| StarcoderdataPython |
9644874 | import sys
def _encode_string(string: str) -> bytes:
"""Encode a string to utf-8.
This can be used to circumvent the issue of the standard encoding
of a windows console not being utf-8.
See: https://github.com/DanielNoord/pydocstringformatter/issues/13
"""
return string.encode("utf-8")
def _print_to_console(string: str, quiet: bool) -> None:
"""Print a string to the console while handling edge cases.
This can be used instead of print() whenever we want to
print emoji's or non-ASCII characters, but also to check if we are
in quiet mode.
"""
if not quiet:
sys.stdout.buffer.write(_encode_string(string))
def _sys_exit(value: int, option: bool) -> None:
"""Sys.exit if the boolean passed says to do so."""
if option:
sys.exit(value)
| StarcoderdataPython |
11372805 | <filename>pointnet2/generate_samples_distributed.py
import argparse
import os
import pdb
import subprocess
from os import listdir
import h5py
import numpy as np
import pickle
def dict_to_command(dictionary, exclude_keys=[]):
    """Turn a dict of options into an argv-style ``--key value`` list.

    Boolean values become bare flags: True emits ``--key`` with no value and
    False emits nothing at all.  Keys listed in ``exclude_keys`` are skipped.
    """
    command = []
    for key, value in dictionary.items():
        if key in exclude_keys:
            continue
        if isinstance(value, bool):
            if value:
                command.append('--' + str(key))
        else:
            command.append('--' + str(key))
            command.append(str(value))
    return command
def print_and_write(content, file_handle):
    """Echo ``content`` to stdout and append it, newline-terminated, to ``file_handle``."""
    line = content + '\n'
    print(content)
    file_handle.write(line)
def gather_generated_results(father_directory, num_ranks, remove_original_files=False):
    """Merge per-rank generation outputs under ``father_directory`` into single files.

    Each rank is expected to have written its results to
    ``father_directory/rank_<i>``: ``.h5`` files holding generated point-cloud
    arrays (under key ``'data'``) and ``.pkl`` files holding evaluation
    metrics.  The arrays are concatenated along axis 0 and saved next to the
    rank folders under the same file names; the metrics are concatenated and
    re-pickled likewise.  Everything is also logged to
    ``father_directory/gathered_generation.log``.

    Args:
        father_directory (str): Directory containing the ``rank_<i>`` subfolders.
        num_ranks (int): Number of rank subfolders to gather.
        remove_original_files (bool): When True, delete each per-rank file
            after it has been read.
    """
    data = {}
    total_meta = []
    cd_distance = []
    emd_distance = []
    f1_score = []
    output_log_file = 'gathered_generation.log'
    output_log_file = os.path.join(father_directory, output_log_file)
    file_handle = open(output_log_file, 'w')
    for rank in range(num_ranks):
        directory = os.path.join(father_directory, 'rank_%d' % rank)
        files = listdir(directory)
        for fl in files:
            if fl[-3:] == '.h5': # generated coarse complete point clouds
                # Group arrays by file name so different .h5 outputs (e.g.
                # different t-slices) are merged separately.
                data_save_file = fl
                file_name = os.path.join(directory, fl)
                generated_file = h5py.File(file_name, 'r')
                generated_data = np.array(generated_file['data'])
                # data.append(generated_data)
                if data_save_file in data.keys():
                    data[data_save_file].append(generated_data)
                else:
                    data[data_save_file] = [generated_data]
                generated_file.close()
                print_and_write('data from %s is of shape %s' % (file_name, generated_data.shape), file_handle)
                if remove_original_files:
                    os.remove(file_name)
                    print_and_write('%s is removed' % (file_name), file_handle)
            elif fl[-4:] == '.pkl': # evaluation results
                eval_save_file = fl
                file_name = os.path.join(directory, fl)
                pkl_file_handle = open(file_name, 'rb')
                eval_result = pickle.load(pkl_file_handle)
                pkl_file_handle.close()
                # print('data from %s is of shape %s' % (file_name, generated_data.shape))
                total_meta.append(eval_result['meta'])
                cd_distance.append(eval_result['cd_distance'])
                emd_distance.append(eval_result['emd_distance'])
                f1_score.append(eval_result['f1'])
                # NOTE(review): ``iteration`` keeps the value from the last
                # .pkl seen; assumes every rank saved the same iteration and
                # that at least one .pkl exists — confirm.
                iteration = eval_result['iter']
                if remove_original_files:
                    os.remove(file_name)
                    print_and_write('%s is removed' % (file_name), file_handle)
    # save the gathered generated data
    for key in data.keys():
        # Concatenate the per-rank arrays along the batch axis.
        data[key] = np.concatenate(data[key], axis=0)
        print_and_write('The gathered data from all %s files of different ranks is of shape %s' % (key, data[key].shape), file_handle)
        save_dir = father_directory #os.path.split(directory)[0]
        gathered_data_save_file = os.path.join(save_dir, key)
        hf = h5py.File(gathered_data_save_file, 'w')
        hf.create_dataset('data', data=data[key])
        hf.close()
        print_and_write('The gathered data from all %s files of different ranks has been saved to %s' % (key, gathered_data_save_file), file_handle)
    # save the gathered eval results
    total_meta = np.concatenate(total_meta, axis=0)
    cd_distance = np.concatenate(cd_distance, axis=0)
    emd_distance = np.concatenate(emd_distance, axis=0)
    f1_score = np.concatenate(f1_score, axis=0)
    gathered_eval_save_file = os.path.join(save_dir, eval_save_file)
    handle = open(gathered_eval_save_file, 'wb')
    pickle.dump({'meta':total_meta, 'cd_distance':cd_distance,
                 'emd_distance':emd_distance, 'f1':f1_score,
                 'avg_cd':cd_distance.mean(), 'avg_emd':emd_distance.mean(), 'iter':iteration}, handle)
    handle.close()
    print_and_write('have saved gathered eval result at iter %s to %s' % (str(iteration), gathered_eval_save_file), file_handle)
    print_and_write("CD loss: {} EMD loss: {} F1 Score: {}".format(cd_distance.mean(), emd_distance.mean(), f1_score.mean()), file_handle)
    file_handle.close()
if __name__ == "__main__":
    '''
    running examples:
    python generate_samples_distributed.py --execute --gather_results --remove_original_files --config exp_configs/mvp_configs/config_standard_attention_real_3072_partial_points_rot_90_scale_1.2_translation_0.1.json --ckpt_name pointnet_ckpt_643499.pkl --batch_size 32 --phase test --device_ids '0,1,2,3,4,5,6,7'
    python generate_samples_distributed.py --execute --gather_results --remove_original_files --config exp_configs/mvp_configs/config_standard_attention_real_3072_partial_points_rot_90_scale_1.2_translation_0.1.json --ckpt_name pointnet_ckpt_643499.pkl --batch_size 32 --phase test_trainset --save_multiple_t_slices --t_slices '[100]' --device_ids '0,1,2,3,4,5,6,7'
    python generate_samples_distributed.py --execute --gather_results --remove_original_files --config exp_configs/mvp_configs/config_standard_attention_real_3072_partial_points_rot_90_scale_1.2_translation_0.1.json --ckpt_name pointnet_ckpt_643499.pkl --batch_size 32 --phase test_trainset --use_a_precomputed_XT --T_step 100 --XT_folder mvp_dataloader/data/mvp_dataset/generated_samples/T1000_betaT0.02_shape_completion_mirror_rot_90_scale_1.2_translation_0.1/pointnet_ckpt_643499 --augment_data_during_generation --augmentation_during_generation '1.2; 90; 0.5; 0.1' --num_trials 10 --device_ids '0,1,2,3,4,5,6,7'
    '''
    parser = argparse.ArgumentParser()
    parser.add_argument('--execute', action='store_true',
                        help='if true, we run the generation jobs, if false, we only gather the generated results (default: false)')
    parser.add_argument('-g', '--gather_results', action='store_true',
                        help='whether to gather the generated results from different ranks (default: false)')
    parser.add_argument('--remove_original_files', action='store_true',
                        help='whether to romve original files from different ranks (default: false)')
    parser.add_argument('--num_ranks', type=int, default=-1,
                        help='number of ranks. We use num of gpus as num of ranks by default. We may want to use a larger num of ranks if we run exps on multi machines')
    parser.add_argument('--start_rank', type=int, default=0,
                        help='The starting evaluation rank. We may want to use a different start rank if we run exps on multi machines')
    parser.add_argument('-c', '--config', type=str, default='config.json',
                        help='JSON file for configuration')
    parser.add_argument('--num_points', type=int, default=2048,
                        help='number of points in each shape')
    parser.add_argument('--ckpt_iter', default='max',
                        help='Which checkpoint to use; assign a number or "max" or "best"')
    parser.add_argument('--ckpt_name', default='',
                        help='Which checkpoint to use, the file name of the ckeckpoint')
    parser.add_argument('-b', '--batch_size', type=int, default=64,
                        help='Number of data to be generated')
    parser.add_argument('-p', '--phase', type=str, default='test_trainset',
                        help='which part of the dataset to generated samples')
    parser.add_argument('--save_multiple_t_slices', action='store_true',
                        help='whether to save multiple t slices (default: false)')
    parser.add_argument('--t_slices', type=str, default='[5,10,20,50,100,200,400,600,800]',
                        help='the intermediate t slices to save')
    parser.add_argument('--fast_sampling', action='store_true',
                        help='whether to use fast sampling (default: false)')
    parser.add_argument('--fast_sampling_config', type=str, default='100; var; quadratic; 0.0',
                        help='fast_sampling_config: length; sampling_method; schedule type; kappa')
    parser.add_argument('--save_dir', type=str, default='',
                        help='the directory to save the generated samples')
    # parser.add_argument('--generate_samples_for_a_subset_of_the_datset', action='store_true',
    #                     help='whether to generate_samples_for_a_subset_of_the_datset (default: false)')
    # parser.add_argument('--subset_indices_file', type=str, default='mvp_dataloader/random_indices.pkl',
    #                     help='indices of the samples that we want to generate complete point clouds')
    parser.add_argument('--augment_data_during_generation', action='store_true',
                        help='whether to augment data during evaluation (default: false)')
    parser.add_argument('--augmentation_during_generation', type=str, default='1.2; 60; 0.5; 0.05',
                        help='augmentations during generation, (scale; rotation; mirror; translation)')
    parser.add_argument('--use_a_precomputed_XT', action='store_true',
                        help='whether to use precomputed XT to generate samples (default: false)')
    parser.add_argument('--T_step', type=int, default=100,
                        help='the t step to reverse begin with')
    # load_pre_computed_XT=False, T_step=100, XT_folder=None
    parser.add_argument('--XT_folder', type=str, default='mvp_dataloader/data/mvp_dataset/generated_samples/T1000_betaT0.02_shape_completion_mirror_rot_90_scale_1.2_translation_0.1/pointnet_ckpt_643499',
                        help='the folder that stores the precomputed XT')
    parser.add_argument('-d', '--device_ids', type=str, default='0,1,2,3,4,5,6,7',
                        help='gpu device indices to use')
    parser.add_argument('-s', '--std_out_file', type=str, default='generation.log',
                        help='generation log output file')
    # multiple trials settings
    parser.add_argument('-n', '--num_trials', type=int, default=1,
                        help='number of trials to generate for each partial point cloud')
    parser.add_argument('--start_trial', type=int, default=1,
                        help='trial index to start')

    args = parser.parse_args()
    # These keys drive this launcher only and must not be forwarded to the
    # per-rank generate_samples.py command lines.
    exclude_keys = ['execute', 'gather_results', 'remove_original_files', 'num_ranks', 'start_rank']
    args_dict = vars(args)
    device_ids = args_dict['device_ids']
    device_ids = device_ids.split(',')
    num_gpus = len(device_ids)
    num_total_ranks = args.num_ranks if args.num_ranks > 0 else num_gpus
    start_rank = args.start_rank
    # be very careful to gather results when run exps on multi machines
    if args.execute:
        # Launch one generate_samples.py subprocess per local GPU, each bound
        # to its own rank and device, then wait for all of them to finish.
        workers = []
        for idx in range(num_gpus):
            args_dict['rank'] = idx + start_rank
            args_dict['world_size'] = num_total_ranks
            args_dict['device_ids'] = device_ids[idx]
            command = ['python', 'generate_samples.py']
            command = command + dict_to_command(args_dict, exclude_keys)
            print('%d-th command' % idx)
            print(command)
            p = subprocess.Popen(command)
            workers.append(p)
        for p in workers:
            p.wait()
        print('Have finished generating samples')
    if args.gather_results:
        if args.num_trials == 1:
            std_out_file = args.std_out_file.split('.')[0]
            std_out_file = std_out_file + '_rank_0.log'
            std_out_file = os.path.join('generation_logs', std_out_file)
            # BUGFIX: the log file was previously opened without ever being
            # closed; 'with' guarantees release of the handle.
            with open(std_out_file, 'r') as f:
                for line in f:
                    # print(line)
                    if 'generated_samples will be saved to the directory' in line:
                        break
            # ``line`` keeps the matched line after the loop; if the marker is
            # missing this falls back to the last line of the file.
            directory = line.split()[-1]
            father_directory = os.path.split(directory)[0]
            print('We will gather results from the directory', father_directory)
            gather_generated_results(father_directory, num_total_ranks, remove_original_files=args.remove_original_files)
        else:
            for trial_idx in range(args.start_trial, args.start_trial+args.num_trials):
                print('-' * 50)
                print('Gathering results for trial %d' % trial_idx)
                std_out_file = args.std_out_file.split('.')[0]
                std_out_file = std_out_file + '_trial_%d_rank_0.log' % trial_idx
                std_out_file = os.path.join('generation_logs', std_out_file)
                # BUGFIX: same unclosed-handle issue as above.
                with open(std_out_file, 'r') as f:
                    for line in f:
                        # print(line)
                        if 'generated_samples will be saved to the directory' in line:
                            break
                directory = line.split()[-1]
                father_directory = os.path.split(directory)[0]
                print('We will gather results from the directory', father_directory)
                gather_generated_results(father_directory, num_total_ranks, remove_original_files=args.remove_original_files)
| StarcoderdataPython |
9782910 | import pytest
from aiogtts.tokenizer import Tokenizer, symbols
from aiogtts.tokenizer.tokenizer_cases import tone_marks, period_comma, colon, other_punctuation, legacy_all_punctuation
def test_tone_marks():
    """Tone-mark punctuation (?, !) splits the text but stays on the token."""
    tokenizer = Tokenizer([tone_marks])
    text = 'Lorem? Ipsum!'
    expected = ['Lorem?', 'Ipsum!']
    assert tokenizer.run(text) == expected
def test_period_comma():
    """Periods/commas split, except inside numbers and abbreviations."""
    tokenizer = Tokenizer([period_comma])
    text = "Hello, it's 24.5 degrees in the U.K. today. $20,000,000."
    expected = ['Hello', "it's 24.5 degrees in the U.K. today", '$20,000,000.']
    assert tokenizer.run(text) == expected
def test_colon():
    """Colons split except when they look like a time (digit:digit)."""
    tokenizer = Tokenizer([colon])
    text = "It's now 6:30 which means: morning missing:space"
    expected = ["It's now 6:30 which means", ' morning missing', 'space']
    assert tokenizer.run(text) == expected
def test_other_punctuation():
    """Every 'other' punctuation mark (all minus tone/period-comma/colon) splits."""
    other_punc = set(symbols.ALL_PUNC)
    other_punc -= set(symbols.TONE_MARKS)
    other_punc -= set(symbols.PERIOD_COMMA)
    other_punc -= set(symbols.COLON)
    other_punc_str = ''.join(other_punc)
    tokenizer = Tokenizer([other_punctuation])
    # Splitting on every single character yields one more token than characters.
    assert len(tokenizer.run(other_punc_str)) - 1 == len(other_punc_str)
def test_legacy_all_punctuation():
    """The legacy case splits on every punctuation character without exception."""
    tokenizer = Tokenizer([legacy_all_punctuation])
    tokens = tokenizer.run(symbols.ALL_PUNC)
    assert len(tokens) - 1 == len(symbols.ALL_PUNC)
# Allow running this test module directly; -x stops at the first failure.
if __name__ == '__main__':
    pytest.main(['-x', __file__])
| StarcoderdataPython |
228364 | <gh_stars>0
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from ._enums import *
__all__ = [
'AccessReviewInstanceArgs',
'AccessReviewReviewerArgs',
'ApprovalSettingsArgs',
'ApprovalStageArgs',
'IdentityArgs',
'ManagementLockOwnerArgs',
'NonComplianceMessageArgs',
'ParameterDefinitionsValueArgs',
'ParameterDefinitionsValueMetadataArgs',
'ParameterValuesValueArgs',
'PermissionArgs',
'PolicyDefinitionGroupArgs',
'PolicyDefinitionReferenceArgs',
'PrincipalArgs',
'RoleManagementPolicyApprovalRuleArgs',
'RoleManagementPolicyAuthenticationContextRuleArgs',
'RoleManagementPolicyEnablementRuleArgs',
'RoleManagementPolicyExpirationRuleArgs',
'RoleManagementPolicyNotificationRuleArgs',
'RoleManagementPolicyRuleTargetArgs',
'SingleUserArgs',
]
# NOTE: auto-generated by the Pulumi SDK Generator (see the file header);
# manual edits will be lost on regeneration.
@pulumi.input_type
class AccessReviewInstanceArgs:
    def __init__(__self__, *,
                 end_date_time: Optional[pulumi.Input[str]] = None,
                 start_date_time: Optional[pulumi.Input[str]] = None):
        """
        Access Review Instance.
        :param pulumi.Input[str] end_date_time: The DateTime when the review instance is scheduled to end.
        :param pulumi.Input[str] start_date_time: The DateTime when the review instance is scheduled to be start.
        """
        # Only store properties that were explicitly provided.
        if end_date_time is not None:
            pulumi.set(__self__, "end_date_time", end_date_time)
        if start_date_time is not None:
            pulumi.set(__self__, "start_date_time", start_date_time)

    @property
    @pulumi.getter(name="endDateTime")
    def end_date_time(self) -> Optional[pulumi.Input[str]]:
        """
        The DateTime when the review instance is scheduled to end.
        """
        return pulumi.get(self, "end_date_time")

    @end_date_time.setter
    def end_date_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "end_date_time", value)

    @property
    @pulumi.getter(name="startDateTime")
    def start_date_time(self) -> Optional[pulumi.Input[str]]:
        """
        The DateTime when the review instance is scheduled to be start.
        """
        return pulumi.get(self, "start_date_time")

    @start_date_time.setter
    def start_date_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "start_date_time", value)
# NOTE: auto-generated by the Pulumi SDK Generator (see the file header);
# manual edits will be lost on regeneration.
@pulumi.input_type
class AccessReviewReviewerArgs:
    def __init__(__self__, *,
                 principal_id: Optional[pulumi.Input[str]] = None):
        """
        Descriptor for a reviewer (user or service principal).
        :param pulumi.Input[str] principal_id: The id of the reviewer(user/servicePrincipal)
        """
        if principal_id is not None:
            pulumi.set(__self__, "principal_id", principal_id)

    @property
    @pulumi.getter(name="principalId")
    def principal_id(self) -> Optional[pulumi.Input[str]]:
        """
        The id of the reviewer(user/servicePrincipal)
        """
        return pulumi.get(self, "principal_id")

    @principal_id.setter
    def principal_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "principal_id", value)
# NOTE: auto-generated by the Pulumi SDK Generator (see the file header);
# manual edits will be lost on regeneration.
@pulumi.input_type
class ApprovalSettingsArgs:
    def __init__(__self__, *,
                 approval_mode: Optional[pulumi.Input[Union[str, 'ApprovalMode']]] = None,
                 approval_stages: Optional[pulumi.Input[Sequence[pulumi.Input['ApprovalStageArgs']]]] = None,
                 is_approval_required: Optional[pulumi.Input[bool]] = None,
                 is_approval_required_for_extension: Optional[pulumi.Input[bool]] = None,
                 is_requestor_justification_required: Optional[pulumi.Input[bool]] = None):
        """
        The approval settings.
        :param pulumi.Input[Union[str, 'ApprovalMode']] approval_mode: The type of rule
        :param pulumi.Input[Sequence[pulumi.Input['ApprovalStageArgs']]] approval_stages: The approval stages of the request.
        :param pulumi.Input[bool] is_approval_required: Determine whether approval is required or not.
        :param pulumi.Input[bool] is_approval_required_for_extension: Determine whether approval is required for assignment extension.
        :param pulumi.Input[bool] is_requestor_justification_required: Determine whether requestor justification required.
        """
        # Only store properties that were explicitly provided.
        if approval_mode is not None:
            pulumi.set(__self__, "approval_mode", approval_mode)
        if approval_stages is not None:
            pulumi.set(__self__, "approval_stages", approval_stages)
        if is_approval_required is not None:
            pulumi.set(__self__, "is_approval_required", is_approval_required)
        if is_approval_required_for_extension is not None:
            pulumi.set(__self__, "is_approval_required_for_extension", is_approval_required_for_extension)
        if is_requestor_justification_required is not None:
            pulumi.set(__self__, "is_requestor_justification_required", is_requestor_justification_required)

    @property
    @pulumi.getter(name="approvalMode")
    def approval_mode(self) -> Optional[pulumi.Input[Union[str, 'ApprovalMode']]]:
        """
        The type of rule
        """
        return pulumi.get(self, "approval_mode")

    @approval_mode.setter
    def approval_mode(self, value: Optional[pulumi.Input[Union[str, 'ApprovalMode']]]):
        pulumi.set(self, "approval_mode", value)

    @property
    @pulumi.getter(name="approvalStages")
    def approval_stages(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ApprovalStageArgs']]]]:
        """
        The approval stages of the request.
        """
        return pulumi.get(self, "approval_stages")

    @approval_stages.setter
    def approval_stages(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ApprovalStageArgs']]]]):
        pulumi.set(self, "approval_stages", value)

    @property
    @pulumi.getter(name="isApprovalRequired")
    def is_approval_required(self) -> Optional[pulumi.Input[bool]]:
        """
        Determine whether approval is required or not.
        """
        return pulumi.get(self, "is_approval_required")

    @is_approval_required.setter
    def is_approval_required(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_approval_required", value)

    @property
    @pulumi.getter(name="isApprovalRequiredForExtension")
    def is_approval_required_for_extension(self) -> Optional[pulumi.Input[bool]]:
        """
        Determine whether approval is required for assignment extension.
        """
        return pulumi.get(self, "is_approval_required_for_extension")

    @is_approval_required_for_extension.setter
    def is_approval_required_for_extension(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_approval_required_for_extension", value)

    @property
    @pulumi.getter(name="isRequestorJustificationRequired")
    def is_requestor_justification_required(self) -> Optional[pulumi.Input[bool]]:
        """
        Determine whether requestor justification required.
        """
        return pulumi.get(self, "is_requestor_justification_required")

    @is_requestor_justification_required.setter
    def is_requestor_justification_required(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_requestor_justification_required", value)
# NOTE: auto-generated by the Pulumi SDK Generator (see the file header);
# manual edits will be lost on regeneration.
@pulumi.input_type
class ApprovalStageArgs:
    def __init__(__self__, *,
                 approval_stage_time_out_in_days: Optional[pulumi.Input[int]] = None,
                 escalation_approvers: Optional[pulumi.Input[Sequence[pulumi.Input['SingleUserArgs']]]] = None,
                 escalation_time_in_minutes: Optional[pulumi.Input[int]] = None,
                 is_approver_justification_required: Optional[pulumi.Input[bool]] = None,
                 is_escalation_enabled: Optional[pulumi.Input[bool]] = None,
                 primary_approvers: Optional[pulumi.Input[Sequence[pulumi.Input['SingleUserArgs']]]] = None):
        """
        The approval stage.
        :param pulumi.Input[int] approval_stage_time_out_in_days: The time in days when approval request would be timed out.
        :param pulumi.Input[Sequence[pulumi.Input['SingleUserArgs']]] escalation_approvers: The escalation approver of the request.
        :param pulumi.Input[int] escalation_time_in_minutes: The time in minutes when the approval request would be escalated if the primary approver does not approves.
        :param pulumi.Input[bool] is_approver_justification_required: Determine whether approver need to provide justification for his decision.
        :param pulumi.Input[bool] is_escalation_enabled: The value determine whether escalation feature is enabled.
        :param pulumi.Input[Sequence[pulumi.Input['SingleUserArgs']]] primary_approvers: The primary approver of the request.
        """
        # Only store properties that were explicitly provided.
        if approval_stage_time_out_in_days is not None:
            pulumi.set(__self__, "approval_stage_time_out_in_days", approval_stage_time_out_in_days)
        if escalation_approvers is not None:
            pulumi.set(__self__, "escalation_approvers", escalation_approvers)
        if escalation_time_in_minutes is not None:
            pulumi.set(__self__, "escalation_time_in_minutes", escalation_time_in_minutes)
        if is_approver_justification_required is not None:
            pulumi.set(__self__, "is_approver_justification_required", is_approver_justification_required)
        if is_escalation_enabled is not None:
            pulumi.set(__self__, "is_escalation_enabled", is_escalation_enabled)
        if primary_approvers is not None:
            pulumi.set(__self__, "primary_approvers", primary_approvers)

    @property
    @pulumi.getter(name="approvalStageTimeOutInDays")
    def approval_stage_time_out_in_days(self) -> Optional[pulumi.Input[int]]:
        """
        The time in days when approval request would be timed out.
        """
        return pulumi.get(self, "approval_stage_time_out_in_days")

    @approval_stage_time_out_in_days.setter
    def approval_stage_time_out_in_days(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "approval_stage_time_out_in_days", value)

    @property
    @pulumi.getter(name="escalationApprovers")
    def escalation_approvers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SingleUserArgs']]]]:
        """
        The escalation approver of the request.
        """
        return pulumi.get(self, "escalation_approvers")

    @escalation_approvers.setter
    def escalation_approvers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SingleUserArgs']]]]):
        pulumi.set(self, "escalation_approvers", value)

    @property
    @pulumi.getter(name="escalationTimeInMinutes")
    def escalation_time_in_minutes(self) -> Optional[pulumi.Input[int]]:
        """
        The time in minutes when the approval request would be escalated if the primary approver does not approves.
        """
        return pulumi.get(self, "escalation_time_in_minutes")

    @escalation_time_in_minutes.setter
    def escalation_time_in_minutes(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "escalation_time_in_minutes", value)

    @property
    @pulumi.getter(name="isApproverJustificationRequired")
    def is_approver_justification_required(self) -> Optional[pulumi.Input[bool]]:
        """
        Determine whether approver need to provide justification for his decision.
        """
        return pulumi.get(self, "is_approver_justification_required")

    @is_approver_justification_required.setter
    def is_approver_justification_required(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_approver_justification_required", value)

    @property
    @pulumi.getter(name="isEscalationEnabled")
    def is_escalation_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        The value determine whether escalation feature is enabled.
        """
        return pulumi.get(self, "is_escalation_enabled")

    @is_escalation_enabled.setter
    def is_escalation_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_escalation_enabled", value)

    @property
    @pulumi.getter(name="primaryApprovers")
    def primary_approvers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SingleUserArgs']]]]:
        """
        The primary approver of the request.
        """
        return pulumi.get(self, "primary_approvers")

    @primary_approvers.setter
    def primary_approvers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SingleUserArgs']]]]):
        pulumi.set(self, "primary_approvers", value)
# NOTE: auto-generated by the Pulumi SDK Generator (see the file header);
# manual edits will be lost on regeneration.
@pulumi.input_type
class IdentityArgs:
    def __init__(__self__, *,
                 type: Optional[pulumi.Input['ResourceIdentityType']] = None):
        """
        Identity for the resource.
        :param pulumi.Input['ResourceIdentityType'] type: The identity type. This is the only required field when adding a system assigned identity to a resource.
        """
        if type is not None:
            pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input['ResourceIdentityType']]:
        """
        The identity type. This is the only required field when adding a system assigned identity to a resource.
        """
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: Optional[pulumi.Input['ResourceIdentityType']]):
        pulumi.set(self, "type", value)
# NOTE: auto-generated by the Pulumi SDK Generator (see the file header);
# manual edits will be lost on regeneration.
@pulumi.input_type
class ManagementLockOwnerArgs:
    def __init__(__self__, *,
                 application_id: Optional[pulumi.Input[str]] = None):
        """
        Lock owner properties.
        :param pulumi.Input[str] application_id: The application ID of the lock owner.
        """
        if application_id is not None:
            pulumi.set(__self__, "application_id", application_id)

    @property
    @pulumi.getter(name="applicationId")
    def application_id(self) -> Optional[pulumi.Input[str]]:
        """
        The application ID of the lock owner.
        """
        return pulumi.get(self, "application_id")

    @application_id.setter
    def application_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "application_id", value)
@pulumi.input_type
class NonComplianceMessageArgs:
    """A message that describes why a resource is non-compliant with the policy."""
    def __init__(__self__, *,
                 message: pulumi.Input[str],
                 policy_definition_reference_id: Optional[pulumi.Input[str]] = None):
        """
        A message that describes why a resource is non-compliant with the policy. This is shown in 'deny' error messages and on resource's non-compliant compliance results.
        :param pulumi.Input[str] message: A message that describes why a resource is non-compliant with the policy. This is shown in 'deny' error messages and on resource's non-compliant compliance results.
        :param pulumi.Input[str] policy_definition_reference_id: The policy definition reference ID within a policy set definition the message is intended for. This is only applicable if the policy assignment assigns a policy set definition. If this is not provided the message applies to all policies assigned by this policy assignment.
        """
        pulumi.set(__self__, "message", message)
        if policy_definition_reference_id is not None:
            pulumi.set(__self__, "policy_definition_reference_id", policy_definition_reference_id)
    @property
    @pulumi.getter
    def message(self) -> pulumi.Input[str]:
        """
        A message that describes why a resource is non-compliant with the policy. This is shown in 'deny' error messages and on resource's non-compliant compliance results.
        """
        return pulumi.get(self, "message")
    @message.setter
    def message(self, value: pulumi.Input[str]):
        pulumi.set(self, "message", value)
    @property
    @pulumi.getter(name="policyDefinitionReferenceId")
    def policy_definition_reference_id(self) -> Optional[pulumi.Input[str]]:
        """
        The policy definition reference ID within a policy set definition the message is intended for. This is only applicable if the policy assignment assigns a policy set definition. If this is not provided the message applies to all policies assigned by this policy assignment.
        """
        return pulumi.get(self, "policy_definition_reference_id")
    @policy_definition_reference_id.setter
    def policy_definition_reference_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "policy_definition_reference_id", value)
@pulumi.input_type
class ParameterDefinitionsValueArgs:
    """The definition of a parameter that can be provided to the policy."""
    def __init__(__self__, *,
                 allowed_values: Optional[pulumi.Input[Sequence[Any]]] = None,
                 default_value: Optional[Any] = None,
                 metadata: Optional[pulumi.Input['ParameterDefinitionsValueMetadataArgs']] = None,
                 type: Optional[pulumi.Input[Union[str, 'ParameterType']]] = None):
        """
        The definition of a parameter that can be provided to the policy.
        :param pulumi.Input[Sequence[Any]] allowed_values: The allowed values for the parameter.
        :param Any default_value: The default value for the parameter if no value is provided.
        :param pulumi.Input['ParameterDefinitionsValueMetadataArgs'] metadata: General metadata for the parameter.
        :param pulumi.Input[Union[str, 'ParameterType']] type: The data type of the parameter.
        """
        # All fields are optional; only explicitly supplied values are stored.
        if allowed_values is not None:
            pulumi.set(__self__, "allowed_values", allowed_values)
        if default_value is not None:
            pulumi.set(__self__, "default_value", default_value)
        if metadata is not None:
            pulumi.set(__self__, "metadata", metadata)
        if type is not None:
            pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter(name="allowedValues")
    def allowed_values(self) -> Optional[pulumi.Input[Sequence[Any]]]:
        """
        The allowed values for the parameter.
        """
        return pulumi.get(self, "allowed_values")
    @allowed_values.setter
    def allowed_values(self, value: Optional[pulumi.Input[Sequence[Any]]]):
        pulumi.set(self, "allowed_values", value)
    @property
    @pulumi.getter(name="defaultValue")
    def default_value(self) -> Optional[Any]:
        """
        The default value for the parameter if no value is provided.
        """
        return pulumi.get(self, "default_value")
    @default_value.setter
    def default_value(self, value: Optional[Any]):
        pulumi.set(self, "default_value", value)
    @property
    @pulumi.getter
    def metadata(self) -> Optional[pulumi.Input['ParameterDefinitionsValueMetadataArgs']]:
        """
        General metadata for the parameter.
        """
        return pulumi.get(self, "metadata")
    @metadata.setter
    def metadata(self, value: Optional[pulumi.Input['ParameterDefinitionsValueMetadataArgs']]):
        pulumi.set(self, "metadata", value)
    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[Union[str, 'ParameterType']]]:
        """
        The data type of the parameter.
        """
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: Optional[pulumi.Input[Union[str, 'ParameterType']]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class ParameterDefinitionsValueMetadataArgs:
    """General metadata for the parameter."""
    def __init__(__self__, *,
                 assign_permissions: Optional[pulumi.Input[bool]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 display_name: Optional[pulumi.Input[str]] = None,
                 strong_type: Optional[pulumi.Input[str]] = None):
        """
        General metadata for the parameter.
        :param pulumi.Input[bool] assign_permissions: Set to true to have Azure portal create role assignments on the resource ID or resource scope value of this parameter during policy assignment. This property is useful in case you wish to assign permissions outside the assignment scope.
        :param pulumi.Input[str] description: The description of the parameter.
        :param pulumi.Input[str] display_name: The display name for the parameter.
        :param pulumi.Input[str] strong_type: Used when assigning the policy definition through the portal. Provides a context aware list of values for the user to choose from.
        """
        # All fields are optional; only explicitly supplied values are stored.
        if assign_permissions is not None:
            pulumi.set(__self__, "assign_permissions", assign_permissions)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if display_name is not None:
            pulumi.set(__self__, "display_name", display_name)
        if strong_type is not None:
            pulumi.set(__self__, "strong_type", strong_type)
    @property
    @pulumi.getter(name="assignPermissions")
    def assign_permissions(self) -> Optional[pulumi.Input[bool]]:
        """
        Set to true to have Azure portal create role assignments on the resource ID or resource scope value of this parameter during policy assignment. This property is useful in case you wish to assign permissions outside the assignment scope.
        """
        return pulumi.get(self, "assign_permissions")
    @assign_permissions.setter
    def assign_permissions(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "assign_permissions", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description of the parameter.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> Optional[pulumi.Input[str]]:
        """
        The display name for the parameter.
        """
        return pulumi.get(self, "display_name")
    @display_name.setter
    def display_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "display_name", value)
    @property
    @pulumi.getter(name="strongType")
    def strong_type(self) -> Optional[pulumi.Input[str]]:
        """
        Used when assigning the policy definition through the portal. Provides a context aware list of values for the user to choose from.
        """
        return pulumi.get(self, "strong_type")
    @strong_type.setter
    def strong_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "strong_type", value)
@pulumi.input_type
class ParameterValuesValueArgs:
    """The value of a parameter."""
    def __init__(__self__, *,
                 value: Optional[Any] = None):
        """
        The value of a parameter.
        :param Any value: The value of the parameter.
        """
        if value is not None:
            pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def value(self) -> Optional[Any]:
        """
        The value of the parameter.
        """
        return pulumi.get(self, "value")
    @value.setter
    def value(self, value: Optional[Any]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class PermissionArgs:
    """Role definition permissions."""
    def __init__(__self__, *,
                 actions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 data_actions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 not_actions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 not_data_actions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        Role definition permissions.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] actions: Allowed actions.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] data_actions: Allowed Data actions.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] not_actions: Denied actions.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] not_data_actions: Denied Data actions.
        """
        # All fields are optional; only explicitly supplied values are stored.
        if actions is not None:
            pulumi.set(__self__, "actions", actions)
        if data_actions is not None:
            pulumi.set(__self__, "data_actions", data_actions)
        if not_actions is not None:
            pulumi.set(__self__, "not_actions", not_actions)
        if not_data_actions is not None:
            pulumi.set(__self__, "not_data_actions", not_data_actions)
    @property
    @pulumi.getter
    def actions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Allowed actions.
        """
        return pulumi.get(self, "actions")
    @actions.setter
    def actions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "actions", value)
    @property
    @pulumi.getter(name="dataActions")
    def data_actions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Allowed Data actions.
        """
        return pulumi.get(self, "data_actions")
    @data_actions.setter
    def data_actions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "data_actions", value)
    @property
    @pulumi.getter(name="notActions")
    def not_actions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Denied actions.
        """
        return pulumi.get(self, "not_actions")
    @not_actions.setter
    def not_actions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "not_actions", value)
    @property
    @pulumi.getter(name="notDataActions")
    def not_data_actions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Denied Data actions.
        """
        return pulumi.get(self, "not_data_actions")
    @not_data_actions.setter
    def not_data_actions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "not_data_actions", value)
@pulumi.input_type
class PolicyDefinitionGroupArgs:
    """The policy definition group."""
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 additional_metadata_id: Optional[pulumi.Input[str]] = None,
                 category: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 display_name: Optional[pulumi.Input[str]] = None):
        """
        The policy definition group.
        :param pulumi.Input[str] name: The name of the group.
        :param pulumi.Input[str] additional_metadata_id: A resource ID of a resource that contains additional metadata about the group.
        :param pulumi.Input[str] category: The group's category.
        :param pulumi.Input[str] description: The group's description.
        :param pulumi.Input[str] display_name: The group's display name.
        """
        # 'name' is the only required field; the rest are stored when supplied.
        pulumi.set(__self__, "name", name)
        if additional_metadata_id is not None:
            pulumi.set(__self__, "additional_metadata_id", additional_metadata_id)
        if category is not None:
            pulumi.set(__self__, "category", category)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if display_name is not None:
            pulumi.set(__self__, "display_name", display_name)
    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The name of the group.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="additionalMetadataId")
    def additional_metadata_id(self) -> Optional[pulumi.Input[str]]:
        """
        A resource ID of a resource that contains additional metadata about the group.
        """
        return pulumi.get(self, "additional_metadata_id")
    @additional_metadata_id.setter
    def additional_metadata_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "additional_metadata_id", value)
    @property
    @pulumi.getter
    def category(self) -> Optional[pulumi.Input[str]]:
        """
        The group's category.
        """
        return pulumi.get(self, "category")
    @category.setter
    def category(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "category", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The group's description.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> Optional[pulumi.Input[str]]:
        """
        The group's display name.
        """
        return pulumi.get(self, "display_name")
    @display_name.setter
    def display_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "display_name", value)
@pulumi.input_type
class PolicyDefinitionReferenceArgs:
    """The policy definition reference."""
    def __init__(__self__, *,
                 policy_definition_id: pulumi.Input[str],
                 group_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 parameters: Optional[pulumi.Input[Mapping[str, pulumi.Input['ParameterValuesValueArgs']]]] = None,
                 policy_definition_reference_id: Optional[pulumi.Input[str]] = None):
        """
        The policy definition reference.
        :param pulumi.Input[str] policy_definition_id: The ID of the policy definition or policy set definition.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] group_names: The name of the groups that this policy definition reference belongs to.
        :param pulumi.Input[Mapping[str, pulumi.Input['ParameterValuesValueArgs']]] parameters: The parameter values for the referenced policy rule. The keys are the parameter names.
        :param pulumi.Input[str] policy_definition_reference_id: A unique id (within the policy set definition) for this policy definition reference.
        """
        # 'policy_definition_id' is required; the rest are stored when supplied.
        pulumi.set(__self__, "policy_definition_id", policy_definition_id)
        if group_names is not None:
            pulumi.set(__self__, "group_names", group_names)
        if parameters is not None:
            pulumi.set(__self__, "parameters", parameters)
        if policy_definition_reference_id is not None:
            pulumi.set(__self__, "policy_definition_reference_id", policy_definition_reference_id)
    @property
    @pulumi.getter(name="policyDefinitionId")
    def policy_definition_id(self) -> pulumi.Input[str]:
        """
        The ID of the policy definition or policy set definition.
        """
        return pulumi.get(self, "policy_definition_id")
    @policy_definition_id.setter
    def policy_definition_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "policy_definition_id", value)
    @property
    @pulumi.getter(name="groupNames")
    def group_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The name of the groups that this policy definition reference belongs to.
        """
        return pulumi.get(self, "group_names")
    @group_names.setter
    def group_names(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "group_names", value)
    @property
    @pulumi.getter
    def parameters(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['ParameterValuesValueArgs']]]]:
        """
        The parameter values for the referenced policy rule. The keys are the parameter names.
        """
        return pulumi.get(self, "parameters")
    @parameters.setter
    def parameters(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['ParameterValuesValueArgs']]]]):
        pulumi.set(self, "parameters", value)
    @property
    @pulumi.getter(name="policyDefinitionReferenceId")
    def policy_definition_reference_id(self) -> Optional[pulumi.Input[str]]:
        """
        A unique id (within the policy set definition) for this policy definition reference.
        """
        return pulumi.get(self, "policy_definition_reference_id")
    @policy_definition_reference_id.setter
    def policy_definition_reference_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "policy_definition_reference_id", value)
@pulumi.input_type
class PrincipalArgs:
    """Deny assignment principal."""
    def __init__(__self__, *,
                 id: Optional[pulumi.Input[str]] = None,
                 type: Optional[pulumi.Input[str]] = None):
        """
        Deny assignment principal.
        :param pulumi.Input[str] id: Object ID of the Azure AD principal (user, group, or service principal) to which the deny assignment applies. An empty guid '00000000-0000-0000-0000-000000000000' as principal id and principal type as 'Everyone' represents all users, groups and service principals.
        :param pulumi.Input[str] type: Type of object represented by principal id (user, group, or service principal). An empty guid '00000000-0000-0000-0000-000000000000' as principal id and principal type as 'Everyone' represents all users, groups and service principals.
        """
        if id is not None:
            pulumi.set(__self__, "id", id)
        if type is not None:
            pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        Object ID of the Azure AD principal (user, group, or service principal) to which the deny assignment applies. An empty guid '00000000-0000-0000-0000-000000000000' as principal id and principal type as 'Everyone' represents all users, groups and service principals.
        """
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)
    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """
        Type of object represented by principal id (user, group, or service principal). An empty guid '00000000-0000-0000-0000-000000000000' as principal id and principal type as 'Everyone' represents all users, groups and service principals.
        """
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class RoleManagementPolicyApprovalRuleArgs:
    """The role management policy approval rule (discriminator 'RoleManagementPolicyApprovalRule')."""
    def __init__(__self__, *,
                 rule_type: pulumi.Input[str],
                 id: Optional[pulumi.Input[str]] = None,
                 setting: Optional[pulumi.Input['ApprovalSettingsArgs']] = None,
                 target: Optional[pulumi.Input['RoleManagementPolicyRuleTargetArgs']] = None):
        """
        The role management policy rule.
        :param pulumi.Input[str] rule_type: The type of rule
               Expected value is 'RoleManagementPolicyApprovalRule'.
        :param pulumi.Input[str] id: The id of the rule.
        :param pulumi.Input['ApprovalSettingsArgs'] setting: The approval setting
        :param pulumi.Input['RoleManagementPolicyRuleTargetArgs'] target: The target of the current rule.
        """
        # rule_type is a discriminator: the literal expected value is stored
        # regardless of the argument passed in.
        pulumi.set(__self__, "rule_type", 'RoleManagementPolicyApprovalRule')
        if id is not None:
            pulumi.set(__self__, "id", id)
        if setting is not None:
            pulumi.set(__self__, "setting", setting)
        if target is not None:
            pulumi.set(__self__, "target", target)
    @property
    @pulumi.getter(name="ruleType")
    def rule_type(self) -> pulumi.Input[str]:
        """
        The type of rule
        Expected value is 'RoleManagementPolicyApprovalRule'.
        """
        return pulumi.get(self, "rule_type")
    @rule_type.setter
    def rule_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "rule_type", value)
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        The id of the rule.
        """
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)
    @property
    @pulumi.getter
    def setting(self) -> Optional[pulumi.Input['ApprovalSettingsArgs']]:
        """
        The approval setting
        """
        return pulumi.get(self, "setting")
    @setting.setter
    def setting(self, value: Optional[pulumi.Input['ApprovalSettingsArgs']]):
        pulumi.set(self, "setting", value)
    @property
    @pulumi.getter
    def target(self) -> Optional[pulumi.Input['RoleManagementPolicyRuleTargetArgs']]:
        """
        The target of the current rule.
        """
        return pulumi.get(self, "target")
    @target.setter
    def target(self, value: Optional[pulumi.Input['RoleManagementPolicyRuleTargetArgs']]):
        pulumi.set(self, "target", value)
@pulumi.input_type
class RoleManagementPolicyAuthenticationContextRuleArgs:
    """The role management policy authentication context rule (discriminator 'RoleManagementPolicyAuthenticationContextRule')."""
    def __init__(__self__, *,
                 rule_type: pulumi.Input[str],
                 claim_value: Optional[pulumi.Input[str]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 is_enabled: Optional[pulumi.Input[bool]] = None,
                 target: Optional[pulumi.Input['RoleManagementPolicyRuleTargetArgs']] = None):
        """
        The role management policy rule.
        :param pulumi.Input[str] rule_type: The type of rule
               Expected value is 'RoleManagementPolicyAuthenticationContextRule'.
        :param pulumi.Input[str] claim_value: The claim value.
        :param pulumi.Input[str] id: The id of the rule.
        :param pulumi.Input[bool] is_enabled: The value indicating if rule is enabled.
        :param pulumi.Input['RoleManagementPolicyRuleTargetArgs'] target: The target of the current rule.
        """
        # rule_type is a discriminator: the literal expected value is stored
        # regardless of the argument passed in.
        pulumi.set(__self__, "rule_type", 'RoleManagementPolicyAuthenticationContextRule')
        if claim_value is not None:
            pulumi.set(__self__, "claim_value", claim_value)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if is_enabled is not None:
            pulumi.set(__self__, "is_enabled", is_enabled)
        if target is not None:
            pulumi.set(__self__, "target", target)
    @property
    @pulumi.getter(name="ruleType")
    def rule_type(self) -> pulumi.Input[str]:
        """
        The type of rule
        Expected value is 'RoleManagementPolicyAuthenticationContextRule'.
        """
        return pulumi.get(self, "rule_type")
    @rule_type.setter
    def rule_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "rule_type", value)
    @property
    @pulumi.getter(name="claimValue")
    def claim_value(self) -> Optional[pulumi.Input[str]]:
        """
        The claim value.
        """
        return pulumi.get(self, "claim_value")
    @claim_value.setter
    def claim_value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "claim_value", value)
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        The id of the rule.
        """
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)
    @property
    @pulumi.getter(name="isEnabled")
    def is_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        The value indicating if rule is enabled.
        """
        return pulumi.get(self, "is_enabled")
    @is_enabled.setter
    def is_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_enabled", value)
    @property
    @pulumi.getter
    def target(self) -> Optional[pulumi.Input['RoleManagementPolicyRuleTargetArgs']]:
        """
        The target of the current rule.
        """
        return pulumi.get(self, "target")
    @target.setter
    def target(self, value: Optional[pulumi.Input['RoleManagementPolicyRuleTargetArgs']]):
        pulumi.set(self, "target", value)
@pulumi.input_type
class RoleManagementPolicyEnablementRuleArgs:
    """The role management policy enablement rule (discriminator 'RoleManagementPolicyEnablementRule')."""
    def __init__(__self__, *,
                 rule_type: pulumi.Input[str],
                 enabled_rules: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 target: Optional[pulumi.Input['RoleManagementPolicyRuleTargetArgs']] = None):
        """
        The role management policy rule.
        :param pulumi.Input[str] rule_type: The type of rule
               Expected value is 'RoleManagementPolicyEnablementRule'.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] enabled_rules: The list of enabled rules.
        :param pulumi.Input[str] id: The id of the rule.
        :param pulumi.Input['RoleManagementPolicyRuleTargetArgs'] target: The target of the current rule.
        """
        # rule_type is a discriminator: the literal expected value is stored
        # regardless of the argument passed in.
        pulumi.set(__self__, "rule_type", 'RoleManagementPolicyEnablementRule')
        if enabled_rules is not None:
            pulumi.set(__self__, "enabled_rules", enabled_rules)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if target is not None:
            pulumi.set(__self__, "target", target)
    @property
    @pulumi.getter(name="ruleType")
    def rule_type(self) -> pulumi.Input[str]:
        """
        The type of rule
        Expected value is 'RoleManagementPolicyEnablementRule'.
        """
        return pulumi.get(self, "rule_type")
    @rule_type.setter
    def rule_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "rule_type", value)
    @property
    @pulumi.getter(name="enabledRules")
    def enabled_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The list of enabled rules.
        """
        return pulumi.get(self, "enabled_rules")
    @enabled_rules.setter
    def enabled_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "enabled_rules", value)
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        The id of the rule.
        """
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)
    @property
    @pulumi.getter
    def target(self) -> Optional[pulumi.Input['RoleManagementPolicyRuleTargetArgs']]:
        """
        The target of the current rule.
        """
        return pulumi.get(self, "target")
    @target.setter
    def target(self, value: Optional[pulumi.Input['RoleManagementPolicyRuleTargetArgs']]):
        pulumi.set(self, "target", value)
@pulumi.input_type
class RoleManagementPolicyExpirationRuleArgs:
    """The role management policy expiration rule (discriminator 'RoleManagementPolicyExpirationRule')."""
    def __init__(__self__, *,
                 rule_type: pulumi.Input[str],
                 id: Optional[pulumi.Input[str]] = None,
                 is_expiration_required: Optional[pulumi.Input[bool]] = None,
                 maximum_duration: Optional[pulumi.Input[str]] = None,
                 target: Optional[pulumi.Input['RoleManagementPolicyRuleTargetArgs']] = None):
        """
        The role management policy rule.
        :param pulumi.Input[str] rule_type: The type of rule
               Expected value is 'RoleManagementPolicyExpirationRule'.
        :param pulumi.Input[str] id: The id of the rule.
        :param pulumi.Input[bool] is_expiration_required: The value indicating whether expiration is required.
        :param pulumi.Input[str] maximum_duration: The maximum duration of expiration in timespan.
        :param pulumi.Input['RoleManagementPolicyRuleTargetArgs'] target: The target of the current rule.
        """
        # rule_type is a discriminator: the literal expected value is stored
        # regardless of the argument passed in.
        pulumi.set(__self__, "rule_type", 'RoleManagementPolicyExpirationRule')
        if id is not None:
            pulumi.set(__self__, "id", id)
        if is_expiration_required is not None:
            pulumi.set(__self__, "is_expiration_required", is_expiration_required)
        if maximum_duration is not None:
            pulumi.set(__self__, "maximum_duration", maximum_duration)
        if target is not None:
            pulumi.set(__self__, "target", target)
    @property
    @pulumi.getter(name="ruleType")
    def rule_type(self) -> pulumi.Input[str]:
        """
        The type of rule
        Expected value is 'RoleManagementPolicyExpirationRule'.
        """
        return pulumi.get(self, "rule_type")
    @rule_type.setter
    def rule_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "rule_type", value)
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        The id of the rule.
        """
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)
    @property
    @pulumi.getter(name="isExpirationRequired")
    def is_expiration_required(self) -> Optional[pulumi.Input[bool]]:
        """
        The value indicating whether expiration is required.
        """
        return pulumi.get(self, "is_expiration_required")
    @is_expiration_required.setter
    def is_expiration_required(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_expiration_required", value)
    @property
    @pulumi.getter(name="maximumDuration")
    def maximum_duration(self) -> Optional[pulumi.Input[str]]:
        """
        The maximum duration of expiration in timespan.
        """
        return pulumi.get(self, "maximum_duration")
    @maximum_duration.setter
    def maximum_duration(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "maximum_duration", value)
    @property
    @pulumi.getter
    def target(self) -> Optional[pulumi.Input['RoleManagementPolicyRuleTargetArgs']]:
        """
        The target of the current rule.
        """
        return pulumi.get(self, "target")
    @target.setter
    def target(self, value: Optional[pulumi.Input['RoleManagementPolicyRuleTargetArgs']]):
        pulumi.set(self, "target", value)
@pulumi.input_type
class RoleManagementPolicyNotificationRuleArgs:
    """The role management policy notification rule (discriminator 'RoleManagementPolicyNotificationRule')."""
    def __init__(__self__, *,
                 rule_type: pulumi.Input[str],
                 id: Optional[pulumi.Input[str]] = None,
                 notification_level: Optional[pulumi.Input[Union[str, 'NotificationLevel']]] = None,
                 notification_recipients: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 notification_type: Optional[pulumi.Input[Union[str, 'NotificationDeliveryMechanism']]] = None,
                 recipient_type: Optional[pulumi.Input[Union[str, 'RecipientType']]] = None,
                 target: Optional[pulumi.Input['RoleManagementPolicyRuleTargetArgs']] = None):
        """
        The role management policy rule.
        :param pulumi.Input[str] rule_type: The type of rule
               Expected value is 'RoleManagementPolicyNotificationRule'.
        :param pulumi.Input[str] id: The id of the rule.
        :param pulumi.Input[Union[str, 'NotificationLevel']] notification_level: The notification level.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] notification_recipients: The list of notification recipients.
        :param pulumi.Input[Union[str, 'NotificationDeliveryMechanism']] notification_type: The type of notification.
        :param pulumi.Input[Union[str, 'RecipientType']] recipient_type: The recipient type.
        :param pulumi.Input['RoleManagementPolicyRuleTargetArgs'] target: The target of the current rule.
        """
        # rule_type is a discriminator: the literal expected value is stored
        # regardless of the argument passed in.
        pulumi.set(__self__, "rule_type", 'RoleManagementPolicyNotificationRule')
        if id is not None:
            pulumi.set(__self__, "id", id)
        if notification_level is not None:
            pulumi.set(__self__, "notification_level", notification_level)
        if notification_recipients is not None:
            pulumi.set(__self__, "notification_recipients", notification_recipients)
        if notification_type is not None:
            pulumi.set(__self__, "notification_type", notification_type)
        if recipient_type is not None:
            pulumi.set(__self__, "recipient_type", recipient_type)
        if target is not None:
            pulumi.set(__self__, "target", target)
    @property
    @pulumi.getter(name="ruleType")
    def rule_type(self) -> pulumi.Input[str]:
        """
        The type of rule
        Expected value is 'RoleManagementPolicyNotificationRule'.
        """
        return pulumi.get(self, "rule_type")
    @rule_type.setter
    def rule_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "rule_type", value)
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        The id of the rule.
        """
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)
    @property
    @pulumi.getter(name="notificationLevel")
    def notification_level(self) -> Optional[pulumi.Input[Union[str, 'NotificationLevel']]]:
        """
        The notification level.
        """
        return pulumi.get(self, "notification_level")
    @notification_level.setter
    def notification_level(self, value: Optional[pulumi.Input[Union[str, 'NotificationLevel']]]):
        pulumi.set(self, "notification_level", value)
    @property
    @pulumi.getter(name="notificationRecipients")
    def notification_recipients(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The list of notification recipients.
        """
        return pulumi.get(self, "notification_recipients")
    @notification_recipients.setter
    def notification_recipients(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "notification_recipients", value)
    @property
    @pulumi.getter(name="notificationType")
    def notification_type(self) -> Optional[pulumi.Input[Union[str, 'NotificationDeliveryMechanism']]]:
        """
        The type of notification.
        """
        return pulumi.get(self, "notification_type")
    @notification_type.setter
    def notification_type(self, value: Optional[pulumi.Input[Union[str, 'NotificationDeliveryMechanism']]]):
        pulumi.set(self, "notification_type", value)
    @property
    @pulumi.getter(name="recipientType")
    def recipient_type(self) -> Optional[pulumi.Input[Union[str, 'RecipientType']]]:
        """
        The recipient type.
        """
        return pulumi.get(self, "recipient_type")
    @recipient_type.setter
    def recipient_type(self, value: Optional[pulumi.Input[Union[str, 'RecipientType']]]):
        pulumi.set(self, "recipient_type", value)
    @property
    @pulumi.getter
    def target(self) -> Optional[pulumi.Input['RoleManagementPolicyRuleTargetArgs']]:
        """
        The target of the current rule.
        """
        return pulumi.get(self, "target")
    @target.setter
    def target(self, value: Optional[pulumi.Input['RoleManagementPolicyRuleTargetArgs']]):
        pulumi.set(self, "target", value)
# NOTE(review): appears auto-generated (Pulumi codegen style); code kept
# byte-identical, formatting restored.
@pulumi.input_type
class RoleManagementPolicyRuleTargetArgs:
    def __init__(__self__, *,
                 caller: Optional[pulumi.Input[str]] = None,
                 enforced_settings: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 inheritable_settings: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 level: Optional[pulumi.Input[str]] = None,
                 operations: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 target_objects: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        The role management policy rule target.
        :param pulumi.Input[str] caller: The caller of the setting.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] enforced_settings: The list of enforced settings.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] inheritable_settings: The list of inheritable settings.
        :param pulumi.Input[str] level: The assignment level to which it is applied.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] operations: The type of operation.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] target_objects: The list of target objects.
        """
        # Every field is optional; only explicitly supplied values are stored.
        if caller is not None:
            pulumi.set(__self__, "caller", caller)
        if enforced_settings is not None:
            pulumi.set(__self__, "enforced_settings", enforced_settings)
        if inheritable_settings is not None:
            pulumi.set(__self__, "inheritable_settings", inheritable_settings)
        if level is not None:
            pulumi.set(__self__, "level", level)
        if operations is not None:
            pulumi.set(__self__, "operations", operations)
        if target_objects is not None:
            pulumi.set(__self__, "target_objects", target_objects)

    @property
    @pulumi.getter
    def caller(self) -> Optional[pulumi.Input[str]]:
        """
        The caller of the setting.
        """
        return pulumi.get(self, "caller")

    @caller.setter
    def caller(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "caller", value)

    @property
    @pulumi.getter(name="enforcedSettings")
    def enforced_settings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The list of enforced settings.
        """
        return pulumi.get(self, "enforced_settings")

    @enforced_settings.setter
    def enforced_settings(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "enforced_settings", value)

    @property
    @pulumi.getter(name="inheritableSettings")
    def inheritable_settings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The list of inheritable settings.
        """
        return pulumi.get(self, "inheritable_settings")

    @inheritable_settings.setter
    def inheritable_settings(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "inheritable_settings", value)

    @property
    @pulumi.getter
    def level(self) -> Optional[pulumi.Input[str]]:
        """
        The assignment level to which it is applied.
        """
        return pulumi.get(self, "level")

    @level.setter
    def level(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "level", value)

    @property
    @pulumi.getter
    def operations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The type of operation.
        """
        return pulumi.get(self, "operations")

    @operations.setter
    def operations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "operations", value)

    @property
    @pulumi.getter(name="targetObjects")
    def target_objects(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The list of target objects.
        """
        return pulumi.get(self, "target_objects")

    @target_objects.setter
    def target_objects(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "target_objects", value)
# NOTE(review): appears auto-generated (Pulumi codegen style); code kept
# byte-identical, formatting restored.
@pulumi.input_type
class SingleUserArgs:
    def __init__(__self__, *,
                 user_type: pulumi.Input[str],
                 description: Optional[pulumi.Input[str]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 is_backup: Optional[pulumi.Input[bool]] = None):
        """
        The detail of a user.
        :param pulumi.Input[str] user_type: The object id of the user.
               Expected value is 'SingleUser'.
        :param pulumi.Input[str] description: The description of the user.
        :param pulumi.Input[str] id: The object id of the user.
        :param pulumi.Input[bool] is_backup: The value indicating whether the user is a backup fallback approver
        """
        # Discriminator field is forced to the literal 'SingleUser' regardless
        # of the value passed in.
        pulumi.set(__self__, "user_type", 'SingleUser')
        if description is not None:
            pulumi.set(__self__, "description", description)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if is_backup is not None:
            pulumi.set(__self__, "is_backup", is_backup)

    @property
    @pulumi.getter(name="userType")
    def user_type(self) -> pulumi.Input[str]:
        """
        The object id of the user.
        Expected value is 'SingleUser'.
        """
        return pulumi.get(self, "user_type")

    @user_type.setter
    def user_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "user_type", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description of the user.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        The object id of the user.
        """
        return pulumi.get(self, "id")

    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)

    @property
    @pulumi.getter(name="isBackup")
    def is_backup(self) -> Optional[pulumi.Input[bool]]:
        """
        The value indicating whether the user is a backup fallback approver
        """
        return pulumi.get(self, "is_backup")

    @is_backup.setter
    def is_backup(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_backup", value)
| StarcoderdataPython |
3416109 | import sys, os, socket
from termcolor import colored
from socketserver import ThreadingMixIn
from http.server import SimpleHTTPRequestHandler, HTTPServer
##
## @brief Threaded HTTP file server that prints colored start/stop banners.
##
class ThreadingSimpleServer(ThreadingMixIn, HTTPServer):
    """HTTPServer that handles each request in its own thread and announces
    activation/shutdown on the console."""

    # Rebind the port immediately on restart (skips TIME_WAIT rebind errors).
    allow_reuse_address = True
    # Colored program label reused in every console message.
    prg_lbl = colored('mini-mead','green', attrs=['bold'])

    def greetings(self):
        # Show loopback instead of the 0.0.0.0 wildcard so the printed URL is clickable.
        host = str(self.server_address[0]) if str(self.server_address[0]) != '0.0.0.0' else '127.0.0.1'
        port = self.server_address[1]
        print("\n[{}] .:. now serving your content @ {}".format( self.prg_lbl, colored( "http://{}:{}".format( host, port ),'yellow', attrs=['bold'] )))

    def goodbye(self):
        print("\n[{}] .:. shut'n her down captian ..hic..".format( self.prg_lbl ))

    def server_close(self):
        # Close listening sockets first, then print the farewell banner.
        super().server_close()
        self.goodbye()

    def server_activate(self):
        # Start listening, then print the greeting with the bound address.
        super().server_activate()
        self.greetings()
class ThreadedServer:
    """Factory for a ThreadingSimpleServer bound to all interfaces."""

    @staticmethod
    def create(config):
        # Listen on every interface at the configured port.
        bind_address = ('0.0.0.0', config.port)
        return ThreadingSimpleServer(bind_address, SimpleHTTPRequestHandler)
| StarcoderdataPython |
1991886 | from calamari_ocr.utils.path import (
split_all_ext,
checkpoint_path,
keep_files_with_same_file_name,
filename,
)
from calamari_ocr.utils.glob import glob_all
| StarcoderdataPython |
def dobra(lst):
    """Double every element of *lst* in place.

    Args:
        lst: mutable sequence of numbers; modified in place so callers
            observe the change through their own reference.
    """
    # Slice assignment preserves the in-place contract while replacing the
    # manual index-tracking while-loop with an idiomatic comprehension.
    lst[:] = [2 * item for item in lst]
# Demo: double a sample list in place and show the result.
numbers = [6, 3, 9, 1, 2]
dobra(numbers)
print(numbers)
3302065 | <reponame>le717/ibm_jsonx<filename>ibm_jsonx/exceptions.py<gh_stars>0
class JsonxParsingException(Exception):
    """Raised when JSONx content cannot be parsed."""
| StarcoderdataPython |
3202458 | # pylint: disable=missing-docstring
from .fastenum import FastEnum
__all__ = ['FastEnum']
| StarcoderdataPython |
8060545 | from rest_framework import serializers
from my_portal.tooling import models
class ToolConditionSerializer(serializers.ModelSerializer):
    """Serializer exposing every field of the ToolCondition model."""

    class Meta:
        model = models.ToolCondition
        # NOTE(review): '__all__' exposes every model field; confirm no
        # sensitive columns exist before keeping this shortcut.
        fields = '__all__'
class ToolSerializer(serializers.ModelSerializer):
    """Serializer exposing every field of the Tool model."""

    class Meta:
        model = models.Tool
        # NOTE(review): '__all__' exposes every model field; confirm no
        # sensitive columns exist before keeping this shortcut.
        fields = '__all__'
6569067 | <filename>components/isceobj/InsarProc/runUpdatePreprocInfo.py
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Copyright 2012 California Institute of Technology. ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# United States Government Sponsorship acknowledged. This software is subject to
# U.S. export control laws and regulations and has been classified as 'EAR99 NLR'
# (No [Export] License Required except when exporting to an embargoed country,
# end user, or in support of a prohibited end use). By downloading this software,
# the user agrees to comply with all applicable U.S. export laws and regulations.
# The user has the responsibility to obtain export licenses, or other export
# authority as may be required before exporting this software to any 'EAR99'
# embargoed foreign country or citizen of those countries.
#
# Author: <NAME>
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def runUpdatePreprocInfo(self, use_dop="average"):
    """Update preprocessing info: blend master/slave Doppler estimates.

    Averages the two frames' Doppler polynomials, replaces the fractional
    centroid of the result with the mocomp-derived value, and stores it on
    ``self.insar.dopplerCentroid``.

    :param use_dop: Doppler selection strategy forwarded to runFdMocomp.
    :return: None
    """
    from .runFdMocomp import runFdMocomp

    # NOTE(review): the original also read peg.radiusOfCurvature and both
    # frames' pulse repetition frequencies into locals that were never used;
    # those dead reads were removed (assumes the accessors are side-effect
    # free -- TODO confirm).
    masterDoppler = self.insar.masterDoppler
    slaveDoppler = self.insar.slaveDoppler
    fd = runFdMocomp(self, use_dop=use_dop)
    averageDoppler = masterDoppler.average(slaveDoppler)
    averageDoppler.fractionalCentroid = fd
    self.insar.dopplerCentroid = averageDoppler
    return None
| StarcoderdataPython |
46060 | <gh_stars>0
#!/usr/bin/python3
__author__ = 'ziyan.yin'
from threading import Lock
from typing import Dict
from .unit import units
locks: Dict[str, Lock] = dict()
def synchronized(func):
    """Decorator serializing all calls to *func* behind a per-function Lock.

    The lock is created once, keyed by the function's repr, and shared by
    every call to the returned wrapper.
    """
    from functools import wraps

    key = f"{repr(func)}"
    if key not in locks:
        locks[key] = Lock()

    # BUGFIX: without functools.wraps the wrapper hid the decorated
    # function's __name__/__doc__/signature from introspection.
    @wraps(func)
    def wrapper(*args, **kwargs):
        with locks[key]:
            return func(*args, **kwargs)
    return wrapper
def register(cls):
    """Class decorator: add *cls* to the unit registry under its module key."""
    # Module key drops the top-level package segment.
    module_key = '.'.join(cls.__module__.split('.')[1:])
    bucket = units.setdefault(module_key, dict())
    bucket[cls.__name__] = cls
    return cls
def priority(level: int):
    """Class-decorator factory attaching a ``level`` attribute.

    Levels outside the open range (0, 5) silently fall back to 1.
    """
    effective_level = level if 0 < level < 5 else 1

    def wrapper(cls):
        cls.level = effective_level
        return cls
    return wrapper
def get_unit(router: str = 'common', method: str = ''):
    """Look up a registered unit.

    Returns ``(True, unit_class)`` on success, ``(False, error_message)``
    when the router/method pair is unknown, and ``(False, '')`` when the
    router name is empty.
    """
    if not router:
        return False, ''
    registry = units.get(router)
    if registry is not None and method in registry:
        return True, registry[method]
    return False, 'can not find service {0}[{1}]'.format(router, method)
| StarcoderdataPython |
8157636 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""test Serving, Common"""
import os
from functools import wraps
from mindspore_serving import server
from mindspore_serving import log as logger
from mindspore_serving.client import Client
servable_index = 0
class ServingTestBase:
    """Scaffolding for serving unit tests.

    Builds a throw-away servable directory tree (fake model files and a
    servable_config.py) under the current working directory. Each instance
    gets a unique servable name from the module-level ``servable_index``
    counter.
    """

    def __init__(self):
        servable_dir = "serving_python_ut_servables"
        self.servable_dir = os.path.join(os.getcwd(), servable_dir)
        # Start from a clean slate: remove leftovers from earlier runs.
        os.system(f"rm -rf {self.servable_dir}")
        global servable_index
        self.servable_name = "add_" + str(servable_index)
        servable_index += 1

    def init_servable(self, version_number, config_file, model_file="tensor_add.mindir"):
        """Load a servable config from the shared fixtures dir and set up the tree."""
        cur_dir = os.path.dirname(os.path.abspath(__file__))
        config_file_abs = os.path.join(os.path.join(cur_dir, "../servable_config/"), config_file)
        try:
            with open(config_file_abs, "r") as fp:
                servable_config_content = fp.read()
        except FileNotFoundError:
            # Missing fixture means "create the tree without a config file".
            servable_config_content = None
        self.init_servable_with_servable_config(version_number, servable_config_content, model_file)

    def init_servable_with_servable_config(self, version_number, servable_config_content,
                                           model_file="tensor_add.mindir", model_config_file=None):
        """Create <servable>/<version>/ with placeholder model files and config."""
        if not isinstance(model_file, (tuple, list)):
            model_file = (model_file,)
        self.version_number = version_number
        self.model_files = model_file
        self.servable_name_path = os.path.join(self.servable_dir, self.servable_name)
        self.version_number_path = os.path.join(self.servable_name_path, str(version_number))
        self.model_files_path = [os.path.join(self.version_number_path, file) for file in model_file]
        try:
            os.mkdir(self.servable_dir)
        except FileExistsError:
            pass
        try:
            os.mkdir(self.servable_name_path)
        except FileExistsError:
            pass
        # Model files are only written when a version directory is requested.
        if self.model_files_path and version_number is not None:
            try:
                os.mkdir(self.version_number_path)
            except FileExistsError:
                pass
            for file in self.model_files_path:
                with open(file, "w") as fp:
                    print("model content", file=fp)
        if servable_config_content is not None:
            config_file = os.path.join(self.servable_name_path, "servable_config.py")
            with open(config_file, "w") as fp:
                fp.write(servable_config_content)
        if model_config_file is not None:
            model_config_file_path = os.path.join(self.servable_name_path, model_config_file)
            with open(model_config_file_path, "w") as fp:
                print("model config file", file=fp)

    def init_distributed_servable(self, servable_config_content, rank_size, rank_table_content):
        """Create the distributed layout: per-rank model/group files and hccl table."""
        self.version_number = 1
        self.servable_name_path = os.path.join(self.servable_dir, self.servable_name)
        self.model_dir = os.path.join(self.servable_dir, "model_" + self.servable_name)
        self.rank_table_content_path = os.path.join(self.servable_dir, self.servable_name + "_hccl.json")
        try:
            os.mkdir(self.servable_dir)
        except FileExistsError:
            pass
        try:
            os.mkdir(self.servable_name_path)
        except FileExistsError:
            pass
        try:
            os.mkdir(self.model_dir)
        except FileExistsError:
            pass
        self.model_file_list = []
        for i in range(rank_size):
            model_file_path = os.path.join(self.model_dir, f"model{i}.mindir")
            self.model_file_list.append(model_file_path)
            with open(model_file_path, "w") as fp:
                print("model content", file=fp)
        self.group_config_list = []
        for i in range(rank_size):
            group_config = os.path.join(self.model_dir, f"group{i}.pb")
            self.group_config_list.append(group_config)
            with open(group_config, "w") as fp:
                print("group config content", file=fp)
        if servable_config_content is not None:
            config_file = os.path.join(self.servable_name_path, "servable_config.py")
            with open(config_file, "w") as fp:
                fp.write(servable_config_content)
        if rank_table_content is not None:
            with open(self.rank_table_content_path, "w") as fp:
                fp.write(rank_table_content)

    @staticmethod
    def add_on_exit(fun):
        """Queue *fun* to run during serving_test teardown."""
        global exit_fun_list
        exit_fun_list.append(fun)
# Teardown hooks queued via ServingTestBase.add_on_exit.
exit_fun_list = []
# Clients created via create_client; their stubs are released at teardown.
client_create_list = []


def serving_test(func):
    """Decorator for serving tests: forces CPU/GPU devices off, dumps logs on
    failure, and always tears down the server, clients, hooks and temp files."""
    @wraps(func)
    def wrap_test(*args, **kwargs):
        try:
            os.environ["SERVING_ENABLE_CPU_DEVICE"] = "0"
            os.environ["SERVING_ENABLE_GPU_DEVICE"] = "0"
            func(*args, **kwargs)
        except Exception:
            # Surface the serving logs before re-raising so CI output is useful.
            logger.error("Serving test catch exception")
            serving_logs_dir = os.path.join(os.getcwd(), "serving_logs")
            os.system(f"ls -l {serving_logs_dir}/*.log && cat {serving_logs_dir}/*.log")
            raise
        finally:
            logger.info("Serving test begin to clear")
            server.master.context.set_max_enqueued_requests(10000)
            server.stop()
            # Release gRPC stubs held by clients created during the test.
            global client_create_list
            for client in client_create_list:
                del client.stub
                client.stub = None
            client_create_list = []
            # Run and clear any registered teardown hooks.
            global exit_fun_list
            for fun in exit_fun_list:
                fun()
            exit_fun_list = []
            # Remove all temporary directories/files the test may have created.
            cwd_dir = os.getcwd()
            servable_dir = os.path.join(cwd_dir, "serving_python_ut_servables")
            os.system(f"rm -rf {servable_dir}")
            temp_rank_dir = os.path.join(cwd_dir, "temp_rank_table")
            os.system(f"rm -rf {temp_rank_dir}")
            serving_logs_dir = os.path.join(cwd_dir, "serving_logs")
            os.system(f"rm -rf {serving_logs_dir}")
            unix_socket_files_dir = os.path.join(cwd_dir, "unix_socket_files")
            os.system(f"rm -rf {unix_socket_files_dir}")
            unix_socket_files_dir = os.path.join(cwd_dir, "device_")
            os.system(f"rm -rf {unix_socket_files_dir}*")
            os.system(f"rm -rf *.crt *.key *.csr *.srl")
            logger.info("Serving test end clear")
    return wrap_test
def create_client(address, servable_name, method_name, version_number=0, ssl_config=None):
    """Create a Client and track it so serving_test can release it at teardown."""
    new_client = Client(address, servable_name, method_name, version_number, ssl_config)
    client_create_list.append(new_client)
    return new_client
def generate_cert(server_ip="0.0.0.0", server_host_name="serving", common_name="serving.com"):
    """Generate test TLS certificates by running the shared generate_certs.sh.

    The script reads its parameters from the SERVING_* environment variables
    set here; output files land in the current working directory (cleaned up
    by serving_test).
    """
    cur_dir = os.path.dirname(os.path.abspath(__file__))
    shell_path = os.path.join(os.path.join(cur_dir, "../servable_config/"), "generate_certs.sh")
    os.environ["SERVING_IP"] = server_ip
    os.environ["SERVING_HOSTNAME"] = server_host_name
    os.environ["SERVING_COMMON_NAME"] = common_name
    # The script body is executed (not the file path) so it inherits this env.
    with open(shell_path, 'r') as f:
        command = f.read()
    os.system(command)
def release_client(client):
    """Drop the client's gRPC stub reference so its channel can be reclaimed."""
    # del first to release the reference, then reset the attribute to None so
    # later accesses do not raise AttributeError.
    del client.stub
    client.stub = None
# test servable_config.py with client
servable_config_import = r"""
import numpy as np
from mindspore_serving.server import register
"""
servable_config_declare_servable = r"""
register.declare_servable(servable_file="tensor_add.mindir", model_format="MindIR", with_batch_dim=False)
"""
servable_config_preprocess_cast = r"""
def add_trans_datatype(x1, x2):
return x1.astype(np.float32), x2.astype(np.float32)
"""
servable_config_method_add_common = r"""
@register.register_method(output_names=["y"])
def add_common(x1, x2): # only support float32 inputs
y = register.call_servable(x1, x2)
return y
"""
servable_config_method_add_cast = r"""
@register.register_method(output_names=["y"])
def add_cast(x1, x2):
x1, x2 = register.call_preprocess(add_trans_datatype, x1, x2) # cast input to float32
y = register.call_servable(x1, x2)
return y
"""
def init_add_servable():
    """Build a servable whose config defines the add_common/add_cast methods."""
    base = ServingTestBase()
    servable_content = ''.join([
        servable_config_import,
        servable_config_declare_servable,
        servable_config_preprocess_cast,
        servable_config_method_add_common,
        servable_config_method_add_cast,
    ])
    base.init_servable_with_servable_config(1, servable_content)
    return base
def init_str_servable():
    """Build a servable whose config exercises string inputs/outputs
    (str_concat and str_empty methods)."""
    base = ServingTestBase()
    servable_content = servable_config_import
    servable_content += servable_config_declare_servable
    servable_content += r"""
def preprocess(other):
    return np.ones([2,2], np.float32), np.ones([2,2], np.float32)

def str_concat_postprocess(text1, text2):
    print("text1", text1, "text2", text2)
    return text1 + text2

@register.register_method(output_names=["text"])
def str_concat(text1, text2):
    text = register.add_stage(str_concat_postprocess, text1, text2, outputs_count=1)
    return text

def str_empty_postprocess(text1, text2):
    if len(text1) == 0:
        text = text2
    else:
        text = ""
    return text

@register.register_method(output_names=["text"])
def str_empty(text1, text2):
    text = register.add_stage(str_empty_postprocess, text1, text2, outputs_count=1)
    return text
"""
    base.init_servable_with_servable_config(1, servable_content)
    return base
def init_bytes_servable():
    """Build a servable whose config exercises bytes inputs/outputs
    (bytes_concat and bytes_empty methods)."""
    base = ServingTestBase()
    servable_content = servable_config_import
    servable_content += servable_config_declare_servable
    servable_content += r"""
def preprocess(other):
    return np.ones([2,2], np.float32), np.ones([2,2], np.float32)

def bytes_concat_process(text1, text2):
    text1 = bytes.decode(text1.tobytes())  # bytes decode to str
    text2 = bytes.decode(text2.tobytes())  # bytes decode to str
    return str.encode(text1 + text2)  # str encode to bytes

@register.register_method(output_names=["text"])
def bytes_concat(text1, text2):
    text = register.add_stage(bytes_concat_process, text1, text2, outputs_count=1)
    return text

def bytes_empty_process(text1, text2):
    text1 = bytes.decode(text1.tobytes())  # bytes decode to str
    text2 = bytes.decode(text2.tobytes())  # bytes decode to str
    if len(text1) == 0:
        text = text2
    else:
        text = ""
    return str.encode(text)  # str encode to bytes

@register.register_method(output_names=["text"])
def bytes_empty(text1, text2):
    text = register.add_stage(bytes_empty_process, text1, text2, outputs_count=1)
    return text
"""
    base.init_servable_with_servable_config(1, servable_content)
    return base
def init_bool_int_float_servable():
    """Build a servable whose config exercises bool, int and float scalars
    (bool_not, int_plus_1, float_plus_1 methods)."""
    base = ServingTestBase()
    servable_content = servable_config_import
    servable_content += servable_config_declare_servable
    servable_content += r"""
def bool_process(bool_val):
    return ~bool_val

@register.register_method(output_names=["value"])
def bool_not(bool_val):
    value = register.add_stage(bool_process, bool_val, outputs_count=1)
    return value

def int_process(int_val):
    return int_val + 1

@register.register_method(output_names=["value"])
def int_plus_1(int_val):
    value = register.add_stage(int_process, int_val, outputs_count=1)
    return value

def float_process(float_val):
    value = (float_val + 1).astype(float_val.dtype)  # also support float16 input and output
    return value

@register.register_method(output_names=["value"])
def float_plus_1(float_val):
    value = register.add_stage(float_process, float_val, outputs_count=1)
    return value
"""
    base.init_servable_with_servable_config(1, servable_content)
    return base
def start_serving_server(servable_content, model_file="tensor_add.mindir", version_number=1, start_version_number=None,
                         device_ids=0, num_parallel_workers=0, device_type=None):
    """Create a servable from *servable_content* and start the serving server.

    Starts the gRPC endpoint on 0.0.0.0:5500 and returns the ServingTestBase
    describing the created servable tree. ``start_version_number`` defaults
    to the created version when not given.
    """
    base = ServingTestBase()
    base.init_servable_with_servable_config(version_number, servable_content, model_file=model_file)
    if start_version_number is None:
        start_version_number = version_number
    server.start_servables(server.ServableStartConfig(base.servable_dir, base.servable_name, device_ids=device_ids,
                                                      version_number=start_version_number,
                                                      num_parallel_workers=num_parallel_workers,
                                                      device_type=device_type))
    server.start_grpc_server("0.0.0.0:5500")
    return base
| StarcoderdataPython |
# Reads (code, kWh) pairs from stdin until a line whose code is 0.
# Codes: 1 = residential, 2 = commercial, 3 = industrial.
# Prints the price per record, then total industrial consumption and the
# average residential+commercial consumption.
codigo, quant_KWh = input().split()
somatorio_consumos = soma_media = cont_media = 0
while codigo != '0':
    codigo, quant_KWh = int(codigo), int(quant_KWh)
    if codigo == 1:  # residential
        if quant_KWh <= 200:
            preco_consumo = quant_KWh * 0.60
        else:
            preco_consumo = quant_KWh * 0.85
        soma_media += quant_KWh
        cont_media += 1
    if codigo == 2:  # commercial
        if quant_KWh <= 800:
            preco_consumo = quant_KWh * 0.72
        else:
            preco_consumo = quant_KWh * 0.83
        soma_media += quant_KWh
        cont_media += 1
    if codigo == 3:  # industrial
        if quant_KWh <= 3000:
            preco_consumo = quant_KWh * 0.75
        else:
            preco_consumo = quant_KWh * 0.8
        somatorio_consumos += quant_KWh
    # NOTE(review): for a code outside 1..3 this re-prints the previous
    # iteration's price (or raises on the first one) -- presumably the judge
    # guarantees codes 1..3; confirm.
    print(f'{preco_consumo:.2f}')
    codigo, quant_KWh = input().split()
# Total industrial consumption, then the residential+commercial average
# (0 when there were no such records, avoiding division by zero).
print(somatorio_consumos, end=' ')
if soma_media > 0:
    print(f'{soma_media/cont_media:.0f}')
else:
    print(0)
| StarcoderdataPython |
308390 | import unittest
import time
import logging
from financescraper.datacontainer import circular_buffer
class TestCircularBuffer(unittest.TestCase):
    """Unit tests for CircularBuffer: a size-bounded, time-expiring
    key/value store (capacity 2, 1-second holding time in these tests)."""

    def setUp(self):
        self.buffer = circular_buffer.CircularBuffer(2, 1)
        # Silence expected warnings emitted by the buffer during tests.
        logging.disable(logging.ERROR)

    def tearDown(self):
        logging.disable(logging.NOTSET)

    def test_buffer_init(self):
        self.assertEqual(self.buffer.max_size, 2)
        self.assertEqual(self.buffer.max_holding_time, 1)
        self.assertListEqual(self.buffer.key_list, [])
        self.assertDictEqual(self.buffer.dictionary, {})
        self.assertDictEqual(self.buffer.timestamps, {})

    def test_buffer_set_max_size(self):
        self.buffer.set_size(4)
        self.assertEqual(self.buffer.max_size, 4)

    def test_buffer_set_max_holding_time(self):
        self.buffer.set_holding_time(4)
        self.assertEqual(self.buffer.max_holding_time, 4)

    def test_buffer_timestamp(self):
        self.buffer.add('A', 1)
        # Insertion timestamp should be "now" to millisecond precision.
        self.assertAlmostEqual(self.buffer.timestamps['A'], time.time(), 3)

    def test_buffer_add_element(self):
        obj = {
            'A': 1,
            'B': 2
        }
        self.buffer.add('A', obj['A'])
        self.assertDictEqual(self.buffer.dictionary, {'A': obj['A']})
        self.assertListEqual(self.buffer.key_list, ['A'])
        self.buffer.add('B', obj['B'])
        self.assertDictEqual(self.buffer.dictionary, obj)
        self.assertListEqual(self.buffer.key_list, ['A', 'B'])

    def test_buffer_get_element(self):
        obj = {
            'A': 1,
            'B': 2
        }
        self.buffer.add('A', obj['A'])
        self.buffer.add('B', obj['B'])
        self.assertEqual(self.buffer.get('A'), obj['A'])
        self.assertNotEqual(self.buffer.get('A'), obj['B'])

    def test_buffer_delete_element(self):
        obj = {
            'A': 1,
            'B': 2
        }
        expected = {'B': 2}
        self.buffer.add('A', obj['A'])
        self.buffer.add('B', obj['B'])
        self.buffer.delete('A')
        self.assertDictEqual(self.buffer.dictionary, expected)

    def test_buffer_clear(self):
        obj = {
            'A': 1,
            'B': 2
        }
        expected = {}
        self.buffer.add('A', obj['A'])
        self.buffer.add('B', obj['B'])
        self.buffer.clear()
        self.assertDictEqual(self.buffer.dictionary, expected)

    def test_buffer_overflow(self):
        # Adding a third entry to a capacity-2 buffer evicts the oldest ('A').
        obj = {
            'A': 1,
            'B': 2,
            'C': 3
        }
        expected = {
            'B': 2,
            'C': 3
        }
        self.buffer.add('A', obj['A'])
        self.buffer.add('B', obj['B'])
        self.buffer.add('C', obj['C'])
        self.assertDictEqual(self.buffer.dictionary, expected)
        self.assertNotEqual(self.buffer.dictionary, obj)

    def test_buffer_refresh(self):
        # Refreshing 'A' makes 'B' the oldest, so 'B' is evicted on overflow.
        obj = {
            'A': 1,
            'B': 2,
            'C': 3
        }
        expected = {
            'A': 1,
            'C': 3
        }
        self.buffer.add('A', obj['A'])
        self.buffer.add('B', obj['B'])
        self.buffer.refresh('A')
        self.buffer.add('C', obj['C'])
        self.assertDictEqual(self.buffer.dictionary, expected)
        self.assertNotEqual(self.buffer.dictionary, obj)

    def test_old_data_delete(self):
        self.buffer.add('A', 'Value')
        self.assertIsNotNone(self.buffer.get('A'))
        # Wait past the 1-second holding time; the entry must expire.
        time.sleep(1)
        self.assertIsNone(self.buffer.get('A'))
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
313201 | <gh_stars>10-100
# coding=utf-8
import matplotlib.pyplot as plt
import numpy as np
import pytest
from ..classes import Simulation, PeriodicTestGrid, NonperiodicTestGrid
from ..visualization.time_snapshots import FieldPlot, CurrentPlot
@pytest.fixture(params=(64, 128, 256, 512))
def _NG(request):
    # Grid sizes (number of cells) swept by the solver tests.
    return request.param
@pytest.fixture(params=(1, 2 * np.pi, 10 * np.pi, 1000))
def _L(request):
    # Domain lengths swept by the solver tests.
    return request.param
@pytest.fixture(params=(1, 2 * np.pi, 10 * np.pi, 1000))
def _test_charge_density(request):
    # Charge-density magnitudes swept by the solver tests.
    return request.param
@pytest.fixture(params=(1, 2 * np.pi, 7.51))
def __t(request):
    # Simulation durations swept by the Buneman solver tests.
    return request.param
def test_PoissonSolver(_NG, _L):
    """Fourier Poisson solve of a sine charge density must match the analytic
    field -2*pi/L * cos(2*pi*x/L)."""
    g = PeriodicTestGrid(1, _L, _NG)
    charge_density = (2 * np.pi / _L) ** 2 * np.sin(2 * g.x * np.pi / _L)
    field = np.zeros((_NG + 2, 3))
    field[1:-1, 0] = -2 * np.pi / _L * np.cos(2 * np.pi * g.x / _L)
    g.charge_density[:-1] = charge_density
    g.init_solve()

    def plots():
        # Diagnostic plot shown only when the assertion fails.
        fig, axes = plt.subplots(2)
        ax0, ax1 = axes
        ax0.plot(g.x, charge_density)
        ax0.set_title("Charge density")
        ax1.set_title("Field")
        ax1.plot(g.x, g.electric_field[1:-1], "r-", label="Fourier")
        ax1.plot(g.x, field, "g-", label="Analytic")
        for ax in axes:
            ax.grid()
            ax.legend()
        plt.show()
        return "test_PoissonSolver failed! calc/theory field ratio at 0: {}".format(g.electric_field[1] / field[0])

    assert np.allclose(g.electric_field, field), plots()
# def test_PoissonSolver_complex(debug=DEBUG):
# L = 1
# N = 32 * 2**5
# epsilon_0 = 1
# x, dx = np.linspace(0, L, N, retstep=True, endpoint=False)
# anal_potential = lambda x: np.sin(x * 2 * np.pi) + 0.5 * \
# np.sin(x * 6 * np.pi) + 0.1 * np.sin(x * 20 * np.pi)
# anal_field = lambda x: -(2 * np.pi * np.cos(x * 2 * np.pi) + 3 * np.pi *
# np.cos(x * 6 * np.pi) + 20 * np.pi * 0.1 * np.cos(x * 20 * np.pi))
# charge_density_anal = lambda x: ((2 * np.pi)**2 * np.sin(x * 2 * np.pi) + 18 * np.pi**2 * np.sin(
# x * 6 * np.pi) + (20 * np.pi)**2 * 0.1 * np.sin(x * 20 * np.pi)) * epsilon_0
#
# NG = 32
# g = Frame(L, NG, epsilon_0)
# # indices_in_denser_grid = np.searchsorted(x, g.x)
# g.charge_density = charge_density_anal(g.x)
# energy_fourier = g.init_solver_fourier()
# energy_direct = 0.5 * (g.electric_field**2).sum() * g.dx
# print("dx", dx, "fourier", energy_fourier, "direct", energy_direct, energy_fourier / energy_direct)
#
# def plots():
# fig, xspace = plt.subplots()
# xspace.set_title(
# r"Solving the Poisson equation $\Delta \psi = \rho / \epsilon_0$ via Fourier transform")
# xspace.plot(g.x, g.charge_density, "ro--", label=r"$\rho$")
# xspace.plot(x, charge_density_anal(x), "r-", lw=6, alpha=0.5, label=r"$\rho_a$")
# xspace.plot(g.x, g.potential, "go--", label=r"$V$")
# xspace.plot(x, anal_potential(x), "g-", lw=6, alpha=0.5, label=r"$V_a$")
# xspace.plot(g.x, g.electric_field, "bo--", alpha=0.5, label=r"$E$")
# EplotAnal, = xspace.plot(x, anal_field(x), "b-", lw=6, alpha=0.5, label=r"$E_a$")
# xspace.set_xlim(0, L)
# xspace.set_xlabel("$x$")
# xspace.grid()
# xspace.legend(loc='best')
#
# fig2, fspace = plt.subplots()
# fspace.plot(g.k_plot, g.energy_per_mode, "bo--", label=r"electric energy $\rho_F V_F^\dagger$")
# fspace.set_xlabel("k")
# fspace.set_ylabel("mode energy")
# fspace.set_title("Fourier space")
# fspace.grid()
# fspace.legend(loc='best')
# plt.show()
# return "test_PoissonSolver_complex failed!"
#
# energy_correct = np.isclose(energy_fourier, energy_direct)
# field_correct = np.isclose(g.electric_field, anal_field(g.x)).all()
# potential_correct = np.isclose(g.potential, anal_potential(g.x)).all()
# assert field_correct and potential_correct and energy_correct, plots()
def test_PoissonSolver_energy_sine(_NG, ):
    """Field energy from the Fourier solve must equal the direct-space energy,
    and the field must match the analytic solution on the coarse grid."""
    _L = 1
    # Oversample the analytic profiles relative to the solver grid.
    resolution_increase = _NG
    N = _NG * resolution_increase
    epsilon_0 = 1
    x, dx = np.linspace(0, _L, N, retstep=True, endpoint=False)
    anal_field = np.zeros((N, 3))
    anal_field[:, 0] = -(2 * np.pi * np.cos(x * 2 * np.pi / _L))
    charge_density_anal = ((2 * np.pi) ** 2 * np.sin(x * 2 * np.pi))
    g = PeriodicTestGrid(1, _L, _NG, epsilon_0)
    # Sample the dense analytic density at the coarse grid nodes.
    indices_in_denser_grid = np.searchsorted(x, g.x)
    g.charge_density[:-1] = charge_density_anal[indices_in_denser_grid]  # / resolution_increase
    g.init_solve()
    g.save_field_values(0)
    g.postprocess()
    energy_fourier = g.grid_energy_history[0]
    energy_direct = g.direct_energy_calculation()
    print("dx", dx, "fourier", energy_fourier, "direct", energy_direct, energy_fourier / energy_direct)

    def plots():
        # Diagnostic plots shown only when an assertion fails.
        fig, xspace = plt.subplots()
        xspace.set_title(
            r"Solving the Poisson equation $\Delta \psi = \rho / \epsilon_0$ via Fourier transform")
        xspace.plot(g.x, g.charge_density, "ro--", label=r"$\rho$")
        xspace.plot(x, charge_density_anal, "r-", lw=6, alpha=0.5, label=r"$\rho_a$")
        xspace.plot(g.x, g.electric_field, "bo--", alpha=0.5, label=r"$E$")
        xspace.plot(x, anal_field, "b-", lw=6, alpha=0.5, label=r"$E_a$")
        xspace.set_xlim(0, _L)
        xspace.set_xlabel("$x$")
        xspace.grid()
        xspace.legend(loc='best')

        fig2, fspace = plt.subplots()
        fspace.plot(g.k_plot, g.energy_per_mode, "bo--", label=r"electric energy $\rho_F V_F^\dagger$")
        fspace.set_xlabel("k")
        fspace.set_ylabel("mode energy")
        fspace.set_title("Fourier space")
        fspace.grid()
        fspace.legend(loc='best')
        plt.show()
        return "test_PoissonSolver_complex failed!"

    energy_correct = np.allclose(energy_fourier, energy_direct)
    assert energy_correct, plots()
    field_correct = np.allclose(g.electric_field[1:-1, 0], anal_field[indices_in_denser_grid][:, 0])
    assert field_correct, plots()
def test_PoissonSolver_sheets(_NG, _L, _test_charge_density=1):
    """Two oppositely charged sheets: inside each sheet the field must ramp
    linearly with slope +/- rho/epsilon_0 (epsilon_0 = 1 here)."""
    epsilon_0 = 1
    x, dx = np.linspace(0, _L, _NG, retstep=True, endpoint=False)
    charge_density = np.zeros_like(x)
    region1 = (_L * 1 / 8 < x) * (x < _L * 2 / 8)
    region2 = (_L * 5 / 8 < x) * (x < _L * 6 / 8)
    charge_density[region1] = _test_charge_density
    charge_density[region2] = -_test_charge_density
    g = PeriodicTestGrid(1, _L, _NG, epsilon_0)
    g.charge_density[:-1] = charge_density
    g.init_solve()

    def plots():
        # Diagnostic plot shown only when an assertion fails.
        # BUGFIX: plt.subplots(3) returned three axes but only two were
        # unpacked, so this diagnostic raised ValueError instead of plotting.
        fig, axes = plt.subplots(2)
        ax0, ax1 = axes
        ax0.plot(x, charge_density)
        ax0.set_title("Charge density")
        ax1.set_title("Field")
        ax1.plot(x, g.electric_field, "r-")
        for ax in axes:
            ax.grid()
            ax.legend()
        plt.show()
        return "test_PoissonSolver_sheets failed!"

    # Linear fit inside each sheet; the slope must equal +/- the density.
    polynomial_coefficients = np.polyfit(x[region1], g.electric_field[1:-1, 0][region1], 1)
    first_bump_right = np.isclose(
        polynomial_coefficients[0], _test_charge_density, rtol=1e-2)
    assert first_bump_right, plots()
    polynomial_coefficients = np.polyfit(x[region2], g.electric_field[1:-1, 0][region2], 1)
    second_bump_right = np.isclose(
        polynomial_coefficients[0], -_test_charge_density, rtol=1e-2)
    assert second_bump_right, plots()
def test_PoissonSolver_ramp(_NG, _L):
    """ For a charge density rho = Ax + B
    d2phi/dx2 = -rho/epsilon_0
    set epsilon_0 to 1
    d2phi/dx2 = Ax
    phi must be of form
    phi = -Ax^3/6 + Bx^2 + Cx + D"""
    a = 1
    # noinspection PyArgumentEqualDefault
    g = PeriodicTestGrid(1, _L, _NG, epsilon_0=1)
    g.charge_density[:-1] = a * g.x
    g.init_solve()
    # Reference parabola used only in the diagnostic plot below.
    field = a * (g.x - _L / 2) ** 2 / 2

    def plots():
        # Diagnostic plot shown only when the assertion fails.
        fig, axes = plt.subplots(2)
        ax0, ax1 = axes
        ax0.plot(g.x, g.charge_density)
        ax0.set_title("Charge density")
        ax1.set_title("Field")
        ax1.plot(g.x, g.electric_field, "r-")
        ax1.plot(g.x, field, "g-")
        for ax in axes:
            ax.grid()
            ax.legend()
        plt.show()
        return "test_PoissonSolver_ramp failed!"

    # The field must be quadratic with leading coefficient a/2.
    polynomial_coefficients = np.polyfit(g.x, g.electric_field[1:-1, 0], 2)
    assert np.isclose(polynomial_coefficients[0], a / 2, rtol=1e-2), (polynomial_coefficients[0], a / 2, plots())
def test_BunemanSolver(__t, _NG, _L, _test_charge_density):
    """Point current source: after one Buneman field solve, the field at the
    source cell must equal -dt / epsilon_0 times the deposited current."""
    grid = NonperiodicTestGrid(__t, _L, _NG)
    middle = _NG // 2
    grid.current_density_x[middle] = _test_charge_density
    grid.solve()
    grid.save_field_values(0)
    sim = Simulation(grid)
    solved_value = grid.electric_field[middle, 0]
    predicted_value = -grid.dt / grid.epsilon_0 * _test_charge_density

    def plot():
        # Failure diagnostic: current and field at the first saved step.
        fig, (current_ax, field_ax) = plt.subplots(2)
        CurrentPlot(sim, current_ax, 0).update(0)
        FieldPlot(sim, field_ax, 0).update(0)
        plt.show()

    assert np.isclose(solved_value, predicted_value), plot()
def test_BunemanSolver_charge(__t, _NG, _L, _test_charge_density):
    """Uniform interior current: every interior field cell must drop by
    dt / epsilon_0 times the deposited current density after one solve."""
    grid = NonperiodicTestGrid(__t, _L, _NG)
    velocity = 0.5
    grid.current_density_x[1:-2] = velocity * _test_charge_density
    grid.solve()
    grid.save_field_values(0)
    sim = Simulation(grid).postprocess()

    def plot():
        # Failure diagnostic: current and field at the first saved step.
        fig, (current_ax, field_ax) = plt.subplots(2)
        CurrentPlot(sim, current_ax, 0).update(0)
        FieldPlot(sim, field_ax, 0).update(0)
        plt.show()

    expected = -velocity * _test_charge_density * grid.dt / grid.epsilon_0
    assert np.allclose(grid.electric_field[1:-1, 0], expected), plot()
| StarcoderdataPython |
3207137 | <reponame>Elen-T/python_training<filename>data/contacts.py<gh_stars>0
# ะพัะดะตะปัะฝัะน ะฟะฐะบะตั ัะฐะฑะพัั ั ัะตััะพะฒัะผะธ ะดะฐะฝะฝัะผะธ ะดะปั ัะตััะฐ ะดะพะฑะฐะฒะปะตะฝะธะต ะบะพะฝัะฐะบัะฐ
from model.contacts import Contacts
testdata = [
Contacts(firstname="firstname1", middlename="middlename1", lastname="lastname1", nickname="nickname1"),
Contacts(firstname="firstname2", middlename="middlename2", lastname="lastname2", nickname="nickname2"),
]
| StarcoderdataPython |
9712645 | #!/usr/bin/python3
# Thin launcher script: delegate straight to the StorPool charm manager CLI.
from storpool.charms.manage import __main__ as spmain

spmain.main()
| StarcoderdataPython |
5104422 | <reponame>Nyquixt/multiview-human-pose-estimation-pytorch
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# Written by <NAME> (<EMAIL>)
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import logging
import os
import h5py
import numpy as np
import torch
from core.config import get_model_name
from core.evaluate import accuracy
from core.inference import get_final_preds
from utils.transforms import flip_back
from utils.vis import save_debug_images
logger = logging.getLogger(__name__)
def routing(raw_features, aggre_features, is_aggre, meta):
    """Select, per sample, either the aggregated or the raw heatmaps.

    Samples whose metadata source is 'h36m' use the cross-view aggregated
    features; all other sources fall back to the raw single-view features.
    When aggregation is disabled the raw feature list is returned untouched.
    """
    if not is_aggre:
        return raw_features
    routed = []
    for raw_view, aggre_view, view_meta in zip(raw_features, aggre_features, meta):
        selected = torch.zeros_like(aggre_view)
        for sample_idx in range(aggre_view.size(0)):
            use_aggre = view_meta['source'][sample_idx] == 'h36m'
            selected[sample_idx] = aggre_view[sample_idx] if use_aggre else raw_view[sample_idx]
        routed.append(selected)
    return routed
def train(config, data, model, criterion, optim, epoch, output_dir,
          writer_dict):
    """Run one training epoch of the multiview pose network.

    For each batch: the model produces per-view raw and aggregated heatmaps,
    `routing` decides which one supervises each sample, the summed loss is
    backpropagated, and running meters / TensorBoard scalars / debug images
    are emitted every ``config.PRINT_FREQ`` batches.
    """
    is_aggre = config.NETWORK.AGGRE
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    avg_acc = AverageMeter()
    # switch to train mode
    model.train()
    end = time.time()
    for i, (input, target, weight, meta) in enumerate(data):
        data_time.update(time.time() - end)
        raw_features, aggre_features = model(input)
        output = routing(raw_features, aggre_features, is_aggre, meta)
        # Accumulate the loss over every camera view; targets are moved to
        # the GPU once and reused below.
        loss = 0
        target_cuda = []
        for t, w, o in zip(target, weight, output):
            t = t.cuda(non_blocking=True)
            w = w.cuda(non_blocking=True)
            target_cuda.append(t)
            loss += criterion(o, t, w)
        target = target_cuda
        if is_aggre:
            # Also supervise the raw (pre-aggregation) features.
            for t, w, r in zip(target, weight, raw_features):
                t = t.cuda(non_blocking=True)
                w = w.cuda(non_blocking=True)
                loss += criterion(r, t, w)
        optim.zero_grad()
        loss.backward()
        optim.step()
        losses.update(loss.item(), len(input) * input[0].size(0))
        # Per-view accuracy, then averaged across views.
        nviews = len(output)
        acc = [None] * nviews
        cnt = [None] * nviews
        pre = [None] * nviews
        for j in range(nviews):
            _, acc[j], cnt[j], pre[j] = accuracy(
                output[j].detach().cpu().numpy(),
                target[j].detach().cpu().numpy())
        acc = np.mean(acc)
        cnt = np.mean(cnt)
        avg_acc.update(acc, cnt)
        batch_time.update(time.time() - end)
        end = time.time()
        if i % config.PRINT_FREQ == 0:
            gpu_memory_usage = torch.cuda.memory_allocated(0)
            msg = 'Epoch: [{0}][{1}/{2}]\t' \
                  'Time {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\t' \
                  'Speed {speed:.1f} samples/s\t' \
                  'Data {data_time.val:.3f}s ({data_time.avg:.3f}s)\t' \
                  'Loss {loss.val:.5f} ({loss.avg:.5f})\t' \
                  'Accuracy {acc.val:.3f} ({acc.avg:.3f})\t' \
                  'Memory {memory:.1f}'.format(
                      epoch, i, len(data), batch_time=batch_time,
                      speed=len(input) * input[0].size(0) / batch_time.val,
                      data_time=data_time, loss=losses, acc=avg_acc, memory=gpu_memory_usage)
            logger.info(msg)
            writer = writer_dict['writer']
            global_steps = writer_dict['train_global_steps']
            writer.add_scalar('train_loss', losses.val, global_steps)
            writer.add_scalar('train_acc', avg_acc.val, global_steps)
            writer_dict['train_global_steps'] = global_steps + 1
            # Dump one debug image per camera view.
            for k in range(len(input)):
                view_name = 'view_{}'.format(k + 1)
                prefix = '{}_{}_{:08}'.format(
                    os.path.join(output_dir, 'train'), view_name, i)
                save_debug_images(config, input[k], meta[k], target[k],
                                  pre[k] * 4, output[k], prefix)
def validate(config,
             loader,
             dataset,
             model,
             criterion,
             output_dir,
             writer_dict=None):
    """Evaluate the model over the whole validation set.

    Collects per-sample predicted joint locations and heatmaps, writes them
    to the HDF5 file named by ``config.TEST.HEATMAP_LOCATION_FILE``, then
    delegates metric computation to ``dataset.evaluate`` and returns its
    performance indicator.
    """
    model.eval()
    batch_time = AverageMeter()
    losses = AverageMeter()
    avg_acc = AverageMeter()
    # assumes 4 camera views per sample — TODO confirm against the loader
    nsamples = len(dataset) * 4
    is_aggre = config.NETWORK.AGGRE
    njoints = config.NETWORK.NUM_JOINTS
    height = int(config.NETWORK.HEATMAP_SIZE[0])
    width = int(config.NETWORK.HEATMAP_SIZE[1])
    all_preds = np.zeros((nsamples, njoints, 3), dtype=np.float32)
    all_heatmaps = np.zeros(
        (nsamples, njoints, height, width), dtype=np.float32)
    idx = 0
    with torch.no_grad():
        end = time.time()
        for i, (input, target, weight, meta) in enumerate(loader):
            raw_features, aggre_features = model(input)
            output = routing(raw_features, aggre_features, is_aggre, meta)
            # Accumulate loss over all views (plus raw features when
            # aggregation is enabled), mirroring train().
            loss = 0
            target_cuda = []
            for t, w, o in zip(target, weight, output):
                t = t.cuda(non_blocking=True)
                w = w.cuda(non_blocking=True)
                target_cuda.append(t)
                loss += criterion(o, t, w)
            if is_aggre:
                for t, w, r in zip(target, weight, raw_features):
                    t = t.cuda(non_blocking=True)
                    w = w.cuda(non_blocking=True)
                    loss += criterion(r, t, w)
            target = target_cuda
            nimgs = len(input) * input[0].size(0)
            losses.update(loss.item(), nimgs)
            # Per-view accuracy, averaged across views.
            nviews = len(output)
            acc = [None] * nviews
            cnt = [None] * nviews
            pre = [None] * nviews
            for j in range(nviews):
                _, acc[j], cnt[j], pre[j] = accuracy(
                    output[j].detach().cpu().numpy(),
                    target[j].detach().cpu().numpy())
            acc = np.mean(acc)
            cnt = np.mean(cnt)
            avg_acc.update(acc, cnt)
            batch_time.update(time.time() - end)
            end = time.time()
            # Recover image-space joint coordinates for every view and
            # interleave them view-major into the batch-sized buffers.
            preds = np.zeros((nimgs, njoints, 3), dtype=np.float32)
            heatmaps = np.zeros(
                (nimgs, njoints, height, width), dtype=np.float32)
            for k, o, m in zip(range(nviews), output, meta):
                pred, maxval = get_final_preds(config,
                                               o.clone().cpu().numpy(),
                                               m['center'].numpy(),
                                               m['scale'].numpy())
                pred = pred[:, :, 0:2]
                pred = np.concatenate((pred, maxval), axis=2)
                preds[k::nviews] = pred
                heatmaps[k::nviews] = o.clone().cpu().numpy()
            all_preds[idx:idx + nimgs] = preds
            all_heatmaps[idx:idx + nimgs] = heatmaps
            idx += nimgs
            if i % config.PRINT_FREQ == 0:
                msg = 'Test: [{0}/{1}]\t' \
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' \
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t' \
                      'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format(
                          i, len(loader), batch_time=batch_time,
                          loss=losses, acc=avg_acc)
                logger.info(msg)
                for k in range(len(input)):
                    view_name = 'view_{}'.format(k + 1)
                    prefix = '{}_{}_{:08}'.format(
                        os.path.join(output_dir, 'validation'), view_name, i)
                    save_debug_images(config, input[k], meta[k], target[k],
                                      pre[k] * 4, output[k], prefix)
        # save heatmaps and joint locations
        # Only joints that have a mapping in u2a_mapping are exported.
        u2a = dataset.u2a_mapping
        a2u = {v: k for k, v in u2a.items() if v != '*'}
        a = list(a2u.keys())
        u = np.array(list(a2u.values()))
        save_file = config.TEST.HEATMAP_LOCATION_FILE
        file_name = os.path.join(output_dir, save_file)
        file = h5py.File(file_name, 'w')
        file['heatmaps'] = all_heatmaps[:, u, :, :]
        file['locations'] = all_preds[:, u, :]
        file['joint_names_order'] = a
        file.close()
        # Dataset-specific metrics, logged as a markdown-style table.
        name_value, perf_indicator = dataset.evaluate(all_preds)
        names = name_value.keys()
        values = name_value.values()
        num_values = len(name_value)
        _, full_arch_name = get_model_name(config)
        logger.info('| Arch ' +
                    ' '.join(['| {}'.format(name) for name in names]) + ' |')
        logger.info('|---' * (num_values + 1) + '|')
        logger.info('| ' + full_arch_name + ' ' +
                    ' '.join(['| {:.3f}'.format(value) for value in values]) +
                    ' |')
    return perf_indicator
class AverageMeter(object):
    """Running-statistics helper: latest value plus a weighted mean."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out the latest value, mean, total and sample count."""
        self.val = self.avg = self.sum = self.count = 0

    def update(self, val, n=1):
        """Fold in a new observation ``val`` representing ``n`` samples."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
| StarcoderdataPython |
12843704 | TRAIN_SIZE = 23654
| StarcoderdataPython |
9735697 | <filename>polydown/__main__.py
import argparse
import datetime
from .cli import polycli
__version__ = "0.2.2"

# Command-line interface definition for the polydown asset downloader.
ap = argparse.ArgumentParser()
# Positional: one or more asset types to download.
ap.add_argument(
    "asset_type",
    type=str,
    nargs="*",
    help='"hdris, textures, models"',
)
ap.add_argument(
    "-f",
    "--folder",
    action="store",
    type=str,
    default="",
    help="target download folder.",
)
# Optional category filter; bare "-c" selects the empty (all) category.
ap.add_argument(
    "-c",
    "--category",
    nargs="?",
    const="",
    help="category to download.",
)
ap.add_argument(
    "-s",
    "--sizes",
    nargs="+",
    default=[],
    help="size(s) of downloaded asset files. eg: 1k 2k 4k",
)
# -1 means "no iteration limit" downstream — TODO confirm in polycli.
ap.add_argument(
    "-it",
    "--iters",
    action="store",
    type=int,
    default=-1,
    help="amount of iterations.",
)
# ap.add_argument(
#     "-ff",
#     "--file_format",
#     action="store",
#     type=str,
#     help="target download folder.",
# )
ap.add_argument(
    "-o",
    "--overwrite",
    action="store_true",
    default=False,
    help="Overwrite if the files already exists. otherwise the current task will be skipped.",
)
ap.add_argument(
    "-no",
    "--noimgs",
    action="store_true",
    default=False,
    help="Do not download 'preview, render, thumbnail...' images.",
)
ap.add_argument("-v", "--version", action="version", version="%(prog)s v" + __version__)
args = ap.parse_args()
def cli():
    """Validate CLI arguments, run the downloader, and report total runtime.

    Exits with status 1 when no asset type was supplied. A Ctrl-C during the
    download is caught so the runtime summary is still printed.
    """
    if not args.asset_type:
        print("<asset_type> is required.")
        # BUG FIX: the original called exit(0), which reports *success* on a
        # usage error and relies on the site module's exit() helper;
        # SystemExit(1) is the reliable spelling with a failure status.
        raise SystemExit(1)
    execution_start_time = datetime.datetime.now()
    try:
        polycli(args)
    except KeyboardInterrupt:
        print("\nKeyboardInterrupt!")
    print("Total runtime: {}".format(datetime.datetime.now() - execution_start_time))
# Script entry point: run the CLI and swallow a final Ctrl-C cleanly.
if __name__ == "__main__":
    try:
        cli()
    except KeyboardInterrupt:
        print("\nKeyboardInterrupt!")
| StarcoderdataPython |
4868023 | """Example for solving pose graph optimization problems loaded from `.g2o` files.
For a summary of options:
python pose_graph_g2o.py --help
"""
import dataclasses
import enum
import pathlib
from typing import Dict
import dcargs
import jaxfg
import matplotlib.pyplot as plt
import _g2o_utils
class SolverType(enum.Enum):
    """Nonlinear solvers selectable from the command line."""

    GAUSS_NEWTON = enum.auto()
    FIXED_ITERATION_GAUSS_NEWTON = enum.auto()
    LEVENBERG_MARQUARDT = enum.auto()
    DOGLEG = enum.auto()

    def get_solver(self) -> jaxfg.solvers.NonlinearSolverBase:
        """Return a fresh solver instance corresponding to this enum member."""
        # Renamed from `map`, which shadowed the builtin of the same name.
        solver_map: Dict[SolverType, jaxfg.solvers.NonlinearSolverBase] = {
            SolverType.GAUSS_NEWTON: jaxfg.solvers.GaussNewtonSolver(),
            SolverType.FIXED_ITERATION_GAUSS_NEWTON: jaxfg.solvers.FixedIterationGaussNewtonSolver(
                unroll=False
            ),
            SolverType.LEVENBERG_MARQUARDT: jaxfg.solvers.LevenbergMarquardtSolver(),
            SolverType.DOGLEG: jaxfg.solvers.DoglegSolver(),
        }
        return solver_map[self]
@dataclasses.dataclass
class CliArgs:
    # The triple-quoted strings after each field presumably double as help
    # text for dcargs.parse — confirm before removing or rewording them.
    g2o_path: pathlib.Path = pathlib.Path(__file__).parent / "data/input_M3500_g2o.g2o"
    """Path to g2o file."""
    solver_type: SolverType = SolverType.GAUSS_NEWTON
    """Nonlinear solver to use."""
def main():
    """Load a .g2o pose graph, time the chosen solver, and plot the result.

    The solve is run three times when possible: once to measure JIT compile
    plus a single step, once for an already-compiled single step, and once
    for the full optimization. SE2 graphs are plotted in 2D, SE3 in 3D.
    """
    # Parse CLI args
    cli_args = dcargs.parse(CliArgs)
    # Read graph
    with jaxfg.utils.stopwatch("Reading g2o file"):
        g2o: _g2o_utils.G2OData = _g2o_utils.parse_g2o(cli_args.g2o_path)
    # Make factor graph
    with jaxfg.utils.stopwatch("Making factor graph"):
        graph = jaxfg.core.StackedFactorGraph.make(g2o.factors)
    with jaxfg.utils.stopwatch("Making initial poses"):
        initial_poses = jaxfg.core.VariableAssignments.make_from_dict(g2o.initial_poses)
    # Time solver
    if not isinstance(
        cli_args.solver_type.get_solver(), jaxfg.solvers.FixedIterationGaussNewtonSolver
    ):
        # `max_iterations` field exists for all solvers but the fixed iteration GN
        with jaxfg.utils.stopwatch("Single-step JIT compile + solve"):
            solution_poses = graph.solve(
                initial_poses,
                solver=dataclasses.replace(
                    cli_args.solver_type.get_solver(), max_iterations=1
                ),
            )
            # block_until_ready() forces JAX's async dispatch to finish so
            # the stopwatch measures real work, not just dispatch time.
            solution_poses.storage.block_until_ready()
        with jaxfg.utils.stopwatch("Single-step solve (already compiled)"):
            solution_poses = graph.solve(
                initial_poses,
                solver=dataclasses.replace(
                    cli_args.solver_type.get_solver(), max_iterations=1
                ),
            )
            solution_poses.storage.block_until_ready()
    with jaxfg.utils.stopwatch("Full solve"):
        solution_poses = graph.solve(
            initial_poses, solver=cli_args.solver_type.get_solver()
        )
        solution_poses.storage.block_until_ready()
    # Plot
    plt.figure()
    # Visualize 2D poses
    if isinstance(
        next(iter(solution_poses.get_variables())), jaxfg.geometry.SE2Variable
    ):
        plt.plot(
            *(
                initial_poses.get_stacked_value(jaxfg.geometry.SE2Variable)
                .translation()
                .T
            ),
            # Equivalent:
            # *(onp.array([initial_poses.get_value(v).translation() for v in pose_variables]).T),
            c="r",
            label="Initial",
        )
        plt.plot(
            *(
                solution_poses.get_stacked_value(jaxfg.geometry.SE2Variable)
                .translation()
                .T
            ),
            # Equivalent:
            # *(onp.array([solution_poses.get_value(v).translation() for v in pose_variables]).T),
            c="b",
            label="Optimized",
        )
    # Visualize 3D poses
    elif isinstance(
        next(iter(solution_poses.get_variables())), jaxfg.geometry.SE3Variable
    ):
        ax = plt.axes(projection="3d")
        ax.set_box_aspect((1, 1, 1))
        ax.plot3D(
            *(
                initial_poses.get_stacked_value(jaxfg.geometry.SE3Variable)
                .translation()
                .T
            ),
            c="r",
            label="Initial",
        )
        ax.plot3D(
            *(
                solution_poses.get_stacked_value(jaxfg.geometry.SE3Variable)
                .translation()
                .T
            ),
            c="b",
            label="Optimized",
        )
    else:
        # Only SE2 and SE3 graphs are supported.
        assert False
    plt.title(f"Optimization on {cli_args.g2o_path.stem}")
    plt.legend()
    plt.show()
# Script entry point.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
294209 | import PySimpleGUI as sg
import global_variables
import get_pdfs
import get_json
import login
import os
import pandas as pd
import requests
import json
# Directory containing this script; used to build default file paths below.
CURRENT_DIR = os.path.dirname(__file__)

# This function makes it so when the user selects file paths for the csv, json files, and pdf files,
# their choices will be saved to the global variables in modules/global_variables.py, that way these paths
# can be referenced from anywhere in the program.
def declare_globals(event, values):
    """Copy the user-selected CSV/JSON/PDF paths into the shared
    global_variables module so every other module can read them."""
    path_fields = (
        ("CSV_INPUT_PATH", "pathCSV"),
        ("JSON_INPUT_OUTPUT_PATH", "pathJSON"),
        ("PDF_OUTPUT_PATH", "pathPDF"),
    )
    for attribute, form_key in path_fields:
        setattr(global_variables, attribute, values[form_key])
    print(f"CSV GLOBAL = {global_variables.CSV_INPUT_PATH}\nJSON GLOBAL = {global_variables.JSON_INPUT_OUTPUT_PATH}\nPDF GLOBAL = {global_variables.PDF_OUTPUT_PATH}")
# The first layout is the screen where you choose your paths and select what files you want to download.
layout = [
    [sg.Txt('Input CSV')],
    [sg.Input(os.path.abspath(os.path.join(CURRENT_DIR,"csv", "input.csv")),size=(50,1), key="pathCSV"), sg.FileBrowse("Browse", key="browseCSV", initial_folder="csv", file_types=(("CSV Files", "*.csv"),))],
    [sg.Txt('JSON Directory')],
    [sg.Input(os.path.abspath("json-output"),size=(50,1), key="pathJSON"), sg.FolderBrowse("Browse", key="browseJSON",initial_folder="json-output")],
    [sg.Txt('PDF Directory')],
    [sg.Input(os.path.abspath("pdf-output"),size=(50,1), key="pathPDF"), sg.FolderBrowse("Browse", key="browsePDF", initial_folder="pdf-output")],
    [sg.Button("Get JSON & PDFs", key="getJSON_PDF"), sg.Button("Get JSON", key="getJSON"), sg.Button("Get PDFs", key="getPDF")],
    [sg.Image("img/spinner.gif", key="spinner")]
]

# This layout is for the login screen and asks the user for their username, password, and client_matter.
loginLayout = [
    [sg.Txt("Log in to Docket Alarm")],
    [sg.Txt("Username:"), sg.Input(size=(50,1), key="username")],
    [sg.Txt("Password:"), sg.Input(size=(50,1), key="password")],
    [sg.Txt("Client Matter:"), sg.Input(size=(50,1), key="clientMatter")],
    [sg.Button("Submit", key="submit")]
]

# This code assigns the layout to their windows. This does not pull up the windows automatically.
# .read() must be called on these to open them. This merely selects the text in the window header for each
# window we might open in the program.
window = sg.Window('Bulk-Docket-Pull', layout)
loginWindow = sg.Window("Log In", loginLayout)
def display_login_window():
    """
    Displays the window to log in to Docket Alarm.
    Prompts the user for their username, password, and client matter.
    """
    # NOTE(review): closing this window with the X yields event None, which
    # this loop never handles — confirm intended behaviour.
    while True:
        # Read() opens the window of our choice.
        # Event is the key for the button pressed; values is a dictionary of
        # everything the user entered into the form.
        event, values = loginWindow.read()
        # An attempt at animating a loading spinner when PDFs are downloading.
        window.Element('spinner').UpdateAnimation("img/spinner.gif", time_between_frames=50)
        # if the submit button in this window is pressed...
        if event == "submit":
            # Docket Alarm login endpoint.
            login_url = "https://www.docketalarm.com/api/v1/login/"
            # Credentials entered by the user, posted as form data.
            data = {
                'username':values['username'],
                'password':values['password'],
                'client_matter':values['clientMatter'],
            }
            result = requests.post(login_url, data=data)
            # Convert the JSON response to a dictionary.
            result_json = result.json()
            # If the login failed...
            if result_json['success'] != True:
                # ...show a popup and return the user to the sign-in form.
                sg.popup_error("Invalid Username or Password.")
            else:
                # On success: persist credentials locally (pickle) so the
                # user is not asked again next time the script runs.
                login.store_user_info_locally(values['username'], values['password'], values['clientMatter'])
                sg.popup_ok("Login successful!")
                # Close the login window...
                loginWindow.close()
                # ...and hand over to the main window.
                display_main_window()
def display_main_window():
    """
    Displays the main window of the program.
    This window prompts the user for the paths for their CSV, their PDF files, and their JSON files.
    """
    while True:
        # Read() opens the window of our choice.
        # Event is the key for the button pressed; values is a dictionary of
        # everything the user entered into the form.
        event, values = window.read()
        print(event, values) # For debugging - Prints buttons pressed, and values returned to the console.
        # If the user selects the 'Get JSON & PDFs' button, we run the function that gets JSON and PDF files.
        if event == "getJSON_PDF":
            # Publish the chosen paths as program-wide globals.
            declare_globals(event, values)
            # NOTE(review): `main` is never imported in this module, so this
            # call raises NameError at runtime — confirm the intended module.
            main.get_json_and_pdfs()
        # If the user selects the 'Get JSON' button, we run the function that gets JSON files.
        if event == "getJSON":
            # Publish the chosen paths as program-wide globals.
            declare_globals(event, values)
            # Downloads JSON files.
            get_json.loop_dataframe()
        # If the user selects the 'Get PDFs' button, we run the function that gets the PDF files.
        # (JSON files must be downloaded first to use this option.)
        if event == "getPDF":
            # Publish the chosen paths as program-wide globals.
            declare_globals(event, values)
            # Gets all the links to PDF files from within the json files in the directory the user specified.
            # This is a list of tuples.
            link_list = get_pdfs.get_urls("json-output")
            # Downloads all of the PDF files. Takes the link list from above as an argument.
            get_pdfs.thread_download_pdfs(link_list)
        # If the user closes the window with the "X" in the corner...
        if event == sg.WIN_CLOSED:
            # Close the window.
            break
def gui_run():
    """
    Used for running the GUI.
    Checks to see if valid user login information is stored on the local machine.
    If it is not, it prompts the user to log in.
    If it is, it logs the user in and opens directly to the main window where the
    user can select their filepaths and what files they want to download.
    """
    # If there is no file stored locally containing login credentials...
    if not os.path.isfile(os.path.join(CURRENT_DIR, "sav", "credentials.pickle")):
        # ...prompt the user to enter their login info.
        display_login_window()
    # Otherwise go straight to the main window of the program.
    else:
        display_main_window()
# If this file is run directly...
if __name__ == "__main__":
# Crunch the logic to see if the user has logged in successfully before,
# and open the correct window for them.
gui_run() | StarcoderdataPython |
5166430 | <reponame>mkitto/benchmarks
import os
import platform
import socket
import sys
# Sieve upper bound and the decimal prefix whose prime extensions we collect.
UPPER_BOUND = 5000000
PREFIX = 32338
class Node:
    """A digit-trie node: children keyed by digit character, plus a flag
    marking the end of a complete stored number."""

    def __init__(self):
        # No children yet; not the end of any stored number.
        self.children = dict()
        self.terminal = False
class Sieve:
    """Sieve of Atkin over [0, limit]; 2 and 3 are handled implicitly."""

    def __init__(self, limit):
        self.limit = limit
        # Candidate flags, toggled by the quadratic-form steps below.
        self.prime = [False for _ in range(limit + 1)]

    def to_list(self):
        """Return all primes found, starting with the hard-coded 2 and 3."""
        return [2, 3] + [p for p in range(5, self.limit + 1) if self.prime[p]]

    def omit_squares(self):
        """Final Atkin pass: clear multiples of squares of flagged candidates."""
        r = 5
        while r * r < self.limit:
            if self.prime[r]:
                multiple = r * r
                while multiple < self.limit:
                    self.prime[multiple] = False
                    multiple += r * r
            r += 1
        return self

    def step1(self, x, y):
        # Quadratic form 4x^2 + y^2; toggles candidates with n % 12 in {1, 5}.
        n = 4 * x * x + y * y
        if n <= self.limit and n % 12 in (1, 5):
            self.prime[n] = not self.prime[n]

    def step2(self, x, y):
        # Quadratic form 3x^2 + y^2; toggles candidates with n % 12 == 7.
        n = 3 * x * x + y * y
        if n <= self.limit and n % 12 == 7:
            self.prime[n] = not self.prime[n]

    def step3(self, x, y):
        # Quadratic form 3x^2 - y^2 (x > y); toggles candidates with n % 12 == 11.
        n = 3 * x * x - y * y
        if x > y and n <= self.limit and n % 12 == 11:
            self.prime[n] = not self.prime[n]

    def loop_y(self, x):
        y = 1
        while y * y < self.limit:
            self.step1(x, y)
            self.step2(x, y)
            self.step3(x, y)
            y += 1

    def loop_x(self):
        x = 1
        while x * x < self.limit:
            self.loop_y(x)
            x += 1

    def calc(self):
        """Run the full sieve; returns self so calls can be chained."""
        self.loop_x()
        return self.omit_squares()
def generate_trie(l):
    """Build a digit trie over the decimal representations of the numbers
    in *l*; terminal nodes mark complete numbers."""
    root = Node()
    for number in l:
        cursor = root
        for digit in str(number):
            cursor = cursor.children.setdefault(digit, Node())
        cursor.terminal = True
    return root
def find(upper_bound, prefix):
    """Return every prime <= upper_bound whose decimal form starts with
    *prefix*, in breadth-first (shortest-first) order, or None when no
    prime starts with that prefix.
    """
    from collections import deque  # local import: O(1) queue operations below

    primes = Sieve(upper_bound).calc()
    str_prefix = str(prefix)
    head = generate_trie(primes.to_list())
    # Walk down to the trie node representing the prefix itself.
    for ch in str_prefix:
        head = head.children.get(ch)
        if head is None:
            return None
    # Breadth-first traversal. PERF FIX: deque replaces the original list
    # whose insert(0, ...) made every enqueue O(n); visit order is unchanged.
    queue, result = deque([(head, str_prefix)]), []
    while queue:
        top, prefix = queue.popleft()
        if top.terminal:
            result.append(int(prefix))
        for ch, v in top.children.items():
            queue.append((v, prefix + ch))
    return result
def notify(msg):
    # Best-effort handshake with the benchmark harness: if nothing is
    # listening on localhost:9001 (connect_ex returns non-zero) the message
    # is silently dropped.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        if not s.connect_ex(("localhost", 9001)):
            s.sendall(bytes(msg, "utf8"))
def verify():
    """Smoke-test find() against a known answer before benchmarking."""
    left = [2, 23, 29]
    right = find(100, 2)
    if left != right:
        print("%s != %s" % (left, right), file=sys.stderr)
        # BUG FIX: quit() is a site-module convenience that the Python docs
        # say should not be used in programs (it may be absent, e.g. under
        # -S); sys.exit(1) is the reliable spelling.
        sys.exit(1)
if __name__ == "__main__":
    # Sanity-check first, announce ourselves to the harness, run the
    # benchmark workload, then signal completion and print the results.
    verify()
    notify("%s\t%d" % (platform.python_implementation(), os.getpid()))
    results = find(UPPER_BOUND, PREFIX)
    notify("stop")
    print(results)
| StarcoderdataPython |
371920 | <filename>x_spam.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import json
import timeit
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import containers
from keras.layers.core import Dense, AutoEncoder
from keras.layers.noise import GaussianNoise
from sklearn.metrics import (precision_score, recall_score, auc,
f1_score, accuracy_score, roc_curve,
confusion_matrix, matthews_corrcoef)
from spam.common import utils
from spam.deeplearning import LossHistory
from spam.preprocess import Preprocess
# ---------------------------------------------------------------------------
# Stacked denoising autoencoder spam classifier (Keras 0.x-era API).
# Flow: load + vectorize the Enron dataset, greedily pretrain denoising
# autoencoder layers on unlabeled mail, fine-tune a softmax classifier,
# evaluate, then persist model/metrics/plots under experiments/exp_<n>/.
# ---------------------------------------------------------------------------
start_time = timeit.default_timer()
np.random.seed(1337)  # fixed seed for reproducibility
exp_num = 100  # experiment id, used for the output directory name

# Preprocessing / vectorization configuration.
preprocess_params = {
    'max_words': 1000,
    'max_len': 800,
    'mode': 'tfidf',
    'read_csv': True,
    'read_csv_filepath': 'data/csv/clean_dataset.csv',
    'classes': 2,
}
# Training hyperparameters.
epochs = 50
batch_size = 128
classes = 2
hidden_layers = [800, 500, 300, ]
noise_layers = [0.6, 0.4, ]  # Gaussian noise level per pretraining layer
pretr_activ = 'sigmoid'
pretr_opt = 'adadelta'
pretr_loss = 'mse'
fine_activ = 'softmax'
fine_opt = 'adadelta'
fine_loss = 'categorical_crossentropy'

# NOTE(review): `clean` appears unused in the rest of this script — confirm.
clean = lambda words: [str(word)
                       for word in words
                       if type(word) is not float]

print('\n{}\n'.format('-' * 50))
print('Reading the dataset..')
preprocessor = Preprocess(**preprocess_params)
print('Spliting the dataset..')
enron_dataset = preprocessor.dataset
enron_dataset = utils.split_dataset(x=enron_dataset['body'].values,
                                    y=enron_dataset['label'].values)
print('Transforming dataset into vectors and matrices..')
enron_dataset = preprocessor.transform(dataset=enron_dataset)
vocabulary = preprocessor.vocabulary

print('\n{}\n'.format('-' * 50))
print('Building model..')
encoders = []
noises = []
pretraining_history = []
input_data = np.copy(enron_dataset.unlabel)
print('Pretraining model..')
# Greedy layer-wise pretraining: each denoising autoencoder is trained on
# the previous layer's output, then its encoder/noise layers are kept.
for i, (n_in, n_out) in enumerate(zip(
        hidden_layers[:-1], hidden_layers[1:]), start=1):
    print('Training layer {}: {} Layers -> {} Layers'
          .format(i, n_in, n_out))
    ae = Sequential()
    encoder = containers.Sequential([
        GaussianNoise(noise_layers[i - 1], input_shape=(n_in,)),
        Dense(input_dim=n_in, output_dim=n_out,
              activation=pretr_activ, init='uniform'),
    ])
    decoder = Dense(input_dim=n_out, output_dim=n_in,
                    activation=pretr_activ)
    ae.add(AutoEncoder(encoder=encoder, decoder=decoder,
                       output_reconstruction=False))
    ae.compile(loss=pretr_loss, optimizer=pretr_opt)
    temp_history = LossHistory()
    ae.fit(input_data, input_data, batch_size=batch_size,
           nb_epoch=epochs, callbacks=[temp_history])
    pretraining_history += temp_history.losses
    # Keep the trained Dense encoder and its noise layer for the final stack.
    encoders.append(ae.layers[0].encoder.layers[1])
    noises.append(ae.layers[0].encoder.layers[0])
    # Feed the encoded representation to the next pretraining stage.
    input_data = ae.predict(input_data)

# Stack the pretrained layers and top them with a softmax classifier.
model = Sequential()
for encoder, noise in zip(encoders, noises):
    model.add(noise)
    model.add(encoder)
model.add(Dense(input_dim=hidden_layers[-1], output_dim=classes,
                activation=fine_activ))
model.compile(loss=fine_loss, optimizer=fine_opt)

print('\n{}\n'.format('-' * 50))
print('Finetuning the model..')
finetune_history = LossHistory()
model.fit(
    enron_dataset.train.X, enron_dataset.train.Y,
    batch_size=batch_size,
    nb_epoch=epochs, show_accuracy=True,
    validation_data=(enron_dataset.test.X, enron_dataset.test.Y),
    validation_split=0.1,
    callbacks=[finetune_history],
)

print('\n{}\n'.format('-' * 50))
print('Evaluating model..')
y_pred = model.predict_classes(enron_dataset.test.X)

# Dataset bookkeeping saved alongside the metrics.
metrics = {}
data_meta = {}
data_meta['unlabeled_count'] = len(enron_dataset.unlabel)
data_meta['labeled_count'] = \
    len(enron_dataset.train.X) + len(enron_dataset.test.X)
data_meta['train_data'] = {}
data_meta['test_data'] = {}
data_meta['train_data']['spam_count'] = int(sum(enron_dataset.train.y))
data_meta['train_data']['ham_count'] = \
    int(len(enron_dataset.train.y) - sum(enron_dataset.train.y))
data_meta['train_data']['total_count'] = \
    data_meta['train_data']['spam_count'] + \
    data_meta['train_data']['ham_count']
data_meta['test_data']['spam_count'] = int(sum(enron_dataset.test.y))
data_meta['test_data']['ham_count'] = \
    int(len(enron_dataset.test.y) - sum(enron_dataset.test.y))
data_meta['test_data']['total_count'] = \
    data_meta['test_data']['spam_count'] + \
    data_meta['test_data']['ham_count']

# Confusion-matrix cells and scalar classification metrics.
conf_matrix = confusion_matrix(enron_dataset.test.y, y_pred)
metrics['true_positive'], metrics['true_negative'], \
    metrics['false_positive'], metrics['false_negative'] = \
    int(conf_matrix[0][0]), int(conf_matrix[1][1]), \
    int(conf_matrix[0][1]), int(conf_matrix[1][0])
false_positive_rate, true_positive_rate, _ = \
    roc_curve(enron_dataset.test.y, y_pred)
roc_auc = auc(false_positive_rate, true_positive_rate)
metrics['accuracy'] = accuracy_score(enron_dataset.test.y, y_pred)
metrics['precision'] = precision_score(enron_dataset.test.y, y_pred)
metrics['recall'] = recall_score(enron_dataset.test.y, y_pred)
metrics['f1'] = f1_score(enron_dataset.test.y, y_pred)
metrics['mcc'] = matthews_corrcoef(enron_dataset.test.y, y_pred)
metrics['auc'] = roc_auc
for key, value in metrics.items():
    print('{}: {}'.format(key, value))

print('\n{}\n'.format('-' * 50))
# Persist model structure/weights, metrics, dataset meta and vocabulary.
exp_dir = 'experiments/exp_{}'.format(exp_num)
print('Saving config results inside {}'.format(exp_dir))
os.makedirs(exp_dir, exist_ok=True)
open('{}/model_structure.json'.format(exp_dir), 'w') \
    .write(model.to_json())
model.save_weights('{}/model_weights.hdf5'
                   .format(exp_dir), overwrite=True)
with open('{}/metrics.json'.format(exp_dir), 'w') as f:
    json.dump(metrics, f, indent=4)
with open('{}/data_meta.json'.format(exp_dir), 'w') as f:
    json.dump(data_meta, f, indent=4)
with open('{}/vocabulary.json'.format(exp_dir), 'w') as f:
    json.dump(vocabulary, f)

# ROC curve plot.
plt.figure(1)
plt.title('Receiver Operating Characteristic')
plt.plot(false_positive_rate, true_positive_rate, 'b',
         label='AUC = {}'.format(roc_auc))
plt.legend(loc='lower right')
plt.plot([0, 1], [0, 1], 'r--')
plt.xlim([-0.1, 1.2])
plt.ylim([-0.1, 1.2])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.savefig('{}/roc_curve.png'.format(exp_dir))

# TODO: add labels to loss history
plt.figure(2)
plt.title('Pretraining loss history')
plt.plot(pretraining_history)
plt.savefig('{}/pretraining_loss.png'.format(exp_dir))

plt.figure(3)
plt.title('Finetune loss history')
plt.plot(finetune_history.losses)
plt.savefig('{}/finetune_loss.png'.format(exp_dir))

end_time = timeit.default_timer()
print('Done!')
print('Run for %.2fm' % ((end_time - start_time) / 60.0))
6492427 | <reponame>arajajyothibabu/PythonLearning
__author__ = 'Kalyan'
max_marks = 35 # 15 marks for encode and 20 for decode
problem_notes ='''
This problem deals with number conversion into a custom base 5 notation and back.
In this notation, the letters a to e are used for digits 0 to 4.
E.g. decimal 10 in this custom base 5 notation is "ca", decimal 5 is "ba" etc.
Your job is to write encoding and decoding (both) routines to deal with this notation.
'''
# Notes:
# - If number is not a valid int or long raise TypeError
# - Negative numbers should result in a - prefix to the result similar to how bin works
# use lower case letters in your result [a to e].
def convert(n):
    """Map a single base-5 digit (0..4) to its letter ('a'..'e').

    Raises KeyError for values outside 0..4, matching the original
    dict-lookup behaviour.
    """
    digit_letters = dict(enumerate("abcde"))
    return digit_letters[n]
def reverse_of_number(n):
    """Pack the base-5 digits of *n* (least-significant first) into a
    decimal number, e.g. 7 -> base-5 "12" -> 21.

    Only meaningful for n >= 0; returns 0 for n <= 0.
    """
    s = 0
    while n > 0:
        s = s * 10 + n % 5
        # BUG FIX: '/' is float division in Python 3, which turned n into a
        # float and broke this loop; '//' keeps n an integer.
        n //= 5
    return s
def to_custom_base5(number):
    """Encode an int in the custom base-5 letter notation ('a'..'e' = 0..4).

    Per the exercise notes above: raises TypeError for non-int input and
    prefixes negative numbers with '-', mirroring how bin() behaves.
    Examples: 10 -> "ca", 5 -> "ba", 0 -> "a".
    """
    if not isinstance(number, int):
        # Spec: "If number is not a valid int or long raise TypeError".
        raise TypeError("number must be an int")
    if number < 0:
        # Spec: negatives get a '-' prefix, like bin().
        return "-" + to_custom_base5(-number)
    digits = "abcde"
    if number == 0:
        return "a"
    encoded = []
    while number > 0:
        # BUG FIX: the original used '/' (float division in Python 3);
        # divmod keeps the arithmetic in integers.
        number, remainder = divmod(number, 5)
        encoded.append(digits[remainder])
    return "".join(reversed(encoded))
# Notes:
# - if s is not a string, raise TypeError
# - if the encoding is not right or empty string, raise ValueError
# - allow both - and + as prefixes which represent sign.
# - allow trailing and starting spaces (but not once the sign or number starts)
# - allow both capital and small letters.
# - return a int or long that corresponds to the number.
def base5_to_10(n):
    """Interpret the decimal digits of *n* (each expected to be 0..4) as
    base-5 digits and return the value, e.g. 20 -> 10, 21 -> 11."""
    s = 0
    i = 0
    while n > 0:
        # n % 5 equals the last decimal digit here because every digit is
        # 0..4 and 10 is a multiple of 5.
        s += (n % 5) * (5 ** i)
        # BUG FIX: '/' became float division in Python 3 and broke this
        # loop; '//' strips the processed digit while keeping n an int.
        n //= 10
        i += 1
    return s
def from_custom_base5(s):
    """Decode a custom base-5 string (letters 'a'-'e' == digits 0-4) into an int.

    Per the module notes: leading/trailing whitespace and a single '+'/'-'
    sign prefix are allowed, and letters may be upper- or lower-case.

    Raises:
        TypeError: if *s* is not a string.
        ValueError: if *s* is empty or contains anything but sign/digit letters.
    """
    if not isinstance(s, str):
        raise TypeError("Input is not a String")
    text = s.strip()
    if not text:
        raise ValueError("empty string")
    sign = 1
    if text[0] in "+-":
        # The documented contract requires accepting an explicit sign prefix.
        sign = -1 if text[0] == "-" else 1
        text = text[1:]
        if not text:
            raise ValueError("no digits after sign")
    value = 0
    for ch in text:
        digit = ord(ch.lower()) - ord('a')
        if not 0 <= digit <= 4:
            raise ValueError("invalid base-5 digit: %r" % ch)
        # Direct base-5 accumulation replaces the decimal-coded detour via
        # base5_to_10; results are identical for all valid inputs.
        value = value * 5 + digit
    return sign * value
# a basic test is given, write your own tests based on constraints.
def test_to_custom_base5():
    """Sanity check: decimal 10 encodes to 'ca'."""
    assert to_custom_base5(10) == "ca"
# a basic test is given, write your own tests based on constraints.
def test_from_custom_base5():
    """Sanity check: 'ca' decodes back to decimal 10."""
    assert from_custom_base5("ca") == 10
3224026 | <filename>kaggle/decision_tree.py
#!/usr/bin/env python3
import pandas as pd
import pydotplus
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from sklearn.externals.six import StringIO
from IPython.display import Image
def plot_dt(clf, feature_cols):
    """Render a fitted decision tree to img/decision_tree.png via graphviz.

    Args:
        clf: a fitted sklearn DecisionTreeClassifier.
        feature_cols: feature names, in training-column order.
    """
    dot_data = StringIO()
    # Export the tree as graphviz "dot" text; '0'/'1' are the target class labels.
    export_graphviz(clf, out_file=dot_data,
                    filled=True, rounded=True,
                    special_characters=True,feature_names = feature_cols,class_names=['0','1'])
    graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
    graph.write_png('img/decision_tree.png')
    # Image(...) only renders inside IPython/Jupyter; outside a notebook the
    # return value is discarded and this line is effectively a no-op.
    Image(graph.create_png())
def resampling(df_train):
    """Balance a binary-target frame by under-sampling class 0 to class 1's size.

    Rows with target == 0 are randomly sampled down to the number of
    target == 1 rows and the two groups are concatenated.

    Args:
        df_train: DataFrame with a binary 'target' column (labels 0 and 1).

    Returns:
        A new DataFrame with equally many class-0 and class-1 rows.
    """
    # Count by *label*, not by value_counts() position: value_counts() orders
    # by frequency, so positional unpacking silently swapped the counts
    # whenever class 1 happened to be the majority (bugfix).
    class_counts = df_train['target'].value_counts()
    count_class_1 = class_counts.loc[1]

    # Divide by class
    df_class_0 = df_train[df_train['target'] == 0]
    df_class_1 = df_train[df_train['target'] == 1]

    df_class_0_under = df_class_0.sample(count_class_1)
    return pd.concat([df_class_0_under, df_class_1])
def main():
    """Train and evaluate a decision tree on the (class-balanced) Kaggle train set."""
    train = pd.read_csv('data/train.csv')
    #test = pd.read_csv('data/test.csv')
    # Balance the classes before fitting (random under-sampling of class 0).
    train = resampling(train)
    #target_count = train.target.value_counts()
    #print('Class 0:', target_count[0])
    #print('Class 1:', target_count[1])
    labels = train.columns[2:] # Remove id and target
    X = train[labels]
    y = train['target']
    # 80/20 random holdout split for evaluation.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20)
    # Create
    classifier = DecisionTreeClassifier()
    # Train
    classifier.fit(X_train, y_train)
    # Predict
    y_pred = classifier.predict(X_test)
    # Evaluate
    print('\nReport: ')
    print(classification_report(y_test, y_pred))
    print('\nConfusion Matrix: ')
    print(confusion_matrix(y_test, y_pred))
    print('\nAccuracy: ')
    print(accuracy_score(y_test, y_pred)*100)
    #plot_dt(classifier,labels)
if __name__ == "__main__":
main()
| StarcoderdataPython |
4931991 | <reponame>ch1huizong/learning
def somename(self, *args):
    """Delegate to the superclass's `somename`, returning None when absent.

    NOTE(review): `cls` is a free variable here — this snippet assumes a
    module-level `cls` exists (or that the code is pasted into class-aware
    machinery). As written it raises NameError otherwise; confirm intent.
    """
    ## ...some preliminary task...
    try:
        # AttributeError fires when no ancestor of `cls` defines `somename`.
        super_method = super(cls, self).somename
    except AttributeError:
        return None
    else:
        return super_method(*args)
| StarcoderdataPython |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
def function(number):
    """Return *number* increased by 3.

    The original routed this through a nested closure (`function_2`);
    the observable result is identical.
    """
    increment = 3
    return number + increment
if __name__ == '__main__':
    # Prompt text is user-facing Russian for "Enter a number:".
    k = float(input("Введите число: "))
    cnt = function(k)
    print(cnt)
| StarcoderdataPython |
3242541 | <filename>kili423/kili423.py
#!/usr/bin/python3.1
# Rewrite a KiCad footprint module: move Fab layers to Eco1.User and CrtYd
# layers to Eco2.User, printing the result and saving a copy.
import sys
# argv[1] is the input .kicad_mod path.
src = sys.argv[1]
# NOTE(review): despite the 'old' suffix, the *modified* module is written to
# the "...old.kicad_mod" file while the original stays untouched — confirm
# that this naming is intentional.
dst = src.replace(".kicad_mod","old.kicad_mod")
with open(src,'r') as f:
    srcMod = f.read();
srcMod = srcMod.replace('F.Fab','Eco1.User')
srcMod = srcMod.replace('B.Fab','Eco1.User')
srcMod = srcMod.replace('F.CrtYd','Eco2.User')
srcMod = srcMod.replace('B.CrtYd','Eco2.User')
print(srcMod)
with open(dst,'w') as f:
    f.write(srcMod)
| StarcoderdataPython |
3286040 | # pretty printing for stage 2.
# put "source /path/to/stage2_gdb_pretty_printers.py" in ~/.gdbinit to load it automatically.
import re
import gdb.printing
class TypePrinter:
    """gdb pretty-printer for stage2 `type.Type`.

    A Type is either a bare tag (stored inline in `tag_if_small_enough`
    whenever that value is below `no_payload_count`) or a pointer to a
    payload struct; `payload_type_names` maps tag names to the payload
    struct names declared in src/type.zig.
    """
    no_payload_count = 4096

    # Keep in sync with src/type.zig
    # Types which have no payload do not need to be entered here.
    payload_type_names = {
        'array_u8': 'type.Len',
        'array_u8_sentinel_0': 'Len',

        'single_const_pointer': 'ElemType',
        'single_mut_pointer': 'ElemType',
        'many_const_pointer': 'ElemType',
        'many_mut_pointer': 'ElemType',
        'c_const_pointer': 'ElemType',
        'c_mut_pointer': 'ElemType',
        'const_slice': 'ElemType',
        'mut_slice': 'ElemType',
        'optional': 'ElemType',
        'optional_single_mut_pointer': 'ElemType',
        'optional_single_const_pointer': 'ElemType',
        'anyframe_T': 'ElemType',

        'int_signed': 'Bits',
        'int_unsigned': 'Bits',

        'error_set': 'ErrorSet',
        'error_set_inferred': 'ErrorSetInferred',
        'error_set_merged': 'ErrorSetMerged',

        'array': 'Array',
        'vector': 'Array',

        'array_sentinel': 'ArraySentinel',
        'pointer': 'Pointer',
        'function': 'Function',
        'error_union': 'ErrorUnion',
        'error_set_single': 'Name',
        'opaque': 'Opaque',
        'struct': 'Struct',
        'union': 'Union',
        'union_tagged': 'Union',
        'enum_full, .enum_nonexhaustive': 'EnumFull',
        'enum_simple': 'EnumSimple',
        'enum_numbered': 'EnumNumbered',
        'empty_struct': 'ContainerScope',
        'tuple': 'Tuple',
        'anon_struct': 'AnonStruct',
    }

    def __init__(self, val):
        self.val = val

    def tag(self):
        """Return the tag: the inline value when small, else the payload's tag field."""
        small = self.val['tag_if_small_enough']
        if small < TypePrinter.no_payload_count:
            return small
        return self.val['ptr_otherwise'].dereference()['tag']

    def payload_type(self):
        """Look up the gdb type of this tag's payload struct, or None when tagless."""
        tag = self.tag()
        if tag is None:
            return None
        name = TypePrinter.payload_type_names.get(str(tag))
        if name is None:
            return None
        return gdb.lookup_type('struct type.%s' % name)

    def to_string(self):
        """Inline tags render as '.<tag>'; payload-carrying types defer to children()."""
        tag = self.tag()
        if tag is None:
            return '(invalid type)'
        if self.val['tag_if_small_enough'] < TypePrinter.no_payload_count:
            return '.%s' % str(tag)
        return None

    def children(self):
        """Yield (tag, payload) pairs for payload-carrying types; nothing otherwise."""
        if self.val['tag_if_small_enough'] < TypePrinter.no_payload_count:
            return
        yield ('tag', '.%s' % str(self.tag()))
        payload = self.payload_type()
        if payload is not None:
            yield ('payload',
                   self.val['ptr_otherwise'].cast(payload.pointer()).dereference()['data'])
class ValuePrinter:
    """gdb pretty-printer for stage2 `value.Value`.

    Mirrors TypePrinter: small tags are stored inline in
    `tag_if_small_enough`; larger values point at a payload struct whose
    name is looked up in `payload_type_names` (kept in sync with
    src/value.zig).
    """
    no_payload_count = 4096

    # Keep in sync with src/value.zig
    # Values which have no payload do not need to be entered here.
    payload_type_names = {
        'big_int_positive': 'BigInt',
        'big_int_negative': 'BigInt',

        'extern_fn': 'ExternFn',

        'decl_ref': 'Decl',

        'repeated': 'SubValue',
        'eu_payload': 'SubValue',
        'opt_payload': 'SubValue',
        'empty_array_sentinel': 'SubValue',

        'eu_payload_ptr': 'PayloadPtr',
        'opt_payload_ptr': 'PayloadPtr',

        'bytes': 'Bytes',
        'enum_literal': 'Bytes',

        'slice': 'Slice',

        'enum_field_index': 'U32',

        'ty': 'Ty',
        'int_type': 'IntType',
        'int_u64': 'U64',
        'int_i64': 'I64',
        'function': 'Function',
        'variable': 'Variable',
        'decl_ref_mut': 'DeclRefMut',
        'elem_ptr': 'ElemPtr',
        'field_ptr': 'FieldPtr',
        'float_16': 'Float_16',
        'float_32': 'Float_32',
        'float_64': 'Float_64',
        'float_80': 'Float_80',
        'float_128': 'Float_128',
        'error': 'Error',
        'inferred_alloc': 'InferredAlloc',
        'inferred_alloc_comptime': 'InferredAllocComptime',
        'aggregate': 'Aggregate',
        'union': 'Union',
        'bound_fn': 'BoundFn',
    }

    def __init__(self, val):
        self.val = val

    def tag(self):
        """Return the tag: the inline value when small, else the payload's tag field."""
        small = self.val['tag_if_small_enough']
        if small < ValuePrinter.no_payload_count:
            return small
        return self.val['ptr_otherwise'].dereference()['tag']

    def payload_type(self):
        """Look up the gdb type of this tag's payload struct, or None when tagless."""
        tag = self.tag()
        if tag is None:
            return None
        name = ValuePrinter.payload_type_names.get(str(tag))
        if name is None:
            return None
        return gdb.lookup_type('struct value.%s' % name)

    def to_string(self):
        """Inline tags render as '.<tag>'; payload-carrying values defer to children()."""
        tag = self.tag()
        if tag is None:
            return '(invalid value)'
        if self.val['tag_if_small_enough'] < ValuePrinter.no_payload_count:
            return '.%s' % str(tag)
        return None

    def children(self):
        """Yield (tag, payload) pairs for payload-carrying values; nothing otherwise."""
        if self.val['tag_if_small_enough'] < ValuePrinter.no_payload_count:
            return
        yield ('tag', '.%s' % str(self.tag()))
        payload = self.payload_type()
        if payload is not None:
            yield ('payload',
                   self.val['ptr_otherwise'].cast(payload.pointer()).dereference()['data'])
# Register both printers for the current objfile so gdb applies them to any
# variable whose type name matches the given regexes.
pp = gdb.printing.RegexpCollectionPrettyPrinter('Zig stage2 compiler')
pp.add_printer('Type', r'^type\.Type$', TypePrinter)
pp.add_printer('Value', r'^value\.Value$', ValuePrinter)
gdb.printing.register_pretty_printer(gdb.current_objfile(), pp)
| StarcoderdataPython |
3533921 | def generate_all_subset(list_, toggles, i):
if i == len(list_):
res = []
for idx in range(len(toggles)):
if toggles[idx] == 1:
res.append(list_[idx])
print(res)
else:
toggles[i] = 0
generate_all_subset(list_, toggles, i + 1)
toggles[i] = 1
generate_all_subset(list_, toggles, i + 1)
generate_all_subset([1, 2, 3, 4, 5], [0, 0, 0, 0, 0], 0)
| StarcoderdataPython |
8146948 | <reponame>gary-beautypie/pipelinewise-target-snowflake<filename>tests/unit/test_flattening.py
import unittest
import target_snowflake.flattening as flattening
class TestFlattening(unittest.TestCase):
    """Unit tests for target_snowflake.flattening: schema and record flattening
    at various max_level depths (0 = none, 1 = one level, 10 = deep)."""
    def setUp(self):
        # No target config is needed by these tests.
        self.config = {}
    def test_flatten_schema(self):
        """Test flattening of SCHEMA messages"""
        flatten_schema = flattening.flatten_schema
        # Schema with no object properties should be empty dict
        schema_with_no_properties = {"type": "object"}
        self.assertEqual(flatten_schema(schema_with_no_properties), {})
        not_nested_schema = {
            "type": "object",
            "properties": {
                "c_pk": {"type": ["null", "integer"]},
                "c_varchar": {"type": ["null", "string"]},
                "c_int": {"type": ["null", "integer"]}}}
        # NO FLATTENING - Schema with simple properties should be a plain dictionary
        self.assertEqual(flatten_schema(not_nested_schema), not_nested_schema['properties'])
        nested_schema_with_no_properties = {
            "type": "object",
            "properties": {
                "c_pk": {"type": ["null", "integer"]},
                "c_varchar": {"type": ["null", "string"]},
                "c_int": {"type": ["null", "integer"]},
                "c_obj": {"type": ["null", "object"]}}}
        # NO FLATTENING - Schema with object type property but without further properties should be a plain dictionary
        self.assertEqual(flatten_schema(nested_schema_with_no_properties),
                         nested_schema_with_no_properties['properties'])
        nested_schema_with_properties = {
            "type": "object",
            "properties": {
                "c_pk": {"type": ["null", "integer"]},
                "c_varchar": {"type": ["null", "string"]},
                "c_int": {"type": ["null", "integer"]},
                "c_obj": {
                    "type": ["null", "object"],
                    "properties": {
                        "nested_prop1": {"type": ["null", "string"]},
                        "nested_prop2": {"type": ["null", "string"]},
                        "nested_prop3": {
                            "type": ["null", "object"],
                            "properties": {
                                "multi_nested_prop1": {"type": ["null", "string"]},
                                "multi_nested_prop2": {"type": ["null", "string"]}
                            }
                        }
                    }
                }
            }
        }
        # NO FLATTENING - Schema with object type property but without further properties should be a plain dictionary
        # No flattening (default)
        self.assertEqual(flatten_schema(nested_schema_with_properties), nested_schema_with_properties['properties'])
        # NO FLATTENING - Schema with object type property but without further properties should be a plain dictionary
        #   max_level: 0 : No flattening (default)
        self.assertEqual(flatten_schema(nested_schema_with_properties, max_level=0),
                         nested_schema_with_properties['properties'])
        # FLATTENING - Schema with object type property but without further properties should be a dict with
        # flattened properties
        # max_level=1 flattens one level: nested_prop3 stays an (unflattened) object.
        self.assertEqual(flatten_schema(nested_schema_with_properties, max_level=1),
                         {
                             'c_pk': {'type': ['null', 'integer']},
                             'c_varchar': {'type': ['null', 'string']},
                             'c_int': {'type': ['null', 'integer']},
                             'c_obj__nested_prop1': {'type': ['null', 'string']},
                             'c_obj__nested_prop2': {'type': ['null', 'string']},
                             'c_obj__nested_prop3': {
                                 'type': ['null', 'object'],
                                 "properties": {
                                     "multi_nested_prop1": {"type": ["null", "string"]},
                                     "multi_nested_prop2": {"type": ["null", "string"]}
                                 }
                             }
                         })
        # FLATTENING - Schema with object type property but without further properties should be a dict with
        # flattened properties
        # max_level=10 flattens everything down to scalar leaves.
        self.assertEqual(flatten_schema(nested_schema_with_properties, max_level=10),
                         {
                             'c_pk': {'type': ['null', 'integer']},
                             'c_varchar': {'type': ['null', 'string']},
                             'c_int': {'type': ['null', 'integer']},
                             'c_obj__nested_prop1': {'type': ['null', 'string']},
                             'c_obj__nested_prop2': {'type': ['null', 'string']},
                             'c_obj__nested_prop3__multi_nested_prop1': {'type': ['null', 'string']},
                             'c_obj__nested_prop3__multi_nested_prop2': {'type': ['null', 'string']}
                         })
    def test_flatten_record(self):
        """Test flattening of RECORD messages"""
        flatten_record = flattening.flatten_record
        empty_record = {}
        # Empty record should be empty dict
        self.assertEqual(flatten_record(empty_record), {})
        not_nested_record = {"c_pk": 1, "c_varchar": "1", "c_int": 1}
        # NO FLATTENING - Record with simple properties should be a plain dictionary
        self.assertEqual(flatten_record(not_nested_record), not_nested_record)
        nested_record = {
            "c_pk": 1,
            "c_varchar": "1",
            "c_int": 1,
            "c_obj": {
                "nested_prop1": "value_1",
                "nested_prop2": "value_2",
                "nested_prop3": {
                    "multi_nested_prop1": "multi_value_1",
                    "multi_nested_prop2": "multi_value_2",
                }}}
        # NO FLATTENING - No flattening (default)
        # Unflattened object values are serialised to JSON strings.
        self.assertEqual(flatten_record(nested_record),
                         {
                             "c_pk": 1,
                             "c_varchar": "1",
                             "c_int": 1,
                             "c_obj": '{"nested_prop1": "value_1", "nested_prop2": "value_2", "nested_prop3": {'
                                      '"multi_nested_prop1": "multi_value_1", "multi_nested_prop2": "multi_value_2"}}'
                         })
        # NO FLATTENING
        #   max_level: 0 : No flattening (default)
        self.assertEqual(flatten_record(nested_record, max_level=0),
                         {
                             "c_pk": 1,
                             "c_varchar": "1",
                             "c_int": 1,
                             "c_obj": '{"nested_prop1": "value_1", "nested_prop2": "value_2", "nested_prop3": {'
                                      '"multi_nested_prop1": "multi_value_1", "multi_nested_prop2": "multi_value_2"}}'
                         })
        # SEMI FLATTENING
        #   max_level: 1 : Semi-flattening (default)
        self.assertEqual(flatten_record(nested_record, max_level=1),
                         {
                             "c_pk": 1,
                             "c_varchar": "1",
                             "c_int": 1,
                             "c_obj__nested_prop1": "value_1",
                             "c_obj__nested_prop2": "value_2",
                             "c_obj__nested_prop3": '{"multi_nested_prop1": "multi_value_1", "multi_nested_prop2": '
                                                    '"multi_value_2"}'
                         })
        # FLATTENING
        self.assertEqual(flatten_record(nested_record, max_level=10),
                         {
                             "c_pk": 1,
                             "c_varchar": "1",
                             "c_int": 1,
                             "c_obj__nested_prop1": "value_1",
                             "c_obj__nested_prop2": "value_2",
                             "c_obj__nested_prop3__multi_nested_prop1": "multi_value_1",
                             "c_obj__nested_prop3__multi_nested_prop2": "multi_value_2"
                         })
    def test_flatten_record_with_flatten_schema(self):
        """When a flatten_schema is supplied, values whose schema allows
        object/array types are stringified; without it they pass through."""
        flatten_record = flattening.flatten_record
        flatten_schema = {
            "id": {
                "type": [
                    "object",
                    "array",
                    "null"
                ]
            }
        }
        # (use_flatten_schema, input record, expected output)
        test_cases = [
            (
                True,
                {
                    "id": 1,
                    "data": "xyz"
                },
                {
                    "id": "1",
                    "data": "xyz"
                }
            ),
            (
                False,
                {
                    "id": 1,
                    "data": "xyz"
                },
                {
                    "id": 1,
                    "data": "xyz"
                }
            )
        ]
        for idx, (should_use_flatten_schema, record, expected_output) in enumerate(test_cases):
            output = flatten_record(record, flatten_schema if should_use_flatten_schema else None)
            self.assertEqual(output, expected_output, f"Test {idx} failed. Testcase: {test_cases[idx]}")
| StarcoderdataPython |
from datetime import datetime
from cleo.commands.command import Command
from cleo.helpers import argument, option
from poetry.core.version.exceptions import InvalidVersion
from poetry.poetry import Poetry
from poetry_release.git import Git
from poetry_release.exception import UpdateVersionError
from poetry_release.replace import Template, Replacer
from poetry_release.settings import Settings
from poetry_release.version import ReleaseLevel, ReleaseVersion
class ReleaseCommand(Command): # type: ignore
    """Cleo command implementing the `poetry release` workflow:
    bump the version, update replacement templates, commit, tag,
    push, and optionally bump to the next dev pre-release."""
    name = "release"

    description = (
        "Plugin for release management in projects "
        "based on Poetry"
    )

    arguments = [
        argument(
            "level",
            description="Release level",
            optional=True,
            default=ReleaseLevel.RELEASE.value,
        )
    ]

    options = [
        option(
            "disable-push",
            description="Disable push commits and tags in repository",
            flag=True,
            value_required=False,
        ),
        option(
            "disable-tag",
            description="Disable creating git tags",
            flag=True,
            value_required=False,
        ),
        option(
            "disable-dev",
            description="Disable bump version after stable release",
            flag=True,
            value_required=False,
        )
    ]

    help = """\
  The release command helps you to control your project version.
  It allows bump version, create tags and commit and push them
  to project repository. Supported release levels are:
      major, minor, patch, release, rc, beta, alpha
  """

    def handle(self) -> None:
        """Run the release: validate the repo, confirm with the user,
        write the new version, then commit/tag/push as configured.

        Errors are reported via colored console output rather than raised.
        """
        try:
            settings = Settings(self)
            git = Git(settings)
            # Preconditions: a git repo must exist and be clean.
            if not git.repo_exists():
                self.line(
                    "<fg=yellow>Git repository not found. "
                    "Please initialize repository in your project"
                )
                return
            if git.has_modified():
                self.line(
                    "<fg=yellow>There are uncommitted changes "
                    "in the repository. Please make a commit</>"
                )
                return
            poetry = self.application.poetry
            next_release = ReleaseLevel.parse(self.argument("level"))
            releaser = ReleaseVersion(
                poetry.package.version,
                next_release,
            )
            # Interactive confirmation; accepts y/j (case-insensitive), default No.
            if not self.confirm(
                f'Release {poetry.package.name} {releaser.next_version.text}?',
                False, '(?i)^(y|j)'
            ):
                return
            if releaser.version.text == releaser.next_version.text:
                self.line("<fg=yellow> Version doesn't changed</>")
                return
            # Template placeholders available to user-configured replacements.
            templates = Template(
                package_name=poetry.package.name,
                prev_version=releaser.version.text,
                version=releaser.next_version.text,
                next_version=releaser.next_pre_version.text
                if releaser.next_pre_version else "",
                date=datetime.today().strftime("%Y-%m-%d"),
            )
            replacer = Replacer(templates, settings)
            replacer.update_replacements()
            message = replacer.generate_messages()
            self.set_version(poetry, releaser.next_version.text)
            # GIT RELEASE COMMIT
            git.create_commit(message.release_commit)
            if not settings.disable_push:
                git.push_commit()
            # GIT TAG
            if not settings.disable_tag:
                git.create_tag(message.tag_name, message.tag_message)
                if not settings.disable_push:
                    git.push_tag(message.tag_name)
            # GIT NEXT ITERATION COMMIT
            # After a stable release, bump to the next pre-release for development.
            if not settings.disable_dev:
                pre_release = releaser.next_pre_version
                if pre_release is not None:
                    self.set_version(poetry, pre_release.text)
                    git.create_commit(message.post_release_commit)
                    if not settings.disable_push:
                        git.push_commit()
        except RuntimeError as e:
            self.line(f"<fg=red>{e}</>")
        except InvalidVersion as e:
            self.line(f"<fg=yellow>{e}</>")
        except UpdateVersionError as e:
            self.line(f"<fg=yellow>{e}</>")

    def set_version(self, poetry: Poetry, version: str) -> None:
        """Persist *version* into [tool.poetry] of pyproject.toml."""
        content = poetry.file.read()
        poetry_content = content["tool"]["poetry"]
        poetry_content["version"] = version
        poetry.file.write(content)
def release_factory() -> ReleaseCommand:
    """Factory registered with the poetry plugin system to build the command."""
    return ReleaseCommand()
| StarcoderdataPython |
6638079 | <reponame>Open-Innovation-Platform-OIP/contentready_oip
from __future__ import unicode_literals
import frappe
def get_context(context):
    """Frappe page hook: use the bound document's full name as the page title."""
    setattr(context, "title", context.doc.full_name)
    return context
| StarcoderdataPython |
def do(i):
    """Return *i* plus 2."""
    offset = 2
    return i + offset
3472561 | '''Testing beetools__init__()'''
from pathlib import Path
from beetools.beearchiver import Archiver
import beetools
# First line of the module docstring doubles as the project description.
_PROJ_DESC = __doc__.split('\n')[0]
# Path of this test module, handed to the Archiver below.
_PROJ_PATH = Path(__file__)
def project_desc():
    """Return the module-level project description string."""
    return _PROJ_DESC
b_tls = Archiver(_PROJ_DESC, _PROJ_PATH)
class TestBEETools:
    """Pytest suite for beetools.BEETools; `env_setup_self_destruct` is a
    fixture (defined in conftest) providing a temporary working directory."""
    def test__init__(self, env_setup_self_destruct):
        """Assert class __init__"""
        env_setup = env_setup_self_destruct
        t_beetools = beetools.BEETools("BEETools", env_setup.dir)
        assert t_beetools.success
        pass
    def test_method_1(self, env_setup_self_destruct):
        """Assert method_1 returns truthy for a plain message."""
        env_setup = env_setup_self_destruct
        t_beetools = beetools.BEETools("BEETools", env_setup.dir)
        assert t_beetools.method_1("THis is a test message for Method_1")
        pass
    def test_do_examples(self):
        # Smoke test: the examples must run without raising.
        beetools.do_examples()
del b_tls
| StarcoderdataPython |
11282913 | <filename>examples/botexample.py
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""A simple bot script.
This sample script leverages web.py (see http://webpy.org/). By default the
web server will be reachable at port 8080 - append a different port when
launching the script if desired. ngrok can be used to tunnel traffic back to
your server if you don't wish to expose your machine publicly to the Internet.
You must create a Spark webhook that points to the URL where this script is
hosted. You can do this via the CiscoSparkAPI.webhooks.create() method.
Additional Spark webhook details can be found here:
https://developer.ciscospark.com/webhooks-explained.html
A bot must be created and pointed to this server in the My Apps section of
https://developer.ciscospark.com. The bot's Access Token should be added as a
'SPARK_ACCESS_TOKEN' environment variable on the web server hosting this
script.
NOTE: While this script is written to support Python versions 2 and 3, as of
the time of this writing web.py (v0.38) only supports Python 2.
Therefore this script only supports Python 2.
"""
from __future__ import print_function
from builtins import object
import json
import web
import requests
from ciscosparkapi import CiscoSparkAPI, Webhook
# Module constants
# Endpoint returning one random cat fact as JSON ({"facts": [...]}).
CAT_FACTS_URL = 'http://catfacts-api.appspot.com/api/facts?number=1'
# Global variables
urls = ('/sparkwebhook', 'webhook')  # Your Spark webhook should point to http://<serverip>:8080/sparkwebhook
app = web.application(urls, globals())  # Create the web application instance
api = CiscoSparkAPI()  # Create the Cisco Spark API connection object
def get_catfact():
    """Get a cat fact from appspot.com and return it as a string.

    Functions for Soundhound, Google, IBM Watson, or other APIs can be added
    to create the desired functionality into this bot.

    Raises:
        requests.HTTPError: if the API responds with an error status.
    """
    # SECURITY(review): verify=False disables TLS certificate validation and
    # permits man-in-the-middle interception. Kept as-is (the URL is plain
    # http anyway), but remove it if this endpoint ever moves to https.
    response = requests.get(CAT_FACTS_URL, verify=False)
    # Fail loudly on HTTP errors instead of a confusing KeyError below.
    response.raise_for_status()
    response_dict = response.json()  # idiomatic: let requests decode the JSON
    return response_dict['facts'][0]
class webhook(object):
    """web.py handler mounted at /sparkwebhook; receives Spark webhook POSTs."""
    def POST(self):
        """Respond to inbound webhook JSON HTTP POSTs from Cisco Spark."""
        json_data = web.data()  # Get the POST data sent from Spark
        print("\nWEBHOOK POST RECEIVED:")
        print(json_data, "\n")
        webhook_obj = Webhook(json_data)                                # Create a Webhook object from the JSON data
        room = api.rooms.get(webhook_obj.data.roomId)                   # Get the room details
        message = api.messages.get(webhook_obj.data.id)                 # Get the message details
        person = api.people.get(message.personId)                       # Get the sender's details
        print("NEW MESSAGE IN ROOM '{}'".format(room.title))
        print("FROM '{}'".format(person.displayName))
        print("MESSAGE '{}'\n".format(message.text))
        # This is a VERY IMPORTANT loop prevention control step.
        # If you respond to all messages...  You will respond to the messages
        # that the bot posts and thereby create a loop condition.
        me = api.people.me()
        if message.personId == me.id:
            # Message was sent by me (bot); do not respond.
            return 'OK'
        else:
            # Message was sent by someone else; parse message and respond.
            if "/CAT" in message.text:
                print("FOUND '/CAT'")
                cat_fact = get_catfact()                                # Get a cat fact
                print("SENDING CAT FACT '{}'".format(cat_fact))
                response_message = api.messages.create(room.id, text=cat_fact) # Post the fact to the room where the request was received
            return 'OK'
if __name__ == '__main__':
    # Start the web.py web server
    app.run()
| StarcoderdataPython |
3226769 | <gh_stars>1-10
#!/usr/bin/env python
"""
Usage: thingpin -h
thingpin [options] run
thingpin create-config
thingpin [options] install-service
Monitor GPIO pins and update AWS IoT via MQTT.
Arguments:
run run the thingpin monitor
create-config generate sample YAML thingpin-config.yml and exit
install-service install daemon to run automatically on boot
Options:
-h --help show usage and exit
-c --config=CONFIG YAML config file [default: thingpin-config.yml]
-p --pidfile=PIDFILE run as a daemon, writing process id to PIDFILE
-l --log=LOG log file to use. By default /var/log/thingpin.log
is used when running as a daemon and standard
out is used otherwise.
"""
import os
import sys
import time
import yaml
import docopt
import shutil
import pkg_resources
import traceback
import signal
from .logger import Logger
from .notifiers import create_notifier
try:
    from .thingpin import Thingpin
except ImportError:
    # Thingpin pulls in Raspberry Pi-only GPIO modules; off-Pi the import
    # fails and main() reports the error instead of crashing at import time.
    Thingpin = None
def main():
    """CLI entry point: dispatch on the parsed docopt arguments.

    Returns:
        None on success, 1 when not running on a Raspberry Pi, 2 when the
        sample config would clobber an existing file. The __main__ guard
        passes the value to sys.exit().
    """
    args = docopt.docopt(__doc__)

    if args['create-config']:
        sample = pkg_resources.resource_filename('thingpin',
                                                 'thingpin-config.yml.sample')
        config_file = 'thingpin-config.yml'
        if os.path.exists(config_file):
            print('config file {} already exists, not overwriting'.format(
                config_file))
            return 2
        else:
            shutil.copyfile(sample, config_file)
            print('created config file: {}'.format(config_file))
            return

    config_file = os.path.expanduser(args['--config'])
    with open(config_file) as f:
        # NOTE(review): switched from yaml.load() (which can construct
        # arbitrary Python objects) to safe_load(); a plain config file
        # needs nothing more.
        config = yaml.safe_load(f)

    if args['install-service']:
        print('** coming soon - watch this space **')
        return

    log = get_logger(args)

    if Thingpin is None:
        log.error('must run on Raspberry Pi')
        return 1

    # TODO: support more than one
    # bugfix: dict.items() is not subscriptable on Python 3; take the first
    # (name, config) pair via an iterator instead of items()[0].
    notifier_name, notifier_cfg = next(iter(config['notifiers'].items()))
    notifier = create_notifier(notifier_name, notifier_cfg)

    service = Thingpin(notifier,
                       pin_mode=config['pin_mode'],
                       things=config['things'],
                       debug=config.get('debug', False))

    # When daemonizing, record our pid for the service manager.
    pidfile = args.get('--pidfile')
    if pidfile is not None:
        with open(os.path.expanduser(pidfile), "w") as f:
            f.write(str(os.getpid()))

    try:
        service.run()
    except KeyboardInterrupt:
        log.info('exiting on Ctrl-C...')
        service.cleanup()
        return
def get_logger(args):
    """Build the Logger; daemons (--pidfile set) default to /var/log/thingpin.log.

    An explicit --log always wins; otherwise log to stdout unless daemonizing.
    """
    explicit = args.get('--log')
    if explicit is not None:
        return Logger(log_file=explicit)
    fallback = '/var/log/thingpin.log' if args.get('--pidfile') else None
    return Logger(log_file=fallback)
if __name__ == '__main__':
    # main() returns None/1/2; sys.exit maps that to the process exit status.
    sys.exit(main())
| StarcoderdataPython |
points = 0
print("What is the land velocity of an unladen swallow? ")
print("a)50 kph")
print("b)9001 kph")
print("c)African or European?")
answer = input(">>> ").lower()
if answer == "c":
print("Good Job!!")
points = points + 20
else:
print("got to KnowYourMeme.com--you're out of touch")
## < > <= >= != == is is not
| StarcoderdataPython |
import os
import cv2
import tensorflow as tf
import numpy as np
from tqdm import tqdm
from test_single_img import load_model_and_label, pred_single
# Load the network weights and label map once, then run inference over every
# image in IMAGE_DIR, saving each prediction as a .npy file in NPY_DIR.
load_model_and_label()
IMAGE_DIR = 'net/need_tagged/'
NPY_DIR = 'net/predictions/'
for file_name in tqdm(os.listdir(IMAGE_DIR)):
    img = cv2.imread(os.path.join(IMAGE_DIR, file_name))
    prediction = pred_single(img)
    # file_name[:-3] strips a 3-char extension, keeping the dot ('a.jpg' -> 'a.npy');
    # NOTE(review): 4-char extensions like '.jpeg' would break this — confirm inputs.
    np.save(os.path.join(NPY_DIR, file_name[:-3]+'npy'), prediction)
| StarcoderdataPython |
import sys
# Split the input file (argv[1]) by keyword: lines containing 'temp' go to
# temp-out.csv, lines containing 'frequency' go to frequency.csv.
rep_word = ['temp']
rep_word2 = ["frequency"]
with open(sys.argv[1]) as oldfile, open("temp-out.csv","w") as newfile_temp:
    for line in oldfile:
        # any() over the one-element keyword list keeps the filter extensible.
        if any(bad_word in line for bad_word in rep_word):
            newfile_temp.write(line)
with open(sys.argv[1]) as oldfile, open("frequency.csv","w") as newfile_frequency:
    for line in oldfile:
        if any(bad_word2 in line for bad_word2 in rep_word2):
            newfile_frequency.write(line)
3494904 | <gh_stars>1-10
from django.apps import AppConfig
class ElgamalreConfig(AppConfig):
    """Django application configuration for the `elgamalre` app."""
    name = 'elgamalre'
| StarcoderdataPython |
347794 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import wx
from cairis.core.armid import *
from cairis.core.Borg import Borg
from WeaknessTargetListCtrl import WeaknessTargetListCtrl
from PersonaImpactListCtrl import PersonaImpactListCtrl
from GoalObstacleListCtrl import GoalObstacleListCtrl
__author__ = '<NAME>'
class WeaknessTargetPage(wx.Panel):
    """Notebook page listing weakness targets (threats or vulnerabilities,
    depending on the window id and preloaded target list)."""
    def __init__(self,parent,winId,cvName,targets):
        wx.Panel.__init__(self,parent)
        # One static box stretching to fill the page, wrapping the list control.
        topSizer = wx.BoxSizer(wx.VERTICAL)
        asBox = wx.StaticBox(self,-1)
        asBoxSizer = wx.StaticBoxSizer(asBox,wx.HORIZONTAL)
        topSizer.Add(asBoxSizer,1,wx.EXPAND)
        self.targetList = WeaknessTargetListCtrl(self,winId,cvName)
        self.targetList.load(targets)
        asBoxSizer.Add(self.targetList,1,wx.EXPAND)
        self.SetSizer(topSizer)
class PersonaImpactPage(wx.Panel):
    """Notebook page showing persona impact for a component view/environment.
    (Same layout as WeaknessTargetPage; the list control loads its own data.)"""
    def __init__(self,parent,winId,cvName,envName):
        wx.Panel.__init__(self,parent)
        topSizer = wx.BoxSizer(wx.VERTICAL)
        asBox = wx.StaticBox(self,-1)
        asBoxSizer = wx.StaticBoxSizer(asBox,wx.HORIZONTAL)
        topSizer.Add(asBoxSizer,1,wx.EXPAND)
        self.personaImpactList = PersonaImpactListCtrl(self,winId,cvName,envName)
        asBoxSizer.Add(self.personaImpactList,1,wx.EXPAND)
        self.SetSizer(topSizer)
class GoalObstaclePage(wx.Panel):
    """Notebook page listing goal obstacles for a component view/environment."""
    def __init__(self,parent,winId,cvName,envName):
        wx.Panel.__init__(self,parent)
        topSizer = wx.BoxSizer(wx.VERTICAL)
        asBox = wx.StaticBox(self,-1)
        asBoxSizer = wx.StaticBoxSizer(asBox,wx.HORIZONTAL)
        topSizer.Add(asBoxSizer,1,wx.EXPAND)
        self.goalObstacleList = GoalObstacleListCtrl(self,winId,cvName,envName)
        asBoxSizer.Add(self.goalObstacleList,1,wx.EXPAND)
        self.SetSizer(topSizer)
class WeaknessAnalysisNotebook(wx.Notebook):
    """Four-tab notebook: threats, vulnerabilities, persona impact, obstacles."""
    def __init__(self,parent,cvName,envName):
        wx.Notebook.__init__(self,parent,WEAKNESSANALYSIS_NOTEBOOKWEAKNESS_ID)
        b = Borg()
        # Threat and vulnerability targets come from one combined DB query.
        thrTargets,vulTargets = b.dbProxy.componentViewWeaknesses(cvName,envName)
        p1 = WeaknessTargetPage(self,WEAKNESSANALYSIS_LISTTHREATS_ID,cvName,thrTargets)
        p2 = WeaknessTargetPage(self,WEAKNESSANALYSIS_LISTVULNERABILITIES_ID,cvName,vulTargets)
        p3 = PersonaImpactPage(self,WEAKNESSANALYSIS_LISTPERSONAIMPACT_ID,cvName,envName)
        p4 = GoalObstaclePage(self,WEAKNESSANALYSIS_LISTGOALOBSTACLE_ID,cvName,envName)
        self.AddPage(p1,'Threats')
        self.AddPage(p2,'Vulnerabilities')
        self.AddPage(p3,'Persona Impact')
        self.AddPage(p4,'Obstacles')
| StarcoderdataPython |
1822546 | #!/usr/bin/env python
# encoding: utf-8
from .views import auth_bp
| StarcoderdataPython |
3354140 | <reponame>wookayin/acme
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for action_metrics_observers."""
from acme import specs
from acme.testing import fakes
from acme.utils.observers import action_metrics
import dm_env
import numpy as np
from absl.testing import absltest
def _make_fake_env() -> dm_env.Environment:
  """Build a 10-step fake environment with a 1-D bounded float32 action."""
  observations = specs.Array(shape=(10, 5), dtype=np.float32)
  actions = specs.BoundedArray(
      shape=(1,), dtype=np.float32, minimum=-100., maximum=100.)
  rewards = specs.Array(shape=(), dtype=np.float32)
  discounts = specs.BoundedArray(
      shape=(), dtype=np.float32, minimum=0., maximum=1.)
  env_spec = specs.EnvironmentSpec(
      observations=observations,
      actions=actions,
      rewards=rewards,
      discounts=discounts,
  )
  return fakes.Environment(env_spec, episode_length=10)
# Shared fixtures: one fake environment and its reset() timestep, reused
# across all test methods below.
_FAKE_ENV = _make_fake_env()
_TIMESTEP = _FAKE_ENV.reset()
class ActionMetricsTest(absltest.TestCase):
  """Tests for ContinuousActionObserver's per-dimension action statistics
  (max/min/mean/p50 keyed by the action element's index)."""
  def test_observe_nothing(self):
    # No observations at all -> no metrics.
    observer = action_metrics.ContinuousActionObserver()
    self.assertEqual({}, observer.get_metrics())
  def test_observe_first(self):
    # observe_first alone records no actions, so metrics stay empty.
    observer = action_metrics.ContinuousActionObserver()
    observer.observe_first(env=_FAKE_ENV, timestep=_TIMESTEP)
    self.assertEqual({}, observer.get_metrics())
  def test_observe_single_step(self):
    observer = action_metrics.ContinuousActionObserver()
    observer.observe_first(env=_FAKE_ENV, timestep=_TIMESTEP)
    observer.observe(env=_FAKE_ENV, timestep=_TIMESTEP, action=np.array([1]))
    # With one sample every statistic equals that sample.
    self.assertEqual(
        {
            'action[0]_max': 1,
            'action[0]_min': 1,
            'action[0]_mean': 1,
            'action[0]_p50': 1,
        },
        observer.get_metrics(),
    )
  def test_observe_multiple_step(self):
    observer = action_metrics.ContinuousActionObserver()
    observer.observe_first(env=_FAKE_ENV, timestep=_TIMESTEP)
    observer.observe(env=_FAKE_ENV, timestep=_TIMESTEP, action=np.array([1]))
    observer.observe(env=_FAKE_ENV, timestep=_TIMESTEP, action=np.array([4]))
    observer.observe(env=_FAKE_ENV, timestep=_TIMESTEP, action=np.array([5]))
    # Statistics aggregate over the episode: samples 1, 4, 5.
    self.assertEqual(
        {
            'action[0]_max': 5,
            'action[0]_min': 1,
            'action[0]_mean': 10 / 3,
            'action[0]_p50': 4,
        },
        observer.get_metrics(),
    )
  def test_observe_zero_dimensions(self):
    observer = action_metrics.ContinuousActionObserver()
    observer.observe_first(env=_FAKE_ENV, timestep=_TIMESTEP)
    # Scalar (rank-0) action -> empty index brackets in the metric names.
    observer.observe(env=_FAKE_ENV, timestep=_TIMESTEP, action=np.array(1))
    self.assertEqual(
        {
            'action[]_max': 1,
            'action[]_min': 1,
            'action[]_mean': 1,
            'action[]_p50': 1,
        },
        observer.get_metrics(),
    )
  def test_observe_multiple_dimensions(self):
    observer = action_metrics.ContinuousActionObserver()
    observer.observe_first(env=_FAKE_ENV, timestep=_TIMESTEP)
    # A 2x2 action yields one metric group per element index '[i, j]'.
    observer.observe(
        env=_FAKE_ENV, timestep=_TIMESTEP, action=np.array([[1, 2], [3, 4]]))
    np.testing.assert_equal(
        {
            'action[0, 0]_max': 1,
            'action[0, 0]_min': 1,
            'action[0, 0]_mean': 1,
            'action[0, 0]_p50': 1,
            'action[0, 1]_max': 2,
            'action[0, 1]_min': 2,
            'action[0, 1]_mean': 2,
            'action[0, 1]_p50': 2,
            'action[1, 0]_max': 3,
            'action[1, 0]_min': 3,
            'action[1, 0]_mean': 3,
            'action[1, 0]_p50': 3,
            'action[1, 1]_max': 4,
            'action[1, 1]_min': 4,
            'action[1, 1]_mean': 4,
            'action[1, 1]_p50': 4,
        },
        observer.get_metrics(),
    )
# Run the tests when this module is executed directly.
if __name__ == '__main__':
  absltest.main()
| StarcoderdataPython |
297184 | import numpy as np
# import re
import pickle
from keras.models import load_model
from keras.preprocessing import text, sequence
from keras.preprocessing.sequence import pad_sequences
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from lime.lime_text import LimeTextExplainer
import spacy
class AmazonReviewCNN:
    """Character-level CNN sentiment classifier for Amazon reviews with
    LIME-based explanations.

    Loads a trained Keras model and a pickled character dictionary from
    ``<path>/models`` and writes explanation artifacts (a word-level bar
    chart PNG and HTML pages) to ``<path>/static/output``.
    """

    def __init__(self, path):
        """Load the model, character dictionary, stopwords and LIME explainer.

        Args:
            path: Project root directory containing ``models/`` and
                ``static/output/``.
        """
        self.model_path = path + "/models/amazon_reviews_char_cnn.h5"
        self.tokenizer_path = path + "/models/amazon_reviews_char_cnn_dictionary.pkl"
        self.output_path = path + "/static/output/"
        self.out_words = "word_level_explanation.png"
        self.out_full_text = "full_text_explanation.html"
        self.text = "default value of text"
        self.page_title = "Amazon Reviews CNN"
        # Example review texts shown in the UI for each class.
        self.class_1_text = "Amongst the fastest for less than half the price, yes this is a nice product. I tested this Samsung 128GB 100MB/s (U3) MicroSD EVO Select extensively . all I can say this is a solid performer on a bargain price."
        self.class_1_label = "Positive"
        self.class_2_text = "I had this card in my phone for less than a month. Suddenly, my phone started restarting itself. One day it went into a restart loop . Would not boot up. After some troubleshooting I removed the SD card and my phone finally restarted. Since I format the card to the phone, I can get nothing off of it. It will not work in my phone. It's a total loss! Don't buy this for a phone."
        self.class_2_label = "Negative"
        self.class_names = ['negative', 'positive']
        # Load the model and supporting resources here.
        self.model = self.load_model_text()
        self.dictionary = self.load_tokenizer_model()
        self.stopwords_spacy = self.load_spacy_stopwords()
        self.explainer = self.create_explainer_object()

    def __del__(self):
        # Bug fix: the message previously named the wrong class
        # ("AmazonReviewLSTM") — a copy/paste left-over from the LSTM variant.
        print('Destructor called, AmazonReviewCNN object deleted.')

    def load_model_text(self):
        """Load and return the trained Keras model from disk."""
        return load_model(self.model_path)

    def load_tokenizer_model(self):
        """Load the pickled character dictionary used to index reviews."""
        with open(self.tokenizer_path, 'rb') as handle:
            return pickle.load(handle)

    def load_spacy_stopwords(self):
        """Return spaCy's English stopwords plus a few common contractions."""
        nlp = spacy.load('en')
        stopwords_spacy = list(set(nlp.Defaults.stop_words))
        stopwords_spacy.extend(["it's", "i've"])
        return stopwords_spacy

    def tokenize_string(self, string):
        """Split on spaces after stripping basic punctuation; drop stopwords."""
        string = string.replace(',', '').replace('!', '').replace('.', '').replace("?", "")
        return [word for word in string.split(' ')
                if word.lower() not in self.stopwords_spacy]

    def prediction_pipeline(self, text_test):
        """Predict class probabilities for an iterable of raw review strings.

        Each review is mapped to character indices via the dictionary,
        unknown characters (index -1) are dropped, and the sequence is
        padded to length 800 before being fed to the CNN.

        Returns:
            np.ndarray of shape (len(text_test), n_classes).
        """
        final_output = []
        for t in text_test:
            review_idx = np.array(self.dictionary.doc2idx(list(t)))
            filtered_review_idx = review_idx[review_idx != -1]
            pad_review_idx = pad_sequences(
                [filtered_review_idx], maxlen=800, padding='post', value=-1)[0]
            outputs = self.model.predict(np.array([pad_review_idx]))
            final_output.append(outputs[0].tolist())
        return np.array(final_output)

    def create_explainer_object(self):
        """Build the LIME text explainer configured with our tokenizer."""
        explainer = LimeTextExplainer(
            split_expression=self.tokenize_string,
            class_names=self.class_names
        )
        return explainer

    def explain(self):
        """Explain the current ``self.text`` and write the output artifacts."""
        explainer = self.explainer.explain_instance(
            self.text, classifier_fn=self.prediction_pipeline, num_features=10)
        plot = explainer.as_pyplot_figure()
        plot.tight_layout()
        plot.savefig(self.output_path + self.out_words)
        explainer.save_to_file(self.output_path + self.out_full_text)
        # Copy the generated HTML while stripping the 'exp_div' elements.
        with open(self.output_path + self.out_full_text) as oldfile, open(self.output_path + '/full_explanation.html', 'w') as newfile:
            for line in oldfile:
                if 'exp_div' not in line:
                    newfile.write(line)
        print("Success", self.text)
| StarcoderdataPython |
6415005 | <filename>Design/chanwoo/loststars/controller.py
import db
import button_maker as button
import random
def show_userinfo(bot):
    """Send the user's stored profile (photo plus details) back to the chat.

    NOTE(review): the Korean string literals below appear encoding-damaged
    (some are split mid-character across lines); they are preserved verbatim.
    """
    # Fetch the saved profile row for this Telegram chat id.
    result = db.get_userinfo("telegram",str(bot.chat_id))
    text = '''{}๋์ด ์
๋ ฅํ์ ์ ๋ณด๋ ์๋์ ๊ฐ์ต๋๋ค\n์ฑ๋ณ : {}\n๋์ด : {}\n์ฌํ์ง : {}\n์ฌํ๊ธฐ๊ฐ : {}\n์ฌํ์ ๋ณด : {}\n์นด์นด์คid : {}\n''' \
    .format(bot.name, result['sex'][0], result['age'][0], result['city'][0], result['start_date'][0] \
    + " ~ " + result['end_date'][0], result['appeal_tag'][0],result['kakao_id'][0])
    # Send the profile image with the formatted caption and an "update" button.
    bot.send_img(result['profile_image'][0],text,button.update_button())
    #bot.send_message("์์ ํ๊ณ ์ถ์ ํญ๋ชฉ์ ๊ณจ๋ผ๋ด",button.userinfo_update_keyboard())
    # if bot.state == "update":
    #
    #     bot.send_message("test",button.userinfo_update_keyboard())
    # else:
    #     bot.send_message("test", button.existing_user_keyboard())
def create_callback_data(button_type, category):
    """Join a button type and category into a single callback-data string."""
    fields = (button_type, category)
    return ";".join(fields)
def separate_callback_data(data):
    """Split callback data produced by create_callback_data into its fields."""
    fields = data.split(";")
    return fields
# Controller for the user's button (callback query) input.
def button_controller(bot):
    """Handle an inline-button (callback query) update from the user.

    Dispatches on ``bot.state`` (the stored dialog state) and on the leading
    field of the callback data to drive the signup, calendar, kakao-id,
    match-browsing and profile-update flows.

    NOTE(review): many Korean string literals below appear encoding-damaged
    (some are split mid-character across lines); they are preserved verbatim.
    """
    query = bot.text['callback_query']
    # Determine which keyboard input this callback belongs to.
    # (original damaged comment preserved below)
    # ์ด๋ค keyboard ์
๋ ฅ์ธ์ง ๊ตฌ๋ถ
    button_type = separate_callback_data(query['data'])[0]
    # --- signup flow: sex selection ---
    if bot.state == 'sex':
        sex = separate_callback_data(query['data'])[1]
        db.insert_value(bot.chat_id,'sex',sex)
        db.insert_value(bot.chat_id,'dialog_state', "age")
        bot.send_message("์ด๋ฒ์๋ ๋์ ๋์ด๋ฅผ ์๊ณ ์ถ์ด")
    # --- calendar flow: pick travel start then end date ---
    elif bot.state == 'date' or bot.state=="update_date":
        #
        message_id = query['message']['message_id']
        text = query['message']['text']
        user_input = button.process_calendar_selection(bot)
        if user_input:
            print(user_input)
            # 'is_end' toggles between recording the start and the end date.
            if not db.get_single_value(bot.chat_id,'is_end'):
                db.insert_value(bot.chat_id,'start_date',user_input)
                db.insert_value(bot.chat_id,'is_end',1)
                start = db.get_single_value(bot.chat_id,'start_date')
                end = db.get_single_value(bot.chat_id, 'end_date')
                bot.edit_message(text,message_id,button.create_calendar(start=start,end=end))
            else:
                db.insert_value(bot.chat_id,'end_date',user_input)
                db.insert_value(bot.chat_id,'is_end',0)
                start = db.get_single_value(bot.chat_id, 'start_date')
                end = db.get_single_value(bot.chat_id, 'end_date')
                bot.edit_message(text, message_id, button.create_calendar(start=start, end=end))
    # --- reveal a matched user's kakao id (limited to 3 reveals) ---
    elif button_type =="kakao_id":
        open_cnt = db.get_single_value(bot.chat_id, "open_cnt")
        if open_cnt > 3:
            bot.send_message("์นด์นด์คํกid๋ ํ๋ฃจ์ 3๋ฒ๋ง ๋ณผ ์ ์์ด ใ
ใ
")
        else:
            bot.send_message(separate_callback_data(query['data'])[1])
            db.insert_value(bot.chat_id,"open_cnt",int(open_cnt)+1)
    # --- browse matched users with LEFT/RIGHT arrow buttons ---
    elif bot.state == "match":
        prev_or_next = query['data']
        # id of the message to update in place
        match_photo_id = db.get_single_value(bot.chat_id, "match_photo_id")
        # fetch the matched-user list from the db (comma-separated string)
        matched_list = (db.get_single_value(bot.chat_id,"matched_list")).split(",")
        match_cnt = len(matched_list)
        # current user's index into the list (initial value 0)
        idx = db.get_single_value(bot.chat_id, "match_idx")
        # Advance or rewind the index, wrapping around at both ends.
        if prev_or_next == "RIGHT":
            idx += 1
            if idx >= match_cnt:
                idx=0
            db.insert_value(bot.chat_id, "match_idx", int(idx))
        else:
            idx -= 1
            if idx<0:
                idx = match_cnt-1
            db.insert_value(bot.chat_id, "match_idx", int(idx))
        print(idx)
        # Look up the person at the newly selected index; entries are encoded
        # as a one-letter platform prefix ('t'/'k'/'f') followed by the id.
        #๋ฐฉํฅํค ์ก์
์ ์ํ ์ธ๋ฑ์ค์ ๋ํ ์ฌ๋ ์ฐพ๊ธฐ
        matched_person_platform = matched_list[idx][0]
        matched_person_id = matched_list[idx][1:]
        if matched_person_platform == 't':
            matched_person_info = db.get_userinfo("telegram",matched_person_id)
            img_url = matched_person_info['profile_image'][0]
            text = '''{}๋์ ์ ๋ณด๋ ์๋์ ๊ฐ์ต๋๋ค\n์ฑ๋ณ : {}\n๋์ด : {}\n์ฌํ์ง : {}\n์ฌํ๊ธฐ๊ฐ : {}\nํ๊ทธ : {}\n''' \
                .format(matched_person_info['user_id'][0], str(matched_person_info['sex'][0]), matched_person_info['age'][0],
                        matched_person_info['city'][0], matched_person_info['start_date'][0] + " ~ " +matched_person_info['end_date'][0],
                        matched_person_info['appeal_tag'][0])
            bot.edit_media(img_url, match_photo_id,button.kakao_button(matched_person_info['kakao_id'][0]))
            bot.edit_caption(text, match_photo_id,button.kakao_button(matched_person_info['kakao_id'][0]))
        elif matched_person_platform =='k':
            matched_person_info = db.get_userinfo("kakaotalk", matched_person_id)
            img_url = matched_person_info['profile_image'][0]
            print(img_url)
            text = '''{}๋์ ์ ๋ณด๋ ์๋์ ๊ฐ์ต๋๋ค\n์ฑ๋ณ : {}\n๋์ด : {}\n์ฌํ์ง : {}\n์ฌํ๊ธฐ๊ฐ : {}\nํ๊ทธ : {}\n''' \
                .format(matched_person_info['user_id'][0], str(matched_person_info['sex'][0]),
                        matched_person_info['age'][0],
                        matched_person_info['city'][0],
                        matched_person_info['start_date'][0] + " ~ " + matched_person_info['end_date'][0],
                        matched_person_info['appeal_tag'][0])
            print(img_url)
            bot.edit_media(img_url, match_photo_id, button.kakao_button(matched_person_info['kakao_id'][0]))
            bot.edit_caption(text, match_photo_id, button.kakao_button(matched_person_info['kakao_id'][0]))
        elif matched_person_platform == 'f':
            matched_person_info = db.get_userinfo("facebook", matched_person_id)
            img_url = matched_person_info['profile_image'][0]
            text = '''{}๋์ ์ ๋ณด๋ ์๋์ ๊ฐ์ต๋๋ค\n์ฑ๋ณ : {}\n๋์ด : {}\n์ฌํ์ง : {}\n์ฌํ๊ธฐ๊ฐ : {}\nํ๊ทธ : {}\n''' \
                .format(matched_person_info['user_id'][0], str(matched_person_info['sex'][0]),
                        matched_person_info['age'][0],
                        matched_person_info['city'][0],
                        matched_person_info['start_date'][0] + " ~ " + matched_person_info['end_date'][0],
                        matched_person_info['appeal_tag'][0])
            print(text)
            print(img_url)
            bot.edit_media(img_url, match_photo_id,button.kakao_button(matched_person_info['kakao_id'][0]))
            bot.edit_caption(text, match_photo_id,button.kakao_button(matched_person_info['kakao_id'][0]))
        # match_photo_id = db.get_single_value(bot.chat_id,"match_photo_id")
        # bot.edit_media("https://imgur.com/a/yWDcVZc",match_photo_id)
        # bot.edit_caption("๋ฐ๋๋ค",match_photo_id)
    # --- profile-update menu: route to the requested edit sub-flow ---
    elif bot.state == "update":
        data = query['data']
        print(data)
        if data == "์ฌ์ง ๋ฐ๊พธ๊ธฐ":
            db.insert_value(bot.chat_id, "dialog_state", "update")
            bot.send_message("๋ฐ๊พธ๊ณ ์ถ์ ์ฌ์ง์ ์ฌ๋ ค์ค")
            print("change")
        elif data == "ํ๊ทธ ๋ฐ๊พธ๊ธฐ":
            db.insert_value(bot.chat_id, "dialog_state", "update_appeal_tag")
            bot.send_message("๋ณ๊ฒฝํ ๋ด์ฉ์ ์์ฑํด์ ๋ณด๋ด์ค", button.existing_user_keyboard())
        elif data == "์ฌํ ์ผ์ ๋ฐ๊พธ๊ธฐ":
            db.insert_value(bot.chat_id, "dialog_state", "update_date")
            bot.send_message("์๋ ๋ฌ๋ ฅ์ผ๋ก ์ฌํ ์ผ์ ์์ ํด", button.existing_user_keyboard())
            bot.send_message("์ฌํ์ผ์ ์ ํ", button.create_calendar())
        elif data == "์ฌํ์ง ๋ฐ๊พธ๊ธฐ":
            db.insert_value(bot.chat_id, "dialog_state", "update_city")
            bot.send_message("๋ณ๊ฒฝํ ์ฌํ์ง๋ฅผ ์
๋ ฅํด์ค")
# Controller for the user's text (message) input.
def text_controller(bot):
    """Handle a plain text message from the user.

    Dispatches first on command-like texts (/start, signup, search, update)
    and then on ``bot.state`` to collect profile fields step by step.

    NOTE(review): many Korean string literals below appear encoding-damaged
    (some are split mid-character across lines); they are preserved verbatim.
    """
    # --- /start or "back to home": show the main keyboard ---
    if bot.text == '/start' or bot.text == "ํ์ผ๋ก":
        bot.send_message("์ฌํ์ ๋ ๋๋ค๋ ๊ฑด ์ ๋ง ํ๋ณตํ ์ผ์ด์ง,,\n๋๋ ์ด ์ง๊ตฌ๋ณ์ ์ฌํ์จ์ง ๋ฒ์จ 2342๋
์ด๋ ๋๋ค\n"
                         + "์๋ ๋ฒํผ ์ค์ ์ํ๋ ๋ฒํผ์ ๋๋ฌ๋ณด๋ ด",button.main_keyboard())
        db.insert_value(bot.chat_id,"dialog_state","start")
    # --- signup entry point: new members start the questionnaire ---
    elif bot.text == '๋ํ ์์':
        if bot.is_member == False:
            bot.send_message("๋ค๋ฅธ ๋ํ ๊ตฌํ๋ ์ฌ๋๋ค์๊ฒ ๋๋ฅผ ์๋ ค์ฃผ๊ธฐ ์ํด์ ๋์ ๋ํด์ ๋ช๊ฐ์ง ์๊ณ ์ถ์ด! ๋ช๊ฐ์ง ์ง๋ฌธ์ ๋๋ตํด์คฌ์ผ๋ฉด ์ข๊ฒ ์ด")
            bot.send_message("๋ํ์ ๊ตฌํ๊ธฐ ์ํด์๋ ์ฐ์ ์ ์ผ๋ก ๋์ ์ฑ๋ณ์ ์๋ ค์ค",button.sex())
            db.insert_value(bot.chat_id, 'dialog_state', 'sex')
        else:
            result = db.get_userinfo("telegram",str(bot.chat_id))
            bot.send_message("{} ๋์ {}์ ๊ฐ๋ค๊ณ ๊ธฐ์ตํ๊ณ ์๋๋ฐ ๋ง์ผ๋ฉด ๋ํ ์ฐพ๊ธฐ๋ฅผ ๋๋ฌ์ค ์๋ก์ด ์ฌํ์ ์ํ๋ฉด ์๋ก์ด ์ฌ์ ์ ์๋ ค์ค"
                            .format(result['start_date'][0]+" ~ "+format(result['end_date'][0]), result['city'][0]),button.existing_user_keyboard())
    # --- show the profile-update menu ---
    elif bot.text == '์ฌ์ฉ์ ์ ๋ณด ์์ ':
        db.insert_value(bot.chat_id,"dialog_state","update")
        bot.state = "update"
        show_userinfo(bot)
    #
    # elif bot.text == "์ฌ์ง ๋ฐ๊พธ๊ธฐ":
    #
    # db.insert_value(bot.chat_id,"dialog_state","update_photo")
    # bot.send_message("๋ฐ๊พธ๊ณ ์ถ์ ์ฌ์ง์ ์ฌ๋ ค์ค",button.userinfo_update_keyboard())
    #
    #
    #
    # elif bot.text == "ํ๊ทธ ๋ฐ๊พธ๊ธฐ":
    #
    # db.insert_value(bot.chat_id,"dialog_state","update_appeal_tag")
    # bot.send_message("๋ณ๊ฒฝํ ๋ด์ฉ์ ์์ฑํด์ ๋ณด๋ด์ค",button.userinfo_update_keyboard())
    #
    # elif bot.text == "์ฌํ ์ผ์ ๋ฐ๊พธ๊ธฐ":
    #
    # db.insert_value(bot.chat_id, "dialog_state", "update_date")
    # bot.send_message("์๋ ๋ฌ๋ ฅ์ผ๋ก ์ฌํ ์ผ์ ์์ ํด",button.userinfo_update_keyboard())
    # bot.send_message("์ฌํ์ผ์ ์ ํ",button.create_calendar())
    #
    #
    #
    #
    # elif bot.text == "์ฌํ์ง ๋ฐ๊พธ๊ธฐ":
    #
    # db.insert_value(bot.chat_id,"dialog_state","update_city")
    # bot.send_message("๋ณ๊ฒฝํ ์ฌํ์ง๋ฅผ ์
๋ ฅํด์ค")
    # --- register a new trip: open the calendar ---
    elif bot.text == "์๋ก์ด ์ฌํ ๋ฑ๋ก":
        db.insert_value(bot.chat_id, 'dialog_state', 'date')
        bot.send_message("์๋ก์ด ์ฌํ์ ๊ธฐ๊ฐ์ ๊ณจ๋ผ์ค", button.create_calendar())
    # --- search for matching travel companions ---
    elif bot.text == "๋ํ ์ฐพ๊ธฐ":
        _list=db.search_user("telegram",bot.chat_id)
        print(_list)
        if _list is not -1:
            if _list:
                bot.send_message("๋์ ์ด์ธ๋ฆฌ๋ ๋ํ์ ์ฐพ์์ด!", button.swiping_button())
                # Encode each match as a one-letter platform prefix plus id.
                matched_list = []
                for item in _list:
                    if item[1] == "telegram":
                        matched_list.append("t"+item[0])
                    elif item[1] == "kakaotalk":
                        matched_list.append("k" + item[0])
                    elif item[1] == "facebook":
                        matched_list.append("f" + item[0])
                random.shuffle(matched_list)
                matched_str = (",".join(matched_list))
                db.insert_value(bot.chat_id,"matched_list",matched_str)
                db.insert_value(bot.chat_id,"dialog_state","match")
                db.insert_value(bot.chat_id,"match_idx",0)
                # Show the first match immediately.
                matched_person_platform = matched_list[0][0]
                matched_person_id = matched_list[0][1:]
                if matched_person_platform == "t":
                    match1_info = db.get_userinfo("telegram", matched_person_id)
                elif matched_person_platform == "k":
                    match1_info = db.get_userinfo("kakaotalk",matched_person_id)
                elif matched_person_platform == "f":
                    match1_info = db.get_userinfo("facebook",matched_person_id)
                print(match1_info['kakao_id'][0])
                text = '''{}๋์ ์ ๋ณด๋ ์๋์ ๊ฐ์ต๋๋ค\n์ฑ๋ณ : {}\n๋์ด : {}\n์ฌํ์ง : {}\n์ฌํ๊ธฐ๊ฐ : {}\nํ๊ทธ : {}\n''' .format(match1_info['user_id'][0], str(match1_info['sex'][0]), match1_info['age'][0], match1_info['city'][0], match1_info['start_date'][0]+ " ~ " + match1_info['end_date'][0], match1_info['appeal_tag'][0])
                bot.send_img(match1_info['profile_image'][0],text,button.kakao_button(match1_info['kakao_id'][0]))
                db.insert_value(bot.chat_id, "match_photo_id", bot.message_id +2)
            else:
                bot.send_message("๋ํ์ ์ฐพ์ง ๋ชปํ์ด ใ
ใ
", button.main_keyboard())
    # --- signup flow: collect age, then ask for a profile photo ---
    elif bot.state == 'age':
        db.insert_value(bot.chat_id,'age',str(bot.text))
        db.insert_value(bot.chat_id,'dialog_state','profile_image')
        bot.send_message("๋์ ์ฌ์ง์ด ์์ผ๋ฉด ์ฌ๋๋ค์ด ์์๋ณด๊ธฐ ์ฌ์ธ๊บผ์ผ!")
    # --- signup flow: collect destination city, then ask for kakao id ---
    elif bot.state == "city":
        db.insert_value(bot.chat_id, 'city', str(bot.text))
        db.insert_value(bot.chat_id,"dialog_state","kakao_id")
        bot.send_message("์นด์นด์คํก ์์ด๋๋ฅผ ์๋ ค์ฃผ๋ฉด ๋งค์นญ๋ ์ฌ๋๋ค๊ณผ ์ฐ๋ฝํ ์ ์๋๋ฐ ์๋ ค์ค๋?")
    # --- signup flow: collect kakao id and finish registration ---
    elif bot.state == "kakao_id":
        db.insert_value(bot.chat_id, "kakao_id",str(bot.text))
        show_userinfo(bot)
        # Mark this user as fully registered (user_state = 1).
        db.insert_value(bot.chat_id, 'user_state', 1)
        db.insert_value(bot.chat_id, 'dialog_state', 'search')
    # --- signup flow: collect the appeal tag, then open the calendar ---
    elif bot.state == "appeal_tag":
        db.insert_value(bot.chat_id,'appeal_tag',str(bot.text))
        db.insert_value(bot.chat_id, 'dialog_state', 'date')
        bot.send_message("์ฌํ ๊ธฐ๊ฐ์ ๊ณจ๋ผ์ค", button.create_calendar())
    # --- update flows: apply the edited field and re-show the profile ---
    elif bot.state == "update_appeal_tag":
        db.insert_value(bot.chat_id,"appeal_tag",str(bot.text))
        bot.send_message("๋์ ์ ๋ณด๊ฐ ์๋์ ๊ฐ์ด ์์ ๋์์ด")
        db.insert_value(bot.chat_id,"dialog_state","update")
        show_userinfo(bot)
    elif bot.state == "update_city":
        db.insert_value(bot.chat_id,"city",str(bot.text))
        bot.send_message("๋์ ์ ๋ณด๊ฐ ์๋์ ๊ฐ์ด ์์ ๋์์ด")
        show_userinfo(bot)
    else:
        bot.send_message("์์ง ๊ฐ๋ฐ์ค์ด๋ค!!!!") | StarcoderdataPython |
8005298 | import datetime as dt
import unittest
import pandas as pd
import numpy as np
import numpy.testing as npt
import seaice.nasateam as nt
import seaice.tools.plotter.daily_extent as de
class Test_BoundingDateRange(unittest.TestCase):
    """Tests for de._bounding_date_range."""

    def _assert_bounds(self, today, month_bounds, expected_bounds):
        """Check the window computed around *today* for the given offsets."""
        self.assertEqual(expected_bounds,
                         de._bounding_date_range(today, *month_bounds))

    def test_standard(self):
        self._assert_bounds(dt.date(2015, 9, 22), (-3, 1),
                            (dt.date(2015, 6, 1), dt.date(2015, 10, 31)))

    def test_bounding_dates_overlap_year(self):
        self._assert_bounds(dt.date(2001, 1, 15), (-1, 1),
                            (dt.date(2000, 12, 1), dt.date(2001, 2, 28)))

    def test_bounding_dates_overlap_leap_year(self):
        self._assert_bounds(dt.date(2016, 1, 15), (-1, 1),
                            (dt.date(2015, 12, 1), dt.date(2016, 2, 29)))
class Test_GetRecordYear(unittest.TestCase):
    """Tests for de._get_record_year.

    Each case builds a flat series (value 5 everywhere) with record values
    planted on specific dates, then checks which year is reported as the
    record line for a given plot window.
    """

    start_date = nt.BEGINNING_OF_SATELLITE_ERA
    end_date = dt.date(2015, 12, 31)
    date_index = pd.date_range(start_date, end_date)
    base_series = pd.Series(index=date_index).fillna(5)

    def _series(self, low=None, high=None, next_highest=None, next_lowest=None):
        """Return a copy of base_series with marker values planted.

        Every value is 5 except the dates passed in: ``high`` -> 10,
        ``next_highest`` -> 7, ``next_lowest`` -> 2 and ``low`` -> 0.  The
        index runs from the beginning of the satellite era through the end
        of 2015 (the last complete year at the time of this writing).
        """
        series = self.base_series.copy()
        for date, value in ((high, 10), (next_highest, 7),
                            (next_lowest, 2), (low, 0)):
            if date:
                series[date] = value
        return series

    def _assert_record_year(self, expected, date, month_bounds, extremum,
                            **series_kwargs):
        """Plant markers per *series_kwargs* and check the computed record year."""
        series = self._series(**series_kwargs)
        actual = de._get_record_year(series, pd.to_datetime(date),
                                     month_bounds, extremum)
        self.assertEqual(actual, expected)

    def test_max(self):
        """Date: 4/2014, range: 1/2014 -> 5/2014, record:9/2002 , recordline:2002"""
        self._assert_record_year(2002, '2014-04-15', (-3, 1), 'max',
                                 high='2002-09-15')

    def test_min(self):
        """Date: 4/2014, range: 1/2014 -> 5/2014, record:9/2002(min) , recordline:2002"""
        self._assert_record_year(2002, '2014-04-15', (-3, 1), 'min',
                                 low='2002-09-15')

    def test_max_current_year_is_record(self):
        """Date: 4/2014, range: 1/2014 -> 5/2014, record:3/2014, recordline:2010"""
        self._assert_record_year(2010, '2014-04-15', (-3, 1), 'max',
                                 high='2014-03-15', next_highest='2010-09-15')

    def test_min_current_year_is_record(self):
        """Date: 4/2014, range: 1/2014 -> 5/2014, record:3/2014(min), recordline:2010"""
        self._assert_record_year(2010, '2014-04-15', (-3, 1), 'min',
                                 low='2014-03-15', next_lowest='2010-09-15')

    def test_min_record_year_is_included_in_month_bounds(self):
        """Date: 2/2015, range: 10/2014 -> 3/2015, record: 1/2014, recordline: 2013-2014"""
        self._assert_record_year(2014, '2015-02-15', (-4, 1), 'min',
                                 low='2014-04-20', next_lowest='1999-09-15')

    def test_min_record_year_before_and_crossover_forward(self):
        """Date: 12/2015, range: 8/2015 -> 1/2016, record: 12/2014, recordline: 2014-2015"""
        self._assert_record_year(2014, '2015-12-15', (-4, 1), 'min',
                                 low='2014-09-20', next_lowest='1999-09-15')

    def test_max_year_changeover_record_is_plotted_and_aligned(self):
        """Date: 1/2010, range: 10/2009 -> 2/2010, record:1/2004, recordline:2004"""
        self._assert_record_year(2004, '2010-01-15', (-3, 1), 'max',
                                 high='2004-01-27')

    def test_min_year_changeover_record_is_plotted_and_aligned(self):
        """Date: 1/2010, range: 10/2009 -> 2/2010, record:1/2004(min), recordline:2003-2004"""
        self._assert_record_year(2004, '2010-01-15', (-3, 1), 'min',
                                 low='2004-01-27')

    def test_max_year_changeover_record_is_plotted_not_aligned(self):
        """Date: 1/2010, range: 10/2009 -> 2/2010, record:11/2007 , recordline:2007-2008"""
        self._assert_record_year(2008, '2010-01-15', (-3, 1), 'max',
                                 high='2007-11-27')

    def test_min_year_changeover_record_is_plotted_not_aligned(self):
        """Date: 1/2010, range: 10/2009 -> 2/2010, record:11/2007 , recordline:2007-2008"""
        self._assert_record_year(2008, '2010-01-15', (-3, 1), 'min',
                                 low='2007-11-27')

    def test_max_year_changeover_record_is_plotted_with_current_year_plots_next_highest(self):
        """Date: 1/2010, range: 10/2009 -> 2/2010, record:11/2009 , recordline:2004-2005"""
        self._assert_record_year(2005, '2010-01-15', (-3, 1), 'max',
                                 high='2009-11-27', next_highest='2004-11-27')

    def test_min_year_changeover_record_is_plotted_with_current_year_plots_next_highest(self):
        """Date: 1/2010, range: 10/2009 -> 2/2010, record:11/2009 , recordline:2004-2005"""
        self._assert_record_year(2005, '2010-01-15', (-3, 1), 'min',
                                 low='2009-11-27', next_lowest='2004-11-27')

    def test_max_year_record_not_plotted_picks_most_months(self):
        """Date: 1/2010, range: 11/2009 -> 3/2010, record:10/2008, recordline:2007-2008"""
        self._assert_record_year(2008, '2010-01-15', (-2, 2), 'max',
                                 high='2008-10-27')

    def test_min_year_record_not_plotted_picks_most_months(self):
        """Date: 1/2010, range: 11/2009 -> 3/2010, record:8/2008, recordline:2007-2008"""
        self._assert_record_year(2008, '2010-01-15', (-2, 2), 'min',
                                 low='2008-08-27')

    def test_max_year_record_not_plotted_picks_most_months_next_highest_record(self):
        """Date: 1/2010, range: 10/2009 -> 2/2010, record: 8/2009, recordline: 2008-2009 """
        self._assert_record_year(2009, '2010-01-15', (-3, 1), 'max',
                                 high='2009-08-27', next_highest='2004-08-27')

    def test_min_year_record_not_plotted_picks_most_months_next_highest_record(self):
        """Date: 1/2010, range:10/2009 -> 2/2010, record: 8/2009, recordline: 2008-2009"""
        self._assert_record_year(2009, '2010-01-15', (-3, 1), 'min',
                                 low='2009-08-27', next_lowest='2004-08-27')

    def test_past_record_same_year(self):
        """Date: 9/2015, range:6/2015 -> 10/2015, record: 3/2015, recordline: 2010"""
        self._assert_record_year(2010, '2015-09-15', (-3, 1), 'min',
                                 low='2015-03-27', next_lowest='2010-03-28')

    def test_past_record_same_year_with_overlap(self):
        """Date: 9/2015, range:6/2015 -> 1/2016, record: 3/2015, recordline: 2014-2015"""
        self._assert_record_year(2014, '2015-09-15', (-3, 4), 'min',
                                 low='2015-03-27', next_lowest='2010-03-28')

    def test_max_year_record_not_plotted_same_most_months_picks_earlier_year(self):
        """Date: 1/2010, range: 11/2009 -> 2/2010, record: 8/2008 , recordline:2008-2009"""
        self._assert_record_year(2009, '2010-01-15', (-2, 1), 'max',
                                 high='2008-08-27')

    def test_starts_january_contains_record_month_same_year(self):
        """Date: 12/09, range: 09/2009 -> 1/2010, record: 9/2008 , recordline:2008-2009"""
        self._assert_record_year(2008, '2009-12-15', (-3, 1), 'max',
                                 high='2008-09-22')

    def test_starts_feb_contains_record_month_different_year(self):
        """Date: 1/10, range: 09/2009 -> 2/2010, record: 9/2008 , recordline:2008-2009"""
        self._assert_record_year(2009, '2010-01-15', (-4, 1), 'max',
                                 high='2008-09-22')

    def test_min_year_record_not_plotted_same_most_months_picks_earlier_year(self):
        """Date: 1/2010, range: 11/2009 -> 2/2010, record:8/2008 , recordline:2008-2009"""
        self._assert_record_year(2009, '2010-01-15', (-2, 1), 'min',
                                 low='2008-08-27')
class Test_YearWithMostMonthsInIndex(unittest.TestCase):
    """Tests for de._year_with_most_months_in_index."""

    def _assert_year(self, start, end, expected):
        """Check the year reported for a daily index spanning start..end."""
        index = pd.date_range(start=start, end=end)
        self.assertEqual(de._year_with_most_months_in_index(index), expected)

    def test_longer_year_earlier(self):
        self._assert_year('1999-01-01', '2000-01-31', 1999)

    def test_longer_year_later(self):
        self._assert_year('1999-11-01', '2000-04-29', 2000)

    def test_earlier_year_when_equal_months(self):
        self._assert_year('1999-11-01', '2000-02-29', 1999)
class Test_DateIndexPrependDays(unittest.TestCase):
    """Tests for de._date_index_prepend_days."""

    def test_adds_days_to_beginning_of_date_index(self):
        original = pd.date_range(start='2005-01-05', end='2005-01-10')
        extended = de._date_index_prepend_days(original, 5)
        # Five extra days are prepended; the tail is unchanged.
        expected = pd.date_range(start='2004-12-31', end='2005-01-10')
        self.assertTrue(extended.equals(expected))
class Test__ExtendSmoothDivide(unittest.TestCase):
    """Tests for de._extend_smooth_divide."""

    def test_does_all_the_things(self):
        frame = pd.DataFrame({'data': [10000, 15000, 20000]},
                             index=pd.Index([6, 7, 8], name='day of year'))
        date_index = pd.date_range(start='2000-01-06', end='2000-01-08')

        actual = de._extend_smooth_divide(frame, date_index, 3, 1e3)

        # The index is extended backwards to cover the smoothing window.
        npt.assert_array_equal(actual.index.values,
                               pd.Index([3, 4, 5, 6, 7, 8]).values)
        # Values are 3-day rolling means divided by 1e3.
        npt.assert_array_equal(actual.data.values,
                               np.array([np.nan, np.nan, np.nan, 10, 12.5, 15]))
class Test_ClimatologyStatistics(unittest.TestCase):
    """Tests for de._climatology_statistics."""

    def test_with_data_gets_average_stddevs_and_percentiles(self):
        # Ten days to compute statistics for.
        date_index = pd.date_range(start='2008-01-01', end='2008-01-10')
        # Two years of extents over the same Jan 3-7 window.
        series1 = pd.Series([1000.0,
                             2000.0,
                             3000.0,
                             4000.0,
                             5000.0],
                            index=pd.date_range(start='2008-01-03', end='2008-01-07'))
        series2 = pd.Series([2000.0,
                             3000.0,
                             4000.0,
                             5000.0,
                             6000.0],
                            index=pd.date_range(start='2009-01-03', end='2009-01-07'))
        extents = series1.append(series2)
        extents.name = 'total_extent_km2'
        actual = de._climatology_statistics(extents, date_index,
                                            percentiles=[0, 50, 100], nday_average=3, divisor=1e3)

        # One column per requested statistic.
        expected_columns = ['climatology', 'climatology_lower', 'climatology_upper',
                            'percentile_0', 'percentile_50', 'percentile_100']
        npt.assert_array_equal(sorted(actual.columns), sorted(expected_columns))

        # Mean across years, 3-day smoothed and divided by 1e3; the upper and
        # lower bands are the climatology +/- two standard deviations.
        expected_climatology = [np.nan, np.nan, 1.5, 2., 2.5, 3.5, 4.5, 5., 5.5, np.nan]
        expected_climatology_upper = [np.nan, np.nan, 2.914214, 3.414214, 3.914214, 4.914214,
                                      5.914214, 6.414214, 6.914214, np.nan]
        expected_climatology_lower = [np.nan, np.nan, 0.085786, 0.585786, 1.085786, 2.085786,
                                      3.085786, 3.585786, 4.085786, np.nan]
        npt.assert_array_equal(actual.climatology, expected_climatology)
        npt.assert_array_almost_equal(actual.climatology_upper, expected_climatology_upper)
        npt.assert_array_almost_equal(actual.climatology_lower, expected_climatology_lower)

        expected_percentile_100 = [np.nan, np.nan, 2., 2.5, 3., 4., 5., 5.5, 6., np.nan]
        npt.assert_array_equal(actual.percentile_100, expected_percentile_100)

        expected_percentile_50 = [np.nan, np.nan, 1.5, 2., 2.5, 3.5, 4.5, 5., 5.5, np.nan]
        npt.assert_array_equal(actual.percentile_50, expected_percentile_50)

        expected_percentile_0 = [np.nan, np.nan, 1., 1.5, 2., 3., 4., 4.5, 5., np.nan]
        npt.assert_array_equal(actual.percentile_0, expected_percentile_0)
| StarcoderdataPython |
1612109 | import requests
from requests.auth import HTTPDigestAuth
from atlascli.atlaskey import AtlasKey
# Load the Atlas programmatic API key pair from the environment.
key = AtlasKey.get_from_env()

try:
    r = requests.get(
        "https://cloud.mongodb.com/api/atlas/v1.0/groups",
        headers={"Accept": "application/json",
                 "Content-Type": "application/json"},
        auth=HTTPDigestAuth(key.public_key, key.private_key),
        timeout=30)  # avoid hanging forever on an unresponsive endpoint
    # Bug fix: check the HTTP status *before* decoding the body.  On an
    # error response the body may not be JSON, so the original order
    # (r.json() first) could raise an uncaught ValueError instead of the
    # HTTPError handled below.
    r.raise_for_status()
    print(r.json())
except requests.exceptions.HTTPError as e:
    print(e)
    print(e.response)
| StarcoderdataPython |
3347028 | <filename>app/honey/applications/example.py
from app.honey.standard import Standard
"""
้ฆๅ
ๅบ็จ็ฎๅฝไธ้่ฆๆbuild.jsonๅ
ๅฎนๅ
ๆฌ
{
'entry':'',
'requirement':''
}
"""
class AppNameClass(Standard):
    """Example application descriptor for the honey application registry.

    NOTE(review): the non-ASCII default strings and comment residue below
    appear encoding-damaged; non-comment text is preserved verbatim.
    """

    def app_info(self, **kwargs):
        """Override this method to populate the application's metadata."""
        self.name = kwargs.get('name', "desktop")  # application name
        self.desc = kwargs.get('desc', 'ctpbeeๆก้ข็ซฏ')  # application description
        self.icon = kwargs.get('icon', ":/icon/icon/bee_temp_grey.png")  # application icon
        self.versions = kwargs.get('versions',
                                   {"1.0": "https://github.com/ctpbee/ctpbee_desktop/archive/master.zip"}  # version number -> download URL
                                   )
        self.install_version = kwargs.get("install_version", "1.0")  # default version to install
็ๆฌ
        self.app_url = kwargs.get('app_url', "https://github.com/ctpbee/ctpbee_desktop")  # application link
| StarcoderdataPython |
9703003 | <filename>scripts/atrfu/example_time_analysis.py
from datetime import datetime
from the_candidate_generation import compute_entity_ranks_relfreqs
from lxml import etree
import os
from glob import glob
import pickle
def get_wid2w(doc):
    """Map each <wf> element's 'id' attribute to its word text.

    :param doc: parsed NAF document (lxml etree)
    :return: dict of word id -> word string
    """
    mapping = {}
    for wf_el in doc.xpath('text/wf'):
        mapping[wf_el.get('id')] = wf_el.text
    return mapping
def get_mention2goldlink(input_path, debug=False):
    """Extract gold entity links from an annotated XML file.

    :param input_path: path to the XML file (parsed with lxml's etree)
    :param debug: when True, dump each extracted entity and pause for input
    :return: dict mapping (entity_id, mention string) ->
             (gold DBpedia link, list of word ids)
    """
    doc = etree.parse(input_path)
    wid2w = get_wid2w(doc)
    mention2goldlink = {}

    for entity_el in doc.xpath('entities/entity'):
        entity_id = entity_el.get('id')

        # Skip entities without an external reference or without a usable link.
        ext_ref_el = entity_el.find('externalReferences/externalRef')
        if ext_ref_el is None:
            continue
        goldlink = ext_ref_el.get('reference')
        if goldlink in {'None', None}:
            continue

        # Collect the word ids that make up the mention; skip empty spans.
        wids = [target_el.get('id')
                for target_el in entity_el.xpath('references/span/target')]
        if not wids:
            continue
        mention = ' '.join(wid2w[wid] for wid in wids)

        # Normalize /page links to the canonical /resource namespace.
        goldlink = goldlink.replace('http://dbpedia.org/page',
                                    'http://dbpedia.org/resource')
        mention2goldlink[(entity_id, mention)] = (goldlink, wids)

        if debug:
            print()
            print(entity_id, mention)
            print(wids)
            etree.dump(entity_el)
            input('continue?')

    return mention2goldlink
# --- Build the evaluation iterable for the MEANTIME corpus ----------------
# Only documents created on/after this date are kept.
views_from = datetime(2007, 12, 1)
filename = 'meantime_with_times.tsv'
cache_path = 'cache.pickle'

iterable = []
with open(filename, 'r') as f:
    for raw_line in f:
        fields = raw_line.split('\t')
        goldmention = fields[0]
        goldlink = fields[1]
        creation_time = fields[2]
        identifier = fields[3]
        print(fields)
        print(creation_time)
        year, month, day = creation_time.split('-')
        ct_datetime = datetime(int(year), int(month), int(day))
        if ct_datetime < views_from:
            continue
        iterable.append((identifier, goldmention, goldlink, creation_time))

# --- Run candidate generation over several snapshot dates -----------------
run = True
if run:
    # (cache file, optional Wikipedia-views cutoff date)
    configs = [
        ('meantime.pickle', None),
        ('meantime2007-12.pickle', datetime(2007, 12, 1)),
        ('meantime2011-12.pickle', datetime(2011, 12, 1)),
        ('meantime2015-12.pickle', datetime(2015, 12, 1)),
    ]
    for pickle_path, cutoff in configs:
        if cutoff is None:
            mt_cache, mt_avg_rank, mt_avg_relfreq = \
                compute_entity_ranks_relfreqs(iterable, pickle_path)
        else:
            mt_cache, mt_avg_rank, mt_avg_relfreq = \
                compute_entity_ranks_relfreqs(iterable, pickle_path, cutoff)
        print()
        print(pickle_path)
        print(round(mt_avg_rank, 2), round(mt_avg_relfreq, 2))
| StarcoderdataPython |
3381495 | import pytest
from teste_op import somar
from teste_op import subtrair
def test_somar():
    """somar should return the sum of its two arguments."""
    result = somar(2, 3)
    assert result == 5
def test_subtrair():
    """subtrair should return the first argument minus the second."""
    result = subtrair(2, 3)
    assert result == -1
1697903 | import appdaemon.plugins.hass.hassapi as hass
import pytest
from cx_core.controller import Controller
from tests.test_utils import fake_async_function
@pytest.fixture(autouse=True)
def hass_mock(monkeypatch, mocker):
    """Stub out appdaemon's ``hass.Hass`` methods for every test.

    NOTE(review): the ``mocker`` parameter is unused in the body but kept
    so the fixture signature stays unchanged for callers that request it.
    """
    def _noop(*args, **kwargs):
        return None

    # Replace the synchronous Hass entry points with a do-nothing stub.
    for method_name in ("__init__", "listen_event", "listen_state", "log"):
        monkeypatch.setattr(hass.Hass, method_name, _noop)
    # call_service is patched with the project's fake_async_function helper
    # (presumably an awaitable stub — see tests.test_utils to confirm).
    monkeypatch.setattr(hass.Hass, "call_service", fake_async_function())
@pytest.fixture(autouse=True)
def fake_controller(hass_mock):
    """Provide a bare Controller with empty args.

    Depends on the ``hass_mock`` fixture so appdaemon is already stubbed
    out before the Controller is constructed.
    """
    controller = Controller()
    controller.args = {}
    return controller
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.