content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import functools
num_plus = 0
@functools.lru_cache(maxsize = None)
| [
11748,
1257,
310,
10141,
198,
198,
22510,
62,
9541,
796,
657,
198,
31,
12543,
310,
10141,
13,
75,
622,
62,
23870,
7,
9806,
7857,
796,
6045,
8,
198
] | 2.428571 | 28 |
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
from matplotlib.patches import RegularPolygon
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import os
IM_SCALE = 0.25
################################################################
# no margins
from skimage.transform import resize
| [
6738,
2603,
29487,
8019,
13,
28968,
3524,
1330,
3242,
2617,
5159,
11,
1052,
38983,
33,
3524,
198,
6738,
2603,
29487,
8019,
13,
8071,
2052,
1330,
23603,
34220,
14520,
198,
6738,
350,
4146,
1330,
7412,
198,
198,
11748,
2603,
29487,
8019,
... | 3.927711 | 83 |
# Wi-Fi rogue-access-point audit script: enumerates APs broadcasting a given
# SSID, then attempts an EAP (PEAP) association against each candidate BSSID
# to decide whether it is the legitimate AP or a rogue.
import subprocess
import requests
import time
import importlib

# NOTE(review): this dynamic import is redundant — the plain `import scanner`
# just below already imports the same module.
importlib.import_module("scanner")
import scanner

# wpa_supplicant configuration template, filled via .format(ssid, bssid):
# {0} = SSID, {1} = BSSID.  Doubled braces {{ }} render as literal braces.
template = """ctrl_interface=DIR=/var/run/wpa_supplicant GROUP=netdev
update_config=1
country=CZ
network={{
bssid={1}
ssid="{0}"
key_mgmt=WPA-EAP
pairwise=CCMP TKIP
group=CCMP TKIP
eap=PEAP
identity="bob"
password="hello"
ca_cert="/home/pi/ca.pem"
phase2="MSCHAPV2"
}}
"""
# BAD = "04:f0:21:42:1a:06"
# GOOD = "04:f0:21:45:cd:f3"
SERVER = '10.10.10.93:8000'

# control_AP presumably returns (invalid APs, candidate BSSID tuples) for the
# given interface/SSID — verify against the scanner module.
invalid, to_check = scanner.control_AP('wlan0', "turris-WPA2ent", "")
print(invalid, to_check)
# NOTE(review): report_third_party, generate_wpasupplicant, run_scan,
# check_result and report_rogue are not defined in this file and are not
# qualified with ``scanner.`` — as written each call raises NameError.
# Presumably a ``from scanner import *`` (or explicit imports) went missing;
# confirm against the scanner module.
report_third_party(invalid)
for tup in to_check:
    bssid = tup[0]
    generate_wpasupplicant('turris-WPA2ent', bssid)
    run_scan()
    print(tup)
    if not check_result():
        # Association against this BSSID did not validate -> flag as rogue.
        print("FAIL")
        report_rogue(bssid, 'turris-WPA2ent')
    else:
        print("OK")
| [
11748,
850,
14681,
198,
11748,
7007,
198,
11748,
640,
198,
11748,
1330,
8019,
198,
198,
11748,
8019,
13,
11748,
62,
21412,
7203,
35836,
1008,
4943,
198,
11748,
27474,
198,
198,
28243,
796,
37227,
44755,
62,
39994,
28,
34720,
33223,
7785,
... | 2.114914 | 409 |
from django.db import models
from django.urls import reverse
from django.utils.functional import cached_property
from entities.models import Person, Institution
from .controlled_vocabs import *
class RepoObject(models.Model):
    """
    An abstract class providing properties for subclasses.

    NOTE(review): the docstring calls this class abstract, but no
    ``Meta.abstract = True`` is visible in this view — confirm it exists,
    otherwise Django will create a table for this model as well.
    """
    # Human-readable name; maps to acdh:hasTitle in the ACDH schema.
    has_title = models.CharField(
        max_length=250, blank=True, verbose_name="acdh:hasTitle",
        help_text="Title or name of Collection."
    )
    # Free-text description (acdh:hasDescription).
    description = models.TextField(
        blank=True, null=True, verbose_name="acdh:hasDescription",
        help_text="A verbose description of certain aspects of an entity. \
        This is the most generic property, use more specific sub-properties where applicable."
    )
    # Stable identifier assigned by ACDH or a third party (acdh:hasIdentifier).
    acdh_id = models.CharField(
        max_length=250, blank=True, verbose_name="acdh:hasIdentifier",
        help_text="Unique identifier given by ACDH and used in ACDH systems,\
        as well as identifiers with a stable URL or URI assigned by other parties"
    )
    # Internal QA flag, not part of the ACDH vocabulary.
    checked = models.BooleanField(
        blank=True, default=False, verbose_name="Checked",
        help_text="Set to True if the Object passed your internal quality control"
    )
    has_license = models.CharField(
        max_length=250, blank=True, verbose_name="acdh:hasLicense",
        help_text="Denotes the license attached to an object."
    )
    # Restricted to the RES_TYPE controlled vocabulary (see controlled_vocabs).
    has_category = models.CharField(
        max_length=250, blank=True, verbose_name="acdh:hasCategory",
        help_text="Type of resource, e. g. corpus. Choose from list.\
        Can be refined with description, format, extent, etc.",
        choices=RES_TYPE
    )
    # Life-cycle status, restricted to the LCS controlled vocabulary.
    has_lcs = models.CharField(
        max_length=250, blank=True, verbose_name="acdh:hasLifeCycleStatus",
        help_text="Indication if the Project, Collection or Resource (A) still in\
        the making or completed? A verbose status description can\
        be added with acdh:hasCompleteness",
        choices=LCS
    )

    def copy_instance(self):
        """Persist a copy of the current object and return it.

        Resetting ``id`` to ``None`` before ``save()`` makes Django INSERT a
        new row instead of UPDATE-ing the existing one.  Note that ``obj`` is
        the *same* Python object as ``self`` (no copy is made), so after this
        call ``self`` also points at the newly inserted row.
        """
        obj = self
        obj.id = None
        obj.save()
        return obj
class Collection(RepoObject):
    """
    Mimiks acdh:Collection class:
    Set of Repo Objects (Collections or Resources), much like folders in a file system.
    A Collection can be optionally related to a Project (acdh:hasRelatedProject),
    in which it was created or curated.
    """
    # Self-referential parent link; deleting the parent cascades to children.
    part_of = models.ForeignKey(
        'Collection', blank=True, null=True, verbose_name="acdh:isPartOf",
        help_text="Indicates A is a part of aggregate B, \
        e. g. elements of a series, items of a collection.", related_name="has_part",
        on_delete=models.CASCADE
    )
    has_contributor = models.ManyToManyField(
        Person, blank=True, verbose_name="acdh:hasContributor",
        help_text="Agent (person, group, organisation) (B) who was actively involved in \
        creating/curating/editing a Resource, a Collection or in a Project (A).",
        related_name="contributes_to_collection"
    )
    has_creator = models.ManyToManyField(
        Person, blank=True, verbose_name="acdh:hasCreator",
        help_text="Person (B) responsible for creation of resource (A).\
        Will be included in the citation.",
        related_name="created_collection"
    )
    # Restricted to the ACCESS_RESTRICTIONS controlled vocabulary.
    has_access_restriction = models.CharField(
        max_length=250, blank=True, verbose_name="acdh:hasAccessRestriction",
        help_text="Denotes if restricted access applies to the Resource (A).",
        choices=ACCESS_RESTRICTIONS
    )
    # NOTE(review): removed a stray stack of three bare ``@classmethod``
    # decorators that followed this class body; they had no function beneath
    # them and therefore decorated the *next* ``class`` statement, rebinding
    # it to a classmethod object and breaking model registration.
class Resource(RepoObject):
    """
    Mimiks acdh:Resource class:
    Basic entity in the schema containing actual data / content payload; \
    comparable (and mostly equivalent) to files in a file system.
    """
    # BUG FIX: verbose_name/help_text were copy-pasted from has_contributor
    # ("acdh:hasContributor"); aligned with Collection.has_creator.
    # NOTE: verbose_name changes require a Django migration.
    has_creator = models.ManyToManyField(
        Person, blank=True, verbose_name="acdh:hasCreator",
        help_text="Person (B) responsible for creation of resource (A).\
        Will be included in the citation.",
        related_name="created_resource"
    )
    has_contributor = models.ManyToManyField(
        Person, blank=True, verbose_name="acdh:hasContributor",
        help_text="Agent (person, group, organisation) (B) who was actively involved in \
        creating/curating/editing a Resource, a Collection or in a Project (A).",
        related_name="contributes_to_resource"
    )
    # MIME type of the payload (acdh:hasFormat).
    has_filetype = models.CharField(
        max_length=250, blank=True, verbose_name="acdh:hasFormat",
        help_text="Format of a resource (A). Indicated as MIME type."
    )
    file_size = models.IntegerField(
        blank=True, null=True, verbose_name="acdh:hasBinarySize",
        help_text="Indicates size in bytes of a Resource or Collection"
    )
    part_of = models.ForeignKey(
        'Collection', blank=True, null=True, verbose_name="acdh:isPartOf",
        help_text="Indicates A is a part of aggregate B, \
        e. g. elements of a series, items of a collection.", related_name="has_part_resource",
        on_delete=models.CASCADE
    )
    has_access_restriction = models.CharField(
        max_length=250, blank=True, verbose_name="acdh:hasAccessRestriction",
        help_text="Denotes if restricted access applies to the Resource (A).",
        choices=ACCESS_RESTRICTIONS
    )

    # BUG FIX: removed a stray double ``@classmethod`` decorator that preceded
    # inherit_properties — the method reads ``self.part_of`` and would have
    # broken as a classmethod.
    def inherit_properties(self):
        """Fetch (some) properties of the ``part_of`` collection and copy
        them onto the current object.

        Scalar fields (license, category, life-cycle status, access
        restriction) are copied only when set on the parent; creators and
        contributors are added to the corresponding M2M relations.

        :return: list ``[creators, contributors, license, category, lcs,
            access_restriction]`` where each scalar slot is the inherited
            value or ``None`` if the parent had no (truthy) value.
        """
        parent = self.part_of
        scalar_fields = (
            "has_license", "has_category", "has_lcs", "has_access_restriction",
        )
        inherited = {}
        for field_name in scalar_fields:
            value = getattr(parent, field_name)
            # Falsy parent values (empty string / None) are reported as None.
            inherited[field_name] = value if value else None
            if value:
                setattr(self, field_name, value)
        # Persist once instead of once per copied field (was: up to 4 saves).
        if any(inherited.values()):
            self.save()
        creators = parent.has_creator.all()
        for person in creators:
            self.has_creator.add(person)
        contributors = parent.has_contributor.all()
        for person in contributors:
            self.has_contributor.add(person)
        return [
            creators,
            contributors,
            inherited["has_license"],
            inherited["has_category"],
            inherited["has_lcs"],
            inherited["has_access_restriction"],
        ]
# BUG FIX: removed a stray bare ``@classmethod`` decorator that preceded this
# class (it would have rebound ``Project`` to a classmethod object), plus an
# empty decorator stack that trailed the class body.
class Project(RepoObject):
    """
    Mimiks acdh:Project:
    Effort or activity with defined goals and (normally) limited time scope, usually\
    in collaborative setup with dedicated funding.
    """
    has_principal = models.ManyToManyField(
        Person, blank=True, verbose_name="acdh:hasPrincipalInvestigator",
        help_text="Person officially designated as head of project team or subproject \
        team instrumental in the work necessary to development of the resource.",
        related_name="is_principal"
    )
    has_contributor = models.ManyToManyField(
        Person, blank=True, verbose_name="acdh:hasContributor",
        help_text="Agent (person, group, organisation) (B) who was actively involved in \
        creating/curating/editing a Resource, a Collection or in a Project (A).",
        related_name="contributes_to_project"
    )
    has_start_date = models.DateField(
        blank=True, null=True, verbose_name="acdh:hasStartDate",
        help_text="Indicates the start date of a Project."
    )
    # BUG FIX: verbose_name was misspelled "acdh:hasEndtDate".
    # NOTE: verbose_name changes require a Django migration.
    has_end_date = models.DateField(
        blank=True, null=True, verbose_name="acdh:hasEndDate",
        help_text="Indicates the end date of a Project."
    )
    has_funder = models.ManyToManyField(
        Institution, blank=True, verbose_name="acdh:hasFunder",
        help_text="Organisation (B) which provided funding for the project (A).",
        related_name="is_funding"
    )
    related_collection = models.ManyToManyField(
        Collection, blank=True, verbose_name="acdh:hasRelatedCollection",
        help_text="Indication of a project (B) associated with this resource or collection (A).",
        related_name="has_related_project"
    )
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
198,
6738,
42625,
14208,
13,
26791,
13,
45124,
1330,
39986,
62,
26745,
198,
6738,
12066,
13,
27530,
1330,
7755,
11,
29426,
198,
6738,
764,
1440... | 2.596605 | 3,240 |
# Multiply two user-supplied integers using repeated addition and print the
# product in the form "a x b = result".
number0 = int(input("Primeiro número: "))
number1 = int(input("Segundo número: "))

x = 1
result = 0
# Add number0 to the accumulator number1 times.
while x <= number1:
    # BUG FIX: was ``r = result + number0`` — the sum went into an unused
    # local ``r``, so ``result`` stayed 0 and the printed product was wrong.
    result = result + number0
    x = x + 1
print("%d x %d = %d" % (number0, number1, result))
| [
17618,
15,
796,
493,
7,
15414,
7203,
26405,
7058,
299,
21356,
647,
78,
25,
366,
4008,
198,
17618,
16,
796,
493,
7,
15414,
7203,
41030,
41204,
299,
21356,
647,
78,
25,
366,
4008,
198,
87,
796,
352,
198,
20274,
796,
657,
198,
4514,
... | 2.333333 | 90 |
def validate_time_settings(implicitly_wait, timeout, poll_frequency):
    """Verifies that implicitly_wait isn't larger than timeout or poll_frequency.

    :param implicitly_wait: the driver's implicit wait, in seconds.
    :param timeout: overall wait timeout; a non-positive value disables the
        timeout check.
    :param poll_frequency: how often the condition is polled.
    :raises TypeError: if ``implicitly_wait`` exceeds ``poll_frequency``, or
        exceeds a positive ``timeout``.
    """
    if implicitly_wait > poll_frequency:
        message = "Driver implicitly_wait {} is longer than poll_frequency {}".format(
            implicitly_wait, poll_frequency
        )
        raise TypeError(message)
    if 0 < timeout < implicitly_wait:
        message = "Driver implicitly_wait {} is longer than timeout {}".format(
            implicitly_wait, timeout
        )
        raise TypeError(message)
| [
4299,
26571,
62,
2435,
62,
33692,
7,
23928,
3628,
306,
62,
17077,
11,
26827,
11,
3278,
62,
35324,
2599,
198,
220,
220,
220,
37227,
13414,
6945,
326,
31821,
62,
17077,
2125,
470,
4025,
621,
26827,
393,
3278,
62,
35324,
526,
15931,
198,... | 2.495763 | 236 |
from django.core.management.base import BaseCommand
from usaspending_api.common.helpers.endpoint_documentation import (
CURRENT_ENDPOINT_PREFIXES,
get_endpoint_urls_doc_paths_and_docstrings,
get_endpoints_from_endpoints_markdown,
validate_docs,
)
| [
6738,
42625,
14208,
13,
7295,
13,
27604,
13,
8692,
1330,
7308,
21575,
198,
6738,
514,
5126,
1571,
62,
15042,
13,
11321,
13,
16794,
364,
13,
437,
4122,
62,
22897,
341,
1330,
357,
198,
220,
220,
220,
327,
39237,
62,
1677,
6322,
46,
12... | 2.666667 | 99 |
# Generated by Django 3.2.7 on 2021-11-29 17:38
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
22,
319,
33448,
12,
1157,
12,
1959,
1596,
25,
2548,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |
from .helpers_test import TestConfigHelper
from .base_test import TestBaseCLI
| [
6738,
764,
16794,
364,
62,
9288,
1330,
6208,
16934,
47429,
198,
6738,
764,
8692,
62,
9288,
1330,
6208,
14881,
5097,
40,
198
] | 3.545455 | 22 |
# The MIT License (MIT)
# Copyright (c) 2014 Microsoft Corporation
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# disable (too-many-lines) check
# pylint: disable=C0302
"""Document client class for the Azure Cosmos database service.
"""
from typing import Dict, Any, Optional
import six
from urllib3.util.retry import Retry
from azure.core.paging import ItemPaged # type: ignore
from azure.core import PipelineClient # type: ignore
from azure.core.pipeline.policies import ( # type: ignore
HTTPPolicy,
ContentDecodePolicy,
HeadersPolicy,
UserAgentPolicy,
NetworkTraceLoggingPolicy,
CustomHookPolicy,
DistributedTracingPolicy,
ProxyPolicy)
from . import _base as base
from . import documents
from .documents import ConnectionPolicy
from . import _constants as constants
from . import http_constants
from . import _query_iterable as query_iterable
from . import _runtime_constants as runtime_constants
from . import _request_object
from . import _synchronized_request as synchronized_request
from . import _global_endpoint_manager as global_endpoint_manager
from ._routing import routing_map_provider
from ._retry_utility import ConnectionRetryPolicy
from . import _session
from . import _utils
from .partition_key import _Undefined, _Empty
# pylint: disable=protected-access
class CosmosClientConnection(object): # pylint: disable=too-many-public-methods,too-many-instance-attributes
"""Represents a document client.
Provides a client-side logical representation of the Azure Cosmos
service. This client is used to configure and execute requests against the
service.
The service client encapsulates the endpoint and credentials used to access
the Azure Cosmos service.
"""
# default number precisions
_DefaultNumberHashPrecision = 3
_DefaultNumberRangePrecision = -1
# default string precision
_DefaultStringHashPrecision = 3
_DefaultStringRangePrecision = -1
def __init__(
self,
url_connection, # type: str
auth, # type: Dict[str, Any]
connection_policy=None, # type: Optional[ConnectionPolicy]
consistency_level=documents.ConsistencyLevel.Session, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""
:param str url_connection:
The URL for connecting to the DB server.
:param dict auth:
Contains 'masterKey' or 'resourceTokens', where
auth['masterKey'] is the default authorization key to use to
create the client, and auth['resourceTokens'] is the alternative
authorization key.
:param documents.ConnectionPolicy connection_policy:
The connection policy for the client.
:param documents.ConsistencyLevel consistency_level:
The default consistency policy for client operations.
"""
self.url_connection = url_connection
self.master_key = None
self.resource_tokens = None
if auth is not None:
self.master_key = auth.get("masterKey")
self.resource_tokens = auth.get("resourceTokens")
if auth.get("permissionFeed"):
self.resource_tokens = {}
for permission_feed in auth["permissionFeed"]:
resource_parts = permission_feed["resource"].split("/")
id_ = resource_parts[-1]
self.resource_tokens[id_] = permission_feed["_token"]
self.connection_policy = connection_policy or ConnectionPolicy()
self.partition_resolvers = {} # type: Dict[str, Any]
self.partition_key_definition_cache = {} # type: Dict[str, Any]
self.default_headers = {
http_constants.HttpHeaders.CacheControl: "no-cache",
http_constants.HttpHeaders.Version: http_constants.Versions.CurrentVersion,
# For single partition query with aggregate functions we would try to accumulate the results on the SDK.
# We need to set continuation as not expected.
http_constants.HttpHeaders.IsContinuationExpected: False,
}
if consistency_level is not None:
self.default_headers[http_constants.HttpHeaders.ConsistencyLevel] = consistency_level
# Keeps the latest response headers from server.
self.last_response_headers = None
if consistency_level == documents.ConsistencyLevel.Session:
# create a session - this is maintained only if the default consistency level
# on the client is set to session, or if the user explicitly sets it as a property
# via setter
self.session = _session.Session(self.url_connection)
else:
self.session = None # type: ignore
self._useMultipleWriteLocations = False
self._global_endpoint_manager = global_endpoint_manager._GlobalEndpointManager(self)
retry_policy = None
if isinstance(self.connection_policy.ConnectionRetryConfiguration, HTTPPolicy):
retry_policy = self.connection_policy.ConnectionRetryConfiguration
elif isinstance(self.connection_policy.ConnectionRetryConfiguration, int):
retry_policy = ConnectionRetryPolicy(total=self.connection_policy.ConnectionRetryConfiguration)
elif isinstance(self.connection_policy.ConnectionRetryConfiguration, Retry):
# Convert a urllib3 retry policy to a Pipeline policy
retry_policy = ConnectionRetryPolicy(
retry_total=self.connection_policy.ConnectionRetryConfiguration.total,
retry_connect=self.connection_policy.ConnectionRetryConfiguration.connect,
retry_read=self.connection_policy.ConnectionRetryConfiguration.read,
retry_status=self.connection_policy.ConnectionRetryConfiguration.status,
retry_backoff_max=self.connection_policy.ConnectionRetryConfiguration.BACKOFF_MAX,
retry_on_status_codes=list(self.connection_policy.ConnectionRetryConfiguration.status_forcelist),
retry_backoff_factor=self.connection_policy.ConnectionRetryConfiguration.backoff_factor
)
else:
TypeError("Unsupported retry policy. Must be an azure.cosmos.ConnectionRetryPolicy, int, or urllib3.Retry")
proxies = kwargs.pop('proxies', {})
if self.connection_policy.ProxyConfiguration and self.connection_policy.ProxyConfiguration.Host:
host = self.connection_policy.ProxyConfiguration.Host
url = six.moves.urllib.parse.urlparse(host)
proxy = host if url.port else host + ":" + str(self.connection_policy.ProxyConfiguration.Port)
proxies.update({url.scheme : proxy})
policies = [
HeadersPolicy(**kwargs),
ProxyPolicy(proxies=proxies),
UserAgentPolicy(base_user_agent=_utils.get_user_agent(), **kwargs),
ContentDecodePolicy(),
retry_policy,
CustomHookPolicy(**kwargs),
DistributedTracingPolicy(),
NetworkTraceLoggingPolicy(**kwargs),
]
transport = kwargs.pop("transport", None)
self.pipeline_client = PipelineClient(base_url=url_connection, transport=transport, policies=policies)
# Query compatibility mode.
# Allows to specify compatibility mode used by client when making query requests. Should be removed when
# application/sql is no longer supported.
self._query_compatibility_mode = CosmosClientConnection._QueryCompatibilityMode.Default
# Routing map provider
self._routing_map_provider = routing_map_provider.SmartRoutingMapProvider(self)
database_account = self._global_endpoint_manager._GetDatabaseAccount(**kwargs)
self._global_endpoint_manager.force_refresh(database_account)
@property
def Session(self):
"""Gets the session object from the client. """
return self.session
@Session.setter
def Session(self, session):
"""Sets a session object on the document client.
This will override the existing session
"""
self.session = session
@property
def WriteEndpoint(self):
"""Gets the curent write endpoint for a geo-replicated database account.
"""
return self._global_endpoint_manager.get_write_endpoint()
@property
def ReadEndpoint(self):
"""Gets the curent read endpoint for a geo-replicated database account.
"""
return self._global_endpoint_manager.get_read_endpoint()
def RegisterPartitionResolver(self, database_link, partition_resolver):
"""Registers the partition resolver associated with the database link
:param str database_link:
Database Self Link or ID based link.
:param object partition_resolver:
An instance of PartitionResolver.
"""
if not database_link:
raise ValueError("database_link is None or empty.")
if partition_resolver is None:
raise ValueError("partition_resolver is None.")
self.partition_resolvers = {base.TrimBeginningAndEndingSlashes(database_link): partition_resolver}
def GetPartitionResolver(self, database_link):
"""Gets the partition resolver associated with the database link
:param str database_link:
Database self link or ID based link.
:return:
An instance of PartitionResolver.
:rtype: object
"""
if not database_link:
raise ValueError("database_link is None or empty.")
return self.partition_resolvers.get(base.TrimBeginningAndEndingSlashes(database_link))
def CreateDatabase(self, database, options=None, **kwargs):
"""Creates a database.
:param dict database:
The Azure Cosmos database to create.
:param dict options:
The request options for the request.
:return:
The Database that was created.
:rtype: dict
"""
if options is None:
options = {}
CosmosClientConnection.__ValidateResource(database)
path = "/dbs"
return self.Create(database, path, "dbs", None, None, options, **kwargs)
def ReadDatabase(self, database_link, options=None, **kwargs):
"""Reads a database.
:param str database_link:
The link to the database.
:param dict options:
The request options for the request.
:return:
The Database that was read.
:rtype: dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(database_link)
database_id = base.GetResourceIdOrFullNameFromLink(database_link)
return self.Read(path, "dbs", database_id, None, options, **kwargs)
def ReadDatabases(self, options=None, **kwargs):
"""Reads all databases.
:param dict options:
The request options for the request.
:return:
Query Iterable of Databases.
:rtype:
query_iterable.QueryIterable
"""
if options is None:
options = {}
return self.QueryDatabases(None, options, **kwargs)
    def QueryDatabases(self, query, options=None, **kwargs):
        """Queries databases.

        :param (str or dict) query:
        :param dict options:
            The request options for the request.
        :return: Query Iterable of Databases.
        :rtype:
            query_iterable.QueryIterable
        """
        if options is None:
            options = {}
        # NOTE(review): ``fetch_fn`` is not defined anywhere in this view; in
        # the upstream azure-cosmos SDK this method defines a local
        # ``fetch_fn(options)`` closure before constructing the ItemPaged.
        # As written, calling this method raises NameError — the closure
        # appears to have been stripped; confirm against upstream. The same
        # applies to QueryContainers, QueryUsers and QueryPermissions below.
        return ItemPaged(
            self, query, options, fetch_function=fetch_fn, page_iterator_class=query_iterable.QueryIterable
        )
def ReadContainers(self, database_link, options=None, **kwargs):
"""Reads all collections in a database.
:param str database_link:
The link to the database.
:param dict options:
The request options for the request.
:return: Query Iterable of Collections.
:rtype:
query_iterable.QueryIterable
"""
if options is None:
options = {}
return self.QueryContainers(database_link, None, options, **kwargs)
def QueryContainers(self, database_link, query, options=None, **kwargs):
"""Queries collections in a database.
:param str database_link:
The link to the database.
:param (str or dict) query:
:param dict options:
The request options for the request.
:return: Query Iterable of Collections.
:rtype:
query_iterable.QueryIterable
"""
if options is None:
options = {}
path = base.GetPathFromLink(database_link, "colls")
database_id = base.GetResourceIdOrFullNameFromLink(database_link)
return ItemPaged(
self, query, options, fetch_function=fetch_fn, page_iterator_class=query_iterable.QueryIterable
)
def CreateContainer(self, database_link, collection, options=None, **kwargs):
"""Creates a collection in a database.
:param str database_link:
The link to the database.
:param dict collection:
The Azure Cosmos collection to create.
:param dict options:
The request options for the request.
:return: The Collection that was created.
:rtype: dict
"""
if options is None:
options = {}
CosmosClientConnection.__ValidateResource(collection)
path = base.GetPathFromLink(database_link, "colls")
database_id = base.GetResourceIdOrFullNameFromLink(database_link)
return self.Create(collection, path, "colls", database_id, None, options, **kwargs)
def ReplaceContainer(self, collection_link, collection, options=None, **kwargs):
"""Replaces a collection and return it.
:param str collection_link:
The link to the collection entity.
:param dict collection:
The collection to be used.
:param dict options:
The request options for the request.
:return:
The new Collection.
:rtype:
dict
"""
if options is None:
options = {}
CosmosClientConnection.__ValidateResource(collection)
path = base.GetPathFromLink(collection_link)
collection_id = base.GetResourceIdOrFullNameFromLink(collection_link)
return self.Replace(collection, path, "colls", collection_id, None, options, **kwargs)
def ReadContainer(self, collection_link, options=None, **kwargs):
"""Reads a collection.
:param str collection_link:
The link to the document collection.
:param dict options:
The request options for the request.
:return:
The read Collection.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(collection_link)
collection_id = base.GetResourceIdOrFullNameFromLink(collection_link)
return self.Read(path, "colls", collection_id, None, options, **kwargs)
def CreateUser(self, database_link, user, options=None, **kwargs):
"""Creates a user.
:param str database_link:
The link to the database.
:param dict user:
The Azure Cosmos user to create.
:param dict options:
The request options for the request.
:return:
The created User.
:rtype:
dict
"""
if options is None:
options = {}
database_id, path = self._GetDatabaseIdWithPathForUser(database_link, user)
return self.Create(user, path, "users", database_id, None, options, **kwargs)
def UpsertUser(self, database_link, user, options=None, **kwargs):
"""Upserts a user.
:param str database_link:
The link to the database.
:param dict user:
The Azure Cosmos user to upsert.
:param dict options:
The request options for the request.
:return:
The upserted User.
:rtype: dict
"""
if options is None:
options = {}
database_id, path = self._GetDatabaseIdWithPathForUser(database_link, user)
return self.Upsert(user, path, "users", database_id, None, options, **kwargs)
def ReadUser(self, user_link, options=None, **kwargs):
"""Reads a user.
:param str user_link:
The link to the user entity.
:param dict options:
The request options for the request.
:return:
The read User.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(user_link)
user_id = base.GetResourceIdOrFullNameFromLink(user_link)
return self.Read(path, "users", user_id, None, options, **kwargs)
def ReadUsers(self, database_link, options=None, **kwargs):
"""Reads all users in a database.
:params str database_link:
The link to the database.
:params dict options:
The request options for the request.
:return:
Query iterable of Users.
:rtype:
query_iterable.QueryIterable
"""
if options is None:
options = {}
return self.QueryUsers(database_link, None, options, **kwargs)
def QueryUsers(self, database_link, query, options=None, **kwargs):
"""Queries users in a database.
:param str database_link:
The link to the database.
:param (str or dict) query:
:param dict options:
The request options for the request.
:return:
Query Iterable of Users.
:rtype:
query_iterable.QueryIterable
"""
if options is None:
options = {}
path = base.GetPathFromLink(database_link, "users")
database_id = base.GetResourceIdOrFullNameFromLink(database_link)
return ItemPaged(
self, query, options, fetch_function=fetch_fn, page_iterator_class=query_iterable.QueryIterable
)
def DeleteDatabase(self, database_link, options=None, **kwargs):
"""Deletes a database.
:param str database_link:
The link to the database.
:param dict options:
The request options for the request.
:return:
The deleted Database.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(database_link)
database_id = base.GetResourceIdOrFullNameFromLink(database_link)
return self.DeleteResource(path, "dbs", database_id, None, options, **kwargs)
def CreatePermission(self, user_link, permission, options=None, **kwargs):
"""Creates a permission for a user.
:param str user_link:
The link to the user entity.
:param dict permission:
The Azure Cosmos user permission to create.
:param dict options:
The request options for the request.
:return:
The created Permission.
:rtype:
dict
"""
if options is None:
options = {}
path, user_id = self._GetUserIdWithPathForPermission(permission, user_link)
return self.Create(permission, path, "permissions", user_id, None, options, **kwargs)
def UpsertPermission(self, user_link, permission, options=None, **kwargs):
"""Upserts a permission for a user.
:param str user_link:
The link to the user entity.
:param dict permission:
The Azure Cosmos user permission to upsert.
:param dict options:
The request options for the request.
:return:
The upserted permission.
:rtype:
dict
"""
if options is None:
options = {}
path, user_id = self._GetUserIdWithPathForPermission(permission, user_link)
return self.Upsert(permission, path, "permissions", user_id, None, options, **kwargs)
def ReadPermission(self, permission_link, options=None, **kwargs):
"""Reads a permission.
:param str permission_link:
The link to the permission.
:param dict options:
The request options for the request.
:return:
The read permission.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(permission_link)
permission_id = base.GetResourceIdOrFullNameFromLink(permission_link)
return self.Read(path, "permissions", permission_id, None, options, **kwargs)
def ReadPermissions(self, user_link, options=None, **kwargs):
"""Reads all permissions for a user.
:param str user_link:
The link to the user entity.
:param dict options:
The request options for the request.
:return:
Query Iterable of Permissions.
:rtype:
query_iterable.QueryIterable
"""
if options is None:
options = {}
return self.QueryPermissions(user_link, None, options, **kwargs)
def QueryPermissions(self, user_link, query, options=None, **kwargs):
"""Queries permissions for a user.
:param str user_link:
The link to the user entity.
:param (str or dict) query:
:param dict options:
The request options for the request.
:return:
Query Iterable of Permissions.
:rtype:
query_iterable.QueryIterable
"""
if options is None:
options = {}
path = base.GetPathFromLink(user_link, "permissions")
user_id = base.GetResourceIdOrFullNameFromLink(user_link)
return ItemPaged(
self, query, options, fetch_function=fetch_fn, page_iterator_class=query_iterable.QueryIterable
)
def ReplaceUser(self, user_link, user, options=None, **kwargs):
"""Replaces a user and return it.
:param str user_link:
The link to the user entity.
:param dict user:
:param dict options:
The request options for the request.
:return:
The new User.
:rtype:
dict
"""
if options is None:
options = {}
CosmosClientConnection.__ValidateResource(user)
path = base.GetPathFromLink(user_link)
user_id = base.GetResourceIdOrFullNameFromLink(user_link)
return self.Replace(user, path, "users", user_id, None, options, **kwargs)
def DeleteUser(self, user_link, options=None, **kwargs):
"""Deletes a user.
:param str user_link:
The link to the user entity.
:param dict options:
The request options for the request.
:return:
The deleted user.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(user_link)
user_id = base.GetResourceIdOrFullNameFromLink(user_link)
return self.DeleteResource(path, "users", user_id, None, options, **kwargs)
def ReplacePermission(self, permission_link, permission, options=None, **kwargs):
"""Replaces a permission and return it.
:param str permission_link:
The link to the permission.
:param dict permission:
:param dict options:
The request options for the request.
:return:
The new Permission.
:rtype:
dict
"""
if options is None:
options = {}
CosmosClientConnection.__ValidateResource(permission)
path = base.GetPathFromLink(permission_link)
permission_id = base.GetResourceIdOrFullNameFromLink(permission_link)
return self.Replace(permission, path, "permissions", permission_id, None, options, **kwargs)
def DeletePermission(self, permission_link, options=None, **kwargs):
"""Deletes a permission.
:param str permission_link:
The link to the permission.
:param dict options:
The request options for the request.
:return:
The deleted Permission.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(permission_link)
permission_id = base.GetResourceIdOrFullNameFromLink(permission_link)
return self.DeleteResource(path, "permissions", permission_id, None, options, **kwargs)
def ReadItems(self, collection_link, feed_options=None, response_hook=None, **kwargs):
"""Reads all documents in a collection.
:param str collection_link:
The link to the document collection.
:param dict feed_options:
:return:
Query Iterable of Documents.
:rtype:
query_iterable.QueryIterable
"""
if feed_options is None:
feed_options = {}
return self.QueryItems(collection_link, None, feed_options, response_hook=response_hook, **kwargs)
    def QueryItems(
        self,
        database_or_container_link,
        query,
        options=None,
        partition_key=None,
        response_hook=None,
        **kwargs
    ):
        """Queries documents in a collection.
        :param str database_or_container_link:
            The link to the database when using partitioning, otherwise link to the document collection.
        :param (str or dict) query:
        :param dict options:
            The request options for the request.
        :param str partition_key:
            Partition key for the query(default value None)
        :param response_hook:
            A callable invoked with the response metadata
        :return:
            Query Iterable of Documents.
        :rtype:
            query_iterable.QueryIterable
        """
        # Normalize the link so the database-vs-container check below is reliable.
        database_or_container_link = base.TrimBeginningAndEndingSlashes(database_or_container_link)
        if options is None:
            options = {}
        if base.IsDatabaseLink(database_or_container_link):
            # A database link means client-side partitioning: ItemPaged is given the
            # database link and partition key instead of a concrete fetch function.
            return ItemPaged(
                self,
                query,
                options,
                database_link=database_or_container_link,
                partition_key=partition_key,
                page_iterator_class=query_iterable.QueryIterable
            )
        path = base.GetPathFromLink(database_or_container_link, "docs")
        collection_id = base.GetResourceIdOrFullNameFromLink(database_or_container_link)
        # NOTE(review): fetch_fn is not defined anywhere in this method's visible
        # scope — presumably a fetch closure was elided from this chunk; confirm
        # it exists before relying on this path. collection_id also appears unused here.
        return ItemPaged(
            self,
            query,
            options,
            fetch_function=fetch_fn,
            collection_link=database_or_container_link,
            page_iterator_class=query_iterable.QueryIterable
        )
    def QueryItemsChangeFeed(self, collection_link, options=None, response_hook=None, **kwargs):
        """Queries documents change feed in a collection.
        :param str collection_link:
            The link to the document collection.
        :param dict options:
            The request options for the request.
            options may also specify partition key range id.
        :param response_hook:
            A callable invoked with the response metadata
        :return:
            Query Iterable of Documents.
        :rtype:
            query_iterable.QueryIterable
        """
        # Pull the optional partition key range id out of the options so it can be
        # forwarded explicitly to the generic change-feed helper.
        partition_key_range_id = None
        if options is not None and "partitionKeyRangeId" in options:
            partition_key_range_id = options["partitionKeyRangeId"]
        return self._QueryChangeFeed(
            collection_link, "Documents", options, partition_key_range_id, response_hook=response_hook, **kwargs
        )
    def _QueryChangeFeed(
        self, collection_link, resource_type, options=None, partition_key_range_id=None, response_hook=None, **kwargs
    ):
        """Queries change feed of a resource in a collection.
        :param str collection_link:
            The link to the document collection.
        :param str resource_type:
            The type of the resource.
        :param dict options:
            The request options for the request.
        :param str partition_key_range_id:
            Specifies partition key range id.
        :param response_hook:
            A callable invoked with the response metadata
        :return:
            Query Iterable of Documents.
        :rtype:
            query_iterable.QueryIterable
        """
        if options is None:
            options = {}
        # NOTE(review): this mutates the caller-supplied options dict in place —
        # callers that reuse an options dict will see changeFeed=True afterwards.
        options["changeFeed"] = True
        resource_key_map = {"Documents": "docs"}
        # For now, change feed only supports Documents and Partition Key Range resouce type
        if resource_type not in resource_key_map:
            raise NotImplementedError(resource_type + " change feed query is not supported.")
        resource_key = resource_key_map[resource_type]
        path = base.GetPathFromLink(collection_link, resource_key)
        collection_id = base.GetResourceIdOrFullNameFromLink(collection_link)
        # NOTE(review): fetch_fn is not defined in this method's visible scope —
        # presumably a fetch closure was elided from this chunk; confirm before use.
        return ItemPaged(
            self,
            None,
            options,
            fetch_function=fetch_fn,
            collection_link=collection_link,
            page_iterator_class=query_iterable.QueryIterable
        )
def _ReadPartitionKeyRanges(self, collection_link, feed_options=None, **kwargs):
"""Reads Partition Key Ranges.
:param str collection_link:
The link to the document collection.
:param dict feed_options:
:return:
Query Iterable of PartitionKeyRanges.
:rtype:
query_iterable.QueryIterable
"""
if feed_options is None:
feed_options = {}
return self._QueryPartitionKeyRanges(collection_link, None, feed_options, **kwargs)
    def _QueryPartitionKeyRanges(self, collection_link, query, options=None, **kwargs):
        """Queries Partition Key Ranges in a collection.
        :param str collection_link:
            The link to the document collection.
        :param (str or dict) query:
        :param dict options:
            The request options for the request.
        :return:
            Query Iterable of PartitionKeyRanges.
        :rtype:
            query_iterable.QueryIterable
        """
        if options is None:
            options = {}
        path = base.GetPathFromLink(collection_link, "pkranges")
        collection_id = base.GetResourceIdOrFullNameFromLink(collection_link)
        # NOTE(review): fetch_fn is not defined in this method's visible scope —
        # presumably a fetch closure was elided from this chunk; confirm before use.
        return ItemPaged(
            self, query, options, fetch_function=fetch_fn, page_iterator_class=query_iterable.QueryIterable
        )
    def CreateItem(self, database_or_container_link, document, options=None, **kwargs):
        """Creates a document in a collection.
        :param str database_or_container_link:
            The link to the database when using partitioning, otherwise link to the document collection.
        :param dict document:
            The Azure Cosmos document to create.
        :param dict options:
            The request options for the request.
        :param bool options['disableAutomaticIdGeneration']:
            Disables the automatic id generation. If id is missing in the body and this
            option is true, an error will be returned.
        :return:
            The created Document.
        :rtype:
            dict
        """
        # Python's default arguments are evaluated once when the function is defined,
        # not each time the function is called (like it is in say, Ruby). This means
        # that if you use a mutable default argument and mutate it, you will and have
        # mutated that object for all future calls to the function as well. So, using
        # a non-mutable deafult in this case(None) and assigning an empty dict(mutable)
        # inside the method For more details on this gotcha, please refer
        # http://docs.python-guide.org/en/latest/writing/gotchas/
        if options is None:
            options = {}

        # We check the link to be document collection link since it can be database
        # link in case of client side partitioning
        if base.IsItemContainerLink(database_or_container_link):
            # Container link: the partition key can be derived here and added to options.
            options = self._AddPartitionKey(database_or_container_link, document, options)

        collection_id, document, path = self._GetContainerIdWithPathForItem(
            database_or_container_link, document, options
        )
        return self.Create(document, path, "docs", collection_id, None, options, **kwargs)
    def UpsertItem(self, database_or_container_link, document, options=None, **kwargs):
        """Upserts a document in a collection.
        :param str database_or_container_link:
            The link to the database when using partitioning, otherwise link to the document collection.
        :param dict document:
            The Azure Cosmos document to upsert.
        :param dict options:
            The request options for the request.
        :param bool options['disableAutomaticIdGeneration']:
            Disables the automatic id generation. If id is missing in the body and this
            option is true, an error will be returned.
        :return:
            The upserted Document.
        :rtype:
            dict
        """
        # Python's default arguments are evaluated once when the function is defined,
        # not each time the function is called (like it is in say, Ruby). This means
        # that if you use a mutable default argument and mutate it, you will and have
        # mutated that object for all future calls to the function as well. So, using
        # a non-mutable deafult in this case(None) and assigning an empty dict(mutable)
        # inside the method For more details on this gotcha, please refer
        # http://docs.python-guide.org/en/latest/writing/gotchas/
        if options is None:
            options = {}

        # We check the link to be document collection link since it can be database
        # link in case of client side partitioning
        if base.IsItemContainerLink(database_or_container_link):
            # Container link: the partition key can be derived here and added to options.
            options = self._AddPartitionKey(database_or_container_link, document, options)

        collection_id, document, path = self._GetContainerIdWithPathForItem(
            database_or_container_link, document, options
        )
        return self.Upsert(document, path, "docs", collection_id, None, options, **kwargs)
PartitionResolverErrorMessage = (
"Couldn't find any partition resolvers for the database link provided. "
+ "Ensure that the link you used when registering the partition resolvers "
+ "matches the link provided or you need to register both types of database "
+ "link(self link as well as ID based link)."
)
    # Document (item) operations follow; the collection id and request path are
    # derived from the document link by the helpers used below.
def ReadItem(self, document_link, options=None, **kwargs):
"""Reads a document.
:param str document_link:
The link to the document.
:param dict options:
The request options for the request.
:return:
The read Document.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(document_link)
document_id = base.GetResourceIdOrFullNameFromLink(document_link)
return self.Read(path, "docs", document_id, None, options, **kwargs)
def ReadTriggers(self, collection_link, options=None, **kwargs):
"""Reads all triggers in a collection.
:param str collection_link:
The link to the document collection.
:param dict options:
The request options for the request.
:return:
Query Iterable of Triggers.
:rtype:
query_iterable.QueryIterable
"""
if options is None:
options = {}
return self.QueryTriggers(collection_link, None, options, **kwargs)
    def QueryTriggers(self, collection_link, query, options=None, **kwargs):
        """Queries triggers in a collection.
        :param str collection_link:
            The link to the document collection.
        :param (str or dict) query:
        :param dict options:
            The request options for the request.
        :return:
            Query Iterable of Triggers.
        :rtype:
            query_iterable.QueryIterable
        """
        if options is None:
            options = {}
        path = base.GetPathFromLink(collection_link, "triggers")
        collection_id = base.GetResourceIdOrFullNameFromLink(collection_link)
        # NOTE(review): fetch_fn is not defined in this method's visible scope —
        # presumably a fetch closure was elided from this chunk; confirm before use.
        return ItemPaged(
            self, query, options, fetch_function=fetch_fn, page_iterator_class=query_iterable.QueryIterable
        )
def CreateTrigger(self, collection_link, trigger, options=None, **kwargs):
"""Creates a trigger in a collection.
:param str collection_link:
The link to the document collection.
:param dict trigger:
:param dict options:
The request options for the request.
:return:
The created Trigger.
:rtype:
dict
"""
if options is None:
options = {}
collection_id, path, trigger = self._GetContainerIdWithPathForTrigger(collection_link, trigger)
return self.Create(trigger, path, "triggers", collection_id, None, options, **kwargs)
def UpsertTrigger(self, collection_link, trigger, options=None, **kwargs):
"""Upserts a trigger in a collection.
:param str collection_link:
The link to the document collection.
:param dict trigger:
:param dict options:
The request options for the request.
:return:
The upserted Trigger.
:rtype:
dict
"""
if options is None:
options = {}
collection_id, path, trigger = self._GetContainerIdWithPathForTrigger(collection_link, trigger)
return self.Upsert(trigger, path, "triggers", collection_id, None, options, **kwargs)
def ReadTrigger(self, trigger_link, options=None, **kwargs):
"""Reads a trigger.
:param str trigger_link:
The link to the trigger.
:param dict options:
The request options for the request.
:return:
The read Trigger.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(trigger_link)
trigger_id = base.GetResourceIdOrFullNameFromLink(trigger_link)
return self.Read(path, "triggers", trigger_id, None, options, **kwargs)
def ReadUserDefinedFunctions(self, collection_link, options=None, **kwargs):
"""Reads all user-defined functions in a collection.
:param str collection_link:
The link to the document collection.
:param dict options:
The request options for the request.
:return:
Query Iterable of UDFs.
:rtype:
query_iterable.QueryIterable
"""
if options is None:
options = {}
return self.QueryUserDefinedFunctions(collection_link, None, options, **kwargs)
    def QueryUserDefinedFunctions(self, collection_link, query, options=None, **kwargs):
        """Queries user-defined functions in a collection.
        :param str collection_link:
            The link to the collection.
        :param (str or dict) query:
        :param dict options:
            The request options for the request.
        :return:
            Query Iterable of UDFs.
        :rtype:
            query_iterable.QueryIterable
        """
        if options is None:
            options = {}
        path = base.GetPathFromLink(collection_link, "udfs")
        collection_id = base.GetResourceIdOrFullNameFromLink(collection_link)
        # NOTE(review): fetch_fn is not defined in this method's visible scope —
        # presumably a fetch closure was elided from this chunk; confirm before use.
        return ItemPaged(
            self, query, options, fetch_function=fetch_fn, page_iterator_class=query_iterable.QueryIterable
        )
def CreateUserDefinedFunction(self, collection_link, udf, options=None, **kwargs):
"""Creates a user-defined function in a collection.
:param str collection_link:
The link to the collection.
:param str udf:
:param dict options:
The request options for the request.
:return:
The created UDF.
:rtype:
dict
"""
if options is None:
options = {}
collection_id, path, udf = self._GetContainerIdWithPathForUDF(collection_link, udf)
return self.Create(udf, path, "udfs", collection_id, None, options, **kwargs)
def UpsertUserDefinedFunction(self, collection_link, udf, options=None, **kwargs):
"""Upserts a user-defined function in a collection.
:param str collection_link:
The link to the collection.
:param str udf:
:param dict options:
The request options for the request.
:return:
The upserted UDF.
:rtype:
dict
"""
if options is None:
options = {}
collection_id, path, udf = self._GetContainerIdWithPathForUDF(collection_link, udf)
return self.Upsert(udf, path, "udfs", collection_id, None, options, **kwargs)
def ReadUserDefinedFunction(self, udf_link, options=None, **kwargs):
"""Reads a user-defined function.
:param str udf_link:
The link to the user-defined function.
:param dict options:
The request options for the request.
:return:
The read UDF.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(udf_link)
udf_id = base.GetResourceIdOrFullNameFromLink(udf_link)
return self.Read(path, "udfs", udf_id, None, options, **kwargs)
def ReadStoredProcedures(self, collection_link, options=None, **kwargs):
"""Reads all store procedures in a collection.
:param str collection_link:
The link to the document collection.
:param dict options:
The request options for the request.
:return:
Query Iterable of Stored Procedures.
:rtype:
query_iterable.QueryIterable
"""
if options is None:
options = {}
return self.QueryStoredProcedures(collection_link, None, options, **kwargs)
    def QueryStoredProcedures(self, collection_link, query, options=None, **kwargs):
        """Queries stored procedures in a collection.
        :param str collection_link:
            The link to the document collection.
        :param (str or dict) query:
        :param dict options:
            The request options for the request.
        :return:
            Query Iterable of Stored Procedures.
        :rtype:
            query_iterable.QueryIterable
        """
        if options is None:
            options = {}
        path = base.GetPathFromLink(collection_link, "sprocs")
        collection_id = base.GetResourceIdOrFullNameFromLink(collection_link)
        # NOTE(review): fetch_fn is not defined in this method's visible scope —
        # presumably a fetch closure was elided from this chunk; confirm before use.
        return ItemPaged(
            self, query, options, fetch_function=fetch_fn, page_iterator_class=query_iterable.QueryIterable
        )
def CreateStoredProcedure(self, collection_link, sproc, options=None, **kwargs):
"""Creates a stored procedure in a collection.
:param str collection_link:
The link to the document collection.
:param str sproc:
:param dict options:
The request options for the request.
:return:
The created Stored Procedure.
:rtype:
dict
"""
if options is None:
options = {}
collection_id, path, sproc = self._GetContainerIdWithPathForSproc(collection_link, sproc)
return self.Create(sproc, path, "sprocs", collection_id, None, options, **kwargs)
def UpsertStoredProcedure(self, collection_link, sproc, options=None, **kwargs):
"""Upserts a stored procedure in a collection.
:param str collection_link:
The link to the document collection.
:param str sproc:
:param dict options:
The request options for the request.
:return:
The upserted Stored Procedure.
:rtype:
dict
"""
if options is None:
options = {}
collection_id, path, sproc = self._GetContainerIdWithPathForSproc(collection_link, sproc)
return self.Upsert(sproc, path, "sprocs", collection_id, None, options, **kwargs)
def ReadStoredProcedure(self, sproc_link, options=None, **kwargs):
"""Reads a stored procedure.
:param str sproc_link:
The link to the stored procedure.
:param dict options:
The request options for the request.
:return:
The read Stored Procedure.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(sproc_link)
sproc_id = base.GetResourceIdOrFullNameFromLink(sproc_link)
return self.Read(path, "sprocs", sproc_id, None, options, **kwargs)
def ReadConflicts(self, collection_link, feed_options=None, **kwargs):
"""Reads conflicts.
:param str collection_link:
The link to the document collection.
:param dict feed_options:
:return:
Query Iterable of Conflicts.
:rtype:
query_iterable.QueryIterable
"""
if feed_options is None:
feed_options = {}
return self.QueryConflicts(collection_link, None, feed_options, **kwargs)
    def QueryConflicts(self, collection_link, query, options=None, **kwargs):
        """Queries conflicts in a collection.
        :param str collection_link:
            The link to the document collection.
        :param (str or dict) query:
        :param dict options:
            The request options for the request.
        :return:
            Query Iterable of Conflicts.
        :rtype:
            query_iterable.QueryIterable
        """
        if options is None:
            options = {}
        path = base.GetPathFromLink(collection_link, "conflicts")
        collection_id = base.GetResourceIdOrFullNameFromLink(collection_link)
        # NOTE(review): fetch_fn is not defined in this method's visible scope —
        # presumably a fetch closure was elided from this chunk; confirm before use.
        return ItemPaged(
            self, query, options, fetch_function=fetch_fn, page_iterator_class=query_iterable.QueryIterable
        )
def ReadConflict(self, conflict_link, options=None, **kwargs):
"""Reads a conflict.
:param str conflict_link:
The link to the conflict.
:param dict options:
:return:
The read Conflict.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(conflict_link)
conflict_id = base.GetResourceIdOrFullNameFromLink(conflict_link)
return self.Read(path, "conflicts", conflict_id, None, options, **kwargs)
def DeleteContainer(self, collection_link, options=None, **kwargs):
"""Deletes a collection.
:param str collection_link:
The link to the document collection.
:param dict options:
The request options for the request.
:return:
The deleted Collection.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(collection_link)
collection_id = base.GetResourceIdOrFullNameFromLink(collection_link)
return self.DeleteResource(path, "colls", collection_id, None, options, **kwargs)
    def ReplaceItem(self, document_link, new_document, options=None, **kwargs):
        """Replaces a document and returns it.
        :param str document_link:
            The link to the document.
        :param dict new_document:
        :param dict options:
            The request options for the request.
        :return:
            The new Document.
        :rtype:
            dict
        """
        CosmosClientConnection.__ValidateResource(new_document)
        path = base.GetPathFromLink(document_link)
        document_id = base.GetResourceIdOrFullNameFromLink(document_link)
        # Python's default arguments are evaluated once when the function is defined,
        # not each time the function is called (like it is in say, Ruby). This means
        # that if you use a mutable default argument and mutate it, you will and have
        # mutated that object for all future calls to the function as well. So, using
        # a non-mutable deafult in this case(None) and assigning an empty dict(mutable)
        # inside the function so that it remains local For more details on this gotcha,
        # please refer http://docs.python-guide.org/en/latest/writing/gotchas/
        if options is None:
            options = {}

        # Extract the document collection link and add the partition key to options
        collection_link = base.GetItemContainerLink(document_link)
        options = self._AddPartitionKey(collection_link, new_document, options)

        return self.Replace(new_document, path, "docs", document_id, None, options, **kwargs)
def DeleteItem(self, document_link, options=None, **kwargs):
"""Deletes a document.
:param str document_link:
The link to the document.
:param dict options:
The request options for the request.
:return:
The deleted Document.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(document_link)
document_id = base.GetResourceIdOrFullNameFromLink(document_link)
return self.DeleteResource(path, "docs", document_id, None, options, **kwargs)
    def ReplaceTrigger(self, trigger_link, trigger, options=None, **kwargs):
        """Replaces a trigger and returns it.
        :param str trigger_link:
            The link to the trigger.
        :param dict trigger:
        :param dict options:
            The request options for the request.
        :return:
            The replaced Trigger.
        :rtype:
            dict
        """
        if options is None:
            options = {}

        CosmosClientConnection.__ValidateResource(trigger)
        # Work on a copy so the caller's dict is not mutated, then normalize the
        # script source: accept either 'serverScript' or 'body' and always store
        # it under 'body' as a string.
        trigger = trigger.copy()
        if trigger.get("serverScript"):
            trigger["body"] = str(trigger["serverScript"])
        elif trigger.get("body"):
            trigger["body"] = str(trigger["body"])

        path = base.GetPathFromLink(trigger_link)
        trigger_id = base.GetResourceIdOrFullNameFromLink(trigger_link)
        return self.Replace(trigger, path, "triggers", trigger_id, None, options, **kwargs)
def DeleteTrigger(self, trigger_link, options=None, **kwargs):
"""Deletes a trigger.
:param str trigger_link:
The link to the trigger.
:param dict options:
The request options for the request.
:return:
The deleted Trigger.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(trigger_link)
trigger_id = base.GetResourceIdOrFullNameFromLink(trigger_link)
return self.DeleteResource(path, "triggers", trigger_id, None, options, **kwargs)
    def ReplaceUserDefinedFunction(self, udf_link, udf, options=None, **kwargs):
        """Replaces a user-defined function and returns it.
        :param str udf_link:
            The link to the user-defined function.
        :param dict udf:
        :param dict options:
            The request options for the request.
        :return:
            The new UDF.
        :rtype:
            dict
        """
        if options is None:
            options = {}

        CosmosClientConnection.__ValidateResource(udf)
        # Work on a copy so the caller's dict is not mutated, then normalize the
        # script source: accept either 'serverScript' or 'body' and always store
        # it under 'body' as a string.
        udf = udf.copy()
        if udf.get("serverScript"):
            udf["body"] = str(udf["serverScript"])
        elif udf.get("body"):
            udf["body"] = str(udf["body"])

        path = base.GetPathFromLink(udf_link)
        udf_id = base.GetResourceIdOrFullNameFromLink(udf_link)
        return self.Replace(udf, path, "udfs", udf_id, None, options, **kwargs)
def DeleteUserDefinedFunction(self, udf_link, options=None, **kwargs):
"""Deletes a user-defined function.
:param str udf_link:
The link to the user-defined function.
:param dict options:
The request options for the request.
:return:
The deleted UDF.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(udf_link)
udf_id = base.GetResourceIdOrFullNameFromLink(udf_link)
return self.DeleteResource(path, "udfs", udf_id, None, options, **kwargs)
    def ExecuteStoredProcedure(self, sproc_link, params, options=None, **kwargs):
        """Executes a store procedure.
        :param str sproc_link:
            The link to the stored procedure.
        :param dict params:
            List or None
        :param dict options:
            The request options for the request.
        :return:
            The Stored Procedure response.
        :rtype:
            dict
        """
        if options is None:
            options = {}

        initial_headers = dict(self.default_headers)
        initial_headers.update({http_constants.HttpHeaders.Accept: (runtime_constants.MediaTypes.Json)})

        # A bare (non-list) argument is wrapped into a one-element list, since the
        # procedure is invoked with an argument array.
        if params and not isinstance(params, list):
            params = [params]

        path = base.GetPathFromLink(sproc_link)
        sproc_id = base.GetResourceIdOrFullNameFromLink(sproc_link)
        headers = base.GetHeaders(self, initial_headers, "post", path, sproc_id, "sprocs", options)

        # ExecuteStoredProcedure will use WriteEndpoint since it uses POST operation
        request_params = _request_object.RequestObject("sprocs", documents._OperationType.ExecuteJavaScript)
        result, self.last_response_headers = self.__Post(path, request_params, params, headers, **kwargs)
        return result
    def ReplaceStoredProcedure(self, sproc_link, sproc, options=None, **kwargs):
        """Replaces a stored procedure and returns it.
        :param str sproc_link:
            The link to the stored procedure.
        :param dict sproc:
        :param dict options:
            The request options for the request.
        :return:
            The replaced Stored Procedure.
        :rtype:
            dict
        """
        if options is None:
            options = {}

        CosmosClientConnection.__ValidateResource(sproc)
        # Work on a copy so the caller's dict is not mutated, then normalize the
        # script source: accept either 'serverScript' or 'body' and always store
        # it under 'body' as a string.
        sproc = sproc.copy()
        if sproc.get("serverScript"):
            sproc["body"] = str(sproc["serverScript"])
        elif sproc.get("body"):
            sproc["body"] = str(sproc["body"])

        path = base.GetPathFromLink(sproc_link)
        sproc_id = base.GetResourceIdOrFullNameFromLink(sproc_link)
        return self.Replace(sproc, path, "sprocs", sproc_id, None, options, **kwargs)
def DeleteStoredProcedure(self, sproc_link, options=None, **kwargs):
"""Deletes a stored procedure.
:param str sproc_link:
The link to the stored procedure.
:param dict options:
The request options for the request.
:return:
The deleted Stored Procedure.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(sproc_link)
sproc_id = base.GetResourceIdOrFullNameFromLink(sproc_link)
return self.DeleteResource(path, "sprocs", sproc_id, None, options, **kwargs)
def DeleteConflict(self, conflict_link, options=None, **kwargs):
"""Deletes a conflict.
:param str conflict_link:
The link to the conflict.
:param dict options:
The request options for the request.
:return:
The deleted Conflict.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(conflict_link)
conflict_id = base.GetResourceIdOrFullNameFromLink(conflict_link)
return self.DeleteResource(path, "conflicts", conflict_id, None, options, **kwargs)
def ReplaceOffer(self, offer_link, offer, **kwargs):
"""Replaces an offer and returns it.
:param str offer_link:
The link to the offer.
:param dict offer:
:return:
The replaced Offer.
:rtype:
dict
"""
CosmosClientConnection.__ValidateResource(offer)
path = base.GetPathFromLink(offer_link)
offer_id = base.GetResourceIdOrFullNameFromLink(offer_link)
return self.Replace(offer, path, "offers", offer_id, None, None, **kwargs)
def ReadOffer(self, offer_link, **kwargs):
"""Reads an offer.
:param str offer_link:
The link to the offer.
:return:
The read Offer.
:rtype:
dict
"""
path = base.GetPathFromLink(offer_link)
offer_id = base.GetResourceIdOrFullNameFromLink(offer_link)
return self.Read(path, "offers", offer_id, None, {}, **kwargs)
def ReadOffers(self, options=None, **kwargs):
"""Reads all offers.
:param dict options:
The request options for the request
:return:
Query Iterable of Offers.
:rtype:
query_iterable.QueryIterable
"""
if options is None:
options = {}
return self.QueryOffers(None, options, **kwargs)
    def QueryOffers(self, query, options=None, **kwargs):
        """Query for all offers.
        :param (str or dict) query:
        :param dict options:
            The request options for the request
        :return:
            Query Iterable of Offers.
        :rtype:
            query_iterable.QueryIterable
        """
        if options is None:
            options = {}
        # NOTE(review): fetch_fn is not defined in this method's visible scope —
        # presumably a fetch closure was elided from this chunk; confirm before use.
        return ItemPaged(
            self, query, options, fetch_function=fetch_fn, page_iterator_class=query_iterable.QueryIterable
        )
    def GetDatabaseAccount(self, url_connection=None, **kwargs):
        """Gets database account info.
        :param str url_connection:
            Endpoint URL to query; defaults to this connection's endpoint.
        :return:
            The Database Account.
        :rtype:
            documents.DatabaseAccount
        """
        if url_connection is None:
            url_connection = self.url_connection

        initial_headers = dict(self.default_headers)
        headers = base.GetHeaders(self, initial_headers, "get", "", "", "", {})  # path  # id  # type

        request_params = _request_object.RequestObject("databaseaccount", documents._OperationType.Read, url_connection)
        result, self.last_response_headers = self.__Get("", request_params, headers, **kwargs)
        database_account = documents.DatabaseAccount()
        database_account.DatabasesLink = "/dbs/"
        database_account.MediaLink = "/media/"
        # Storage quota/usage figures are carried on response headers, not the body.
        if http_constants.HttpHeaders.MaxMediaStorageUsageInMB in self.last_response_headers:
            database_account.MaxMediaStorageUsageInMB = self.last_response_headers[
                http_constants.HttpHeaders.MaxMediaStorageUsageInMB
            ]
        if http_constants.HttpHeaders.CurrentMediaStorageUsageInMB in self.last_response_headers:
            database_account.CurrentMediaStorageUsageInMB = self.last_response_headers[
                http_constants.HttpHeaders.CurrentMediaStorageUsageInMB
            ]
        database_account.ConsistencyPolicy = result.get(constants._Constants.UserConsistencyPolicy)

        # WritableLocations and ReadableLocations fields will be available only for geo-replicated database accounts
        if constants._Constants.WritableLocations in result:
            database_account._WritableLocations = result[constants._Constants.WritableLocations]
        if constants._Constants.ReadableLocations in result:
            database_account._ReadableLocations = result[constants._Constants.ReadableLocations]
        if constants._Constants.EnableMultipleWritableLocations in result:
            database_account._EnableMultipleWritableLocations = result[
                constants._Constants.EnableMultipleWritableLocations
            ]

        # Multi-write is enabled only when both the client policy requests it and
        # the account supports it.
        self._useMultipleWriteLocations = (
            self.connection_policy.UseMultipleWriteLocations and database_account._EnableMultipleWritableLocations
        )
        return database_account
    def Create(self, body, path, typ, id, initial_headers, options=None, **kwargs):  # pylint: disable=redefined-builtin
        """Creates an Azure Cosmos resource and returns it.
        :param dict body:
        :param str path:
        :param str typ:
        :param str id:
        :param dict initial_headers:
        :param dict options:
            The request options for the request.
        :return:
            The created Azure Cosmos resource.
        :rtype:
            dict
        """
        if options is None:
            options = {}

        initial_headers = initial_headers or self.default_headers
        headers = base.GetHeaders(self, initial_headers, "post", path, id, typ, options)
        # Create will use WriteEndpoint since it uses POST operation

        request_params = _request_object.RequestObject(typ, documents._OperationType.Create)
        result, self.last_response_headers = self.__Post(path, request_params, body, headers, **kwargs)

        # update session for write request
        self._UpdateSessionIfRequired(headers, result, self.last_response_headers)
        return result
    def Upsert(self, body, path, typ, id, initial_headers, options=None, **kwargs):  # pylint: disable=redefined-builtin
        """Upserts an Azure Cosmos resource and returns it.
        :param dict body:
        :param str path:
        :param str typ:
        :param str id:
        :param dict initial_headers:
        :param dict options:
            The request options for the request.
        :return:
            The upserted Azure Cosmos resource.
        :rtype:
            dict
        """
        if options is None:
            options = {}

        initial_headers = initial_headers or self.default_headers
        headers = base.GetHeaders(self, initial_headers, "post", path, id, typ, options)

        # The IsUpsert header distinguishes an upsert from a plain create POST.
        headers[http_constants.HttpHeaders.IsUpsert] = True

        # Upsert will use WriteEndpoint since it uses POST operation
        request_params = _request_object.RequestObject(typ, documents._OperationType.Upsert)
        result, self.last_response_headers = self.__Post(path, request_params, body, headers, **kwargs)
        # update session for write request
        self._UpdateSessionIfRequired(headers, result, self.last_response_headers)
        return result
    def Replace(self, resource, path, typ, id, initial_headers, options=None, **kwargs):  # pylint: disable=redefined-builtin
        """Replaces an Azure Cosmos resource and returns it.
        :param dict resource:
        :param str path:
        :param str typ:
        :param str id:
        :param dict initial_headers:
        :param dict options:
            The request options for the request.
        :return:
            The new Azure Cosmos resource.
        :rtype:
            dict
        """
        if options is None:
            options = {}

        initial_headers = initial_headers or self.default_headers
        headers = base.GetHeaders(self, initial_headers, "put", path, id, typ, options)
        # Replace will use WriteEndpoint since it uses PUT operation
        request_params = _request_object.RequestObject(typ, documents._OperationType.Replace)
        result, self.last_response_headers = self.__Put(path, request_params, resource, headers, **kwargs)

        # update session, because this request mutates data on the server side
        self._UpdateSessionIfRequired(headers, result, self.last_response_headers)
        return result
def Read(self, path, typ, id, initial_headers, options=None, **kwargs): # pylint: disable=redefined-builtin
"""Reads a Azure Cosmos resource and returns it.
:param str path:
:param str typ:
:param str id:
:param dict initial_headers:
:param dict options:
The request options for the request.
:return:
The upserted Azure Cosmos resource.
:rtype:
dict
"""
if options is None:
options = {}
initial_headers = initial_headers or self.default_headers
headers = base.GetHeaders(self, initial_headers, "get", path, id, typ, options)
# Read will use ReadEndpoint since it uses GET operation
request_params = _request_object.RequestObject(typ, documents._OperationType.Read)
result, self.last_response_headers = self.__Get(path, request_params, headers, **kwargs)
return result
def DeleteResource(self, path, typ, id, initial_headers, options=None, **kwargs): # pylint: disable=redefined-builtin
"""Deletes a Azure Cosmos resource and returns it.
:param str path:
:param str typ:
:param str id:
:param dict initial_headers:
:param dict options:
The request options for the request.
:return:
The deleted Azure Cosmos resource.
:rtype:
dict
"""
if options is None:
options = {}
initial_headers = initial_headers or self.default_headers
headers = base.GetHeaders(self, initial_headers, "delete", path, id, typ, options)
# Delete will use WriteEndpoint since it uses DELETE operation
request_params = _request_object.RequestObject(typ, documents._OperationType.Delete)
result, self.last_response_headers = self.__Delete(path, request_params, headers, **kwargs)
# update session for request mutates data on server side
self._UpdateSessionIfRequired(headers, result, self.last_response_headers)
return result
def __Get(self, path, request_params, req_headers, **kwargs):
"""Azure Cosmos 'GET' http request.
:params str url:
:params str path:
:params dict req_headers:
:return:
Tuple of (result, headers).
:rtype:
tuple of (dict, dict)
"""
request = self.pipeline_client.get(url=path, headers=req_headers)
return synchronized_request.SynchronizedRequest(
client=self,
request_params=request_params,
global_endpoint_manager=self._global_endpoint_manager,
connection_policy=self.connection_policy,
pipeline_client=self.pipeline_client,
request=request,
request_data=None,
**kwargs
)
def __Post(self, path, request_params, body, req_headers, **kwargs):
"""Azure Cosmos 'POST' http request.
:params str url:
:params str path:
:params (str, unicode, dict) body:
:params dict req_headers:
:return:
Tuple of (result, headers).
:rtype:
tuple of (dict, dict)
"""
request = self.pipeline_client.post(url=path, headers=req_headers)
return synchronized_request.SynchronizedRequest(
client=self,
request_params=request_params,
global_endpoint_manager=self._global_endpoint_manager,
connection_policy=self.connection_policy,
pipeline_client=self.pipeline_client,
request=request,
request_data=body,
**kwargs
)
def __Put(self, path, request_params, body, req_headers, **kwargs):
"""Azure Cosmos 'PUT' http request.
:params str url:
:params str path:
:params (str, unicode, dict) body:
:params dict req_headers:
:return:
Tuple of (result, headers).
:rtype:
tuple of (dict, dict)
"""
request = self.pipeline_client.put(url=path, headers=req_headers)
return synchronized_request.SynchronizedRequest(
client=self,
request_params=request_params,
global_endpoint_manager=self._global_endpoint_manager,
connection_policy=self.connection_policy,
pipeline_client=self.pipeline_client,
request=request,
request_data=body,
**kwargs
)
def __Delete(self, path, request_params, req_headers, **kwargs):
"""Azure Cosmos 'DELETE' http request.
:params str url:
:params str path:
:params dict req_headers:
:return:
Tuple of (result, headers).
:rtype:
tuple of (dict, dict)
"""
request = self.pipeline_client.delete(url=path, headers=req_headers)
return synchronized_request.SynchronizedRequest(
client=self,
request_params=request_params,
global_endpoint_manager=self._global_endpoint_manager,
connection_policy=self.connection_policy,
pipeline_client=self.pipeline_client,
request=request,
request_data=None,
**kwargs
)
def QueryFeed(self, path, collection_id, query, options, partition_key_range_id=None, **kwargs):
"""Query Feed for Document Collection resource.
:param str path:
Path to the document collection.
:param str collection_id:
Id of the document collection.
:param (str or dict) query:
:param dict options:
The request options for the request.
:param str partition_key_range_id:
Partition key range id.
:rtype:
tuple
"""
return (
self.__QueryFeed(
path,
"docs",
collection_id,
lambda r: r["Documents"],
lambda _, b: b,
query,
options,
partition_key_range_id,
**kwargs
),
self.last_response_headers,
)
def __QueryFeed(
self,
path,
typ,
id_,
result_fn,
create_fn,
query,
options=None,
partition_key_range_id=None,
response_hook=None,
is_query_plan=False,
**kwargs
):
"""Query for more than one Azure Cosmos resources.
:param str path:
:param str typ:
:param str id_:
:param function result_fn:
:param function create_fn:
:param (str or dict) query:
:param dict options:
The request options for the request.
:param str partition_key_range_id:
Specifies partition key range id.
:param function response_hook:
:param bool is_query_plan:
Specififes if the call is to fetch query plan
:rtype:
list
:raises SystemError: If the query compatibility mode is undefined.
"""
if options is None:
options = {}
if query:
__GetBodiesFromQueryResult = result_fn
else:
initial_headers = self.default_headers.copy()
# Copy to make sure that default_headers won't be changed.
if query is None:
# Query operations will use ReadEndpoint even though it uses GET(for feed requests)
request_params = _request_object.RequestObject(typ,
documents._OperationType.QueryPlan if is_query_plan else documents._OperationType.ReadFeed)
headers = base.GetHeaders(self, initial_headers, "get", path, id_, typ, options, partition_key_range_id)
result, self.last_response_headers = self.__Get(path, request_params, headers, **kwargs)
if response_hook:
response_hook(self.last_response_headers, result)
return __GetBodiesFromQueryResult(result)
query = self.__CheckAndUnifyQueryFormat(query)
initial_headers[http_constants.HttpHeaders.IsQuery] = "true"
if not is_query_plan:
initial_headers[http_constants.HttpHeaders.IsQuery] = "true"
if (
self._query_compatibility_mode == CosmosClientConnection._QueryCompatibilityMode.Default
or self._query_compatibility_mode == CosmosClientConnection._QueryCompatibilityMode.Query
):
initial_headers[http_constants.HttpHeaders.ContentType] = runtime_constants.MediaTypes.QueryJson
elif self._query_compatibility_mode == CosmosClientConnection._QueryCompatibilityMode.SqlQuery:
initial_headers[http_constants.HttpHeaders.ContentType] = runtime_constants.MediaTypes.SQL
else:
raise SystemError("Unexpected query compatibility mode.")
# Query operations will use ReadEndpoint even though it uses POST(for regular query operations)
request_params = _request_object.RequestObject(typ, documents._OperationType.SqlQuery)
req_headers = base.GetHeaders(self, initial_headers, "post", path, id_, typ, options, partition_key_range_id)
result, self.last_response_headers = self.__Post(path, request_params, query, req_headers, **kwargs)
if response_hook:
response_hook(self.last_response_headers, result)
return __GetBodiesFromQueryResult(result)
def __CheckAndUnifyQueryFormat(self, query_body):
"""Checks and unifies the format of the query body.
:raises TypeError: If query_body is not of expected type (depending on the query compatibility mode).
:raises ValueError: If query_body is a dict but doesn\'t have valid query text.
:raises SystemError: If the query compatibility mode is undefined.
:param (str or dict) query_body:
:return:
The formatted query body.
:rtype:
dict or string
"""
if (
self._query_compatibility_mode == CosmosClientConnection._QueryCompatibilityMode.Default
or self._query_compatibility_mode == CosmosClientConnection._QueryCompatibilityMode.Query
):
if not isinstance(query_body, dict) and not isinstance(query_body, six.string_types):
raise TypeError("query body must be a dict or string.")
if isinstance(query_body, dict) and not query_body.get("query"):
raise ValueError('query body must have valid query text with key "query".')
if isinstance(query_body, six.string_types):
return {"query": query_body}
elif (
self._query_compatibility_mode == CosmosClientConnection._QueryCompatibilityMode.SqlQuery
and not isinstance(query_body, six.string_types)
):
raise TypeError("query body must be a string.")
else:
raise SystemError("Unexpected query compatibility mode.")
return query_body
@staticmethod
# Adds the partition key to options
# Extracts the partition key from the document using the partitionKey definition
# Navigates the document to retrieve the partitionKey specified in the partition key parts
    def _UpdateSessionIfRequired(self, request_headers, response_result, response_headers):
        """
        Updates session if necessary.

        :param dict request_headers: Headers the originating request was sent with.
        :param dict response_result: Parsed body of the server response.
        :param dict response_headers: Headers returned by the server.

        :return:
            None, but updates the client session if necessary.

        """

        # if this request was made with consistency level as session, then update the session
        if response_result is None or response_headers is None:
            # Nothing came back (e.g. a failed or empty response); no session token to record.
            return

        is_session_consistency = False
        if http_constants.HttpHeaders.ConsistencyLevel in request_headers:
            if documents.ConsistencyLevel.Session == request_headers[http_constants.HttpHeaders.ConsistencyLevel]:
                is_session_consistency = True

        if is_session_consistency:
            # update session
            self.session.update_session(response_result, response_headers)
@staticmethod
| [
171,
119,
123,
2,
383,
17168,
13789,
357,
36393,
8,
198,
2,
15069,
357,
66,
8,
1946,
5413,
10501,
198,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
198,
2,
286,
428,
3788,
290,
... | 2.408347 | 32,443 |
#!/usr/bin/env python3
# Copyright (c) 2020 The Zcash developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://www.opensource.org/licenses/mit-license.php .
from test_framework.test_framework import BitcoinTestFramework
from test_framework.mininode import (
nuparams,
fundingstream,
)
from test_framework.util import (
assert_equal,
bitcoind_processes,
connect_nodes,
initialize_chain_clean,
start_node,
BLOSSOM_BRANCH_ID,
HEARTWOOD_BRANCH_ID,
CANOPY_BRANCH_ID,
)
if __name__ == '__main__':
CoinbaseFundingStreamsTest().main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
15069,
357,
66,
8,
12131,
383,
1168,
30350,
6505,
198,
2,
4307,
6169,
739,
262,
17168,
3788,
5964,
11,
766,
262,
19249,
198,
2,
2393,
27975,
45761,
393,
3740,
1378,
2503,
13,
... | 2.747788 | 226 |
import re
import datetime as dt
| [
11748,
302,
198,
11748,
4818,
8079,
355,
288,
83,
628,
628,
628
] | 3.083333 | 12 |
#! /usr/bin/env python3
# Copyright (c) 2021 Grumpy Cat Software S.L.
#
# This Source Code is licensed under the MIT 2.0 license.
# the terms can be found in LICENSE.md at the root of
# this project, or at http://mozilla.org/MPL/2.0/.
# %%
# 7f316466-ba56-4680-b178-d9138aec6d16
import matplotlib.pyplot as plt
import shapelets.compute as sc
from shapelets.data import load_dataset
import warnings
warnings.filterwarnings("ignore")
# Load the two hourly time series used throughout this notebook-style demo.
day_ahead_prices = load_dataset('day_ahead_prices')
solar_forecast = load_dataset('solar_forecast')

# Raw series: energy prices on top, solar production forecast below.
fig, ax = plt.subplots(2, figsize=(18, 16), sharex=True)
ax[0].plot(day_ahead_prices)
ax[0].set_title('Day-ahead enery prices')
ax[0].set_ylabel("EUR/MW")
ax[1].plot(solar_forecast)
ax[1].set_title('Solar Production forecast')
ax[1].set_ylabel("MW")
ax[1].set_xlabel("Day of year (1h freq)")
plt.show()

# %%
# Fold each hourly series into a (24 x days) matrix: one column per day,
# one row per hour slot, so daily patterns become visible as an image.
gb = 24
day_ahead_prices_by_day = sc.unpack(day_ahead_prices, gb, 1, gb, 1)
solar_forecast_by_day = sc.unpack(solar_forecast, gb, 1, gb, 1)
fig, ax = plt.subplots(2, figsize=(18, 16), sharex=True)
img1 = ax[0].imshow(day_ahead_prices_by_day, cmap='viridis', aspect='auto')
ax[0].set_title('Day-ahead enery prices')
ax[0].set_ylabel("Hour slot")
fig.colorbar(img1, ax=ax[0])
img2 = ax[1].imshow(solar_forecast_by_day, cmap='viridis', aspect='auto')
ax[1].set_title('Solar Production forecast')
ax[1].set_ylabel("Hour slot")
ax[1].set_xlabel("Day of year")
fig.colorbar(img2, ax=ax[1])
plt.show()

# %%
# First and second discrete differences of both matrices, as a 2x2 grid.
fig, ax = plt.subplots(2, 2, figsize=(24, 16), sharex=True)
img1 = ax[0, 0].imshow(sc.diff1(day_ahead_prices_by_day), cmap='viridis', aspect='auto')
ax[0, 0].set_title('Diff1 day-ahead prices')
ax[0, 0].set_ylabel("Hour slot")
fig.colorbar(img1, ax=ax[0, 0])
img2 = ax[0, 1].imshow(sc.diff2(day_ahead_prices_by_day), cmap='viridis', aspect='auto')
ax[0, 1].set_title('Diff2 day-ahead prices')
fig.colorbar(img2, ax=ax[0, 1])
img3 = ax[1, 0].imshow(sc.diff1(solar_forecast_by_day), cmap='viridis', aspect='auto')
ax[1, 0].set_title('Diff1 solar forecast')
ax[1, 0].set_ylabel("Hour slot")
ax[1, 0].set_xlabel("Day of year")
fig.colorbar(img3, ax=ax[1, 0])
img4 = ax[1, 1].imshow(sc.diff2(solar_forecast_by_day), cmap='viridis', aspect='auto')
ax[1, 1].set_title('Diff2 solar forecast')
ax[1, 1].set_xlabel("Day of year")
fig.colorbar(img4, ax=ax[1, 1])
plt.show()

# %%
# Smooth the daily solar matrix with a normalized 5-point cross kernel,
# then flatten the result back into a 1-D series with sc.pack.
filter = sc.array([
    [0, 1, 0],
    [1, 1, 1],
    [0, 1, 0]
], dtype="float32")
filter /= sc.sum(filter)
r = sc.convolve2(solar_forecast_by_day, filter, 'default')
plt.imshow(r, cmap='magma', aspect='auto')
plt.colorbar()
plt.show()
rr = sc.pack(r, r.size, 1, gb, 1, gb, 1)
fig, ax = plt.subplots(figsize=(18, 8))
ax.plot(rr)
plt.show()
# Compare one week (24*7 samples) of smoothed vs. raw data, and their
# spectral derivatives.
points = 24 * 7
fig, ax = plt.subplots(2, 1, figsize=(18, 8))
ax[0].plot(rr[:points])
ax[1].plot(solar_forecast_by_day[:points])
plt.show()
fig, ax = plt.subplots(2, 1, figsize=(18, 8))
ax[0].plot(sc.fft.spectral_derivative(rr[:points]))
ax[1].plot(sc.fft.spectral_derivative(solar_forecast_by_day[:points]))
plt.show()
# %%
dataidx = sc.iota(data.size, dtype=data.dtype)
reduced = sc.dimensionality.visvalingam(dataidx, data, 1000)
fig, ax = plt.subplots(figsize=(18, 8))
ax.plot(reduced[:, 0], reduced[:, 1])
plt.show()
# %%
gb = 24
hour_day = sc.unpack(data, gb, 1, gb, 1)
plt.imshow(hour_day, cmap='viridis', aspect='auto')
plt.colorbar()
plt.show()
# %%
r = sc.convolve1(hour_day, [1, -2, 1.], 'default')
plt.imshow(r, cmap='viridis', aspect='auto')
plt.colorbar()
plt.show()
# %%
plt.imshow(sc.diff2(hour_day), cmap='viridis', aspect='auto')
plt.colorbar()
plt.show()
# %%
plt.imshow(sc.diff1(hour_day), cmap='viridis', aspect='auto')
plt.colorbar()
plt.show()
# %%
svd = sc.svd(hour_day)
low_rank = svd.low_rank(7)
plt.imshow(low_rank, cmap='viridis', aspect='auto')
plt.colorbar()
plt.show()
# reconstructed = sc.pack(low_rank, low_rank.size, 1, gb, 1, gb, 1)
# fig, ax = plt.subplots(figsize=(18, 8))
# ax.plot(reconstructed)
# plt.show()
# fig, ax = plt.subplots(figsize=(18, 8))
# ax.plot(data[0:reconstructed.shape[0]] - reconstructed)
# plt.show()
# %%
fig, ax = plt.subplots(figsize=(18, 8))
ax.bar(svd.pct)
ax.plot(svd.acc_pct)
plt.show()
# %%
plt.plot(svd.u[:, 0])
plt.plot(svd.u[:, 1])
plt.show()
plt.plot(svd.vt[0, :].T)
plt.plot(svd.vt[1, :].T)
plt.show()
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
15069,
357,
66,
8,
33448,
1902,
32152,
5181,
10442,
311,
13,
43,
13,
198,
2,
198,
2,
770,
8090,
6127,
318,
11971,
739,
262,
17168,
362,
13,
15,
5964,
13,
198,
2,
262,
... | 2.187757 | 1,944 |
import numpy as np
import matplotlib.pyplot as plt
import lmdb
from PIL import Image
import caffe
import cv2
import pickle
from tqdm import tqdm
#Change paths to each files"
deployfile = 'yours/caffe/examples/sketch_stroke/deploy_next_stroke.prototxt'
weightfile = 'yours/next_stroke_snapshot7_1024_iter_4140000.caffemodel'
meanfile = 'yours/25_gqstroke_train_mean.binaryproto'
save_folder = 'yours/25_feature_map/'
list_folder = '/media/hci-gpu/Plextor1tb/google_quick_draw/stroke/train_set_data_list/'
img_folder = '/media/hci-gpu/Plextor1tb/google_quick_draw/stroke/imgData/'

#net
# NOTE(review): Python 2 script (print statements) built against the Caffe API.
caffe.set_mode_gpu()
caffe.set_device(0)
net = caffe.Net(deployfile, weightfile, caffe.TEST)

print "net setting"
# Load the dataset mean image stored as a binaryproto blob.
blob = caffe.proto.caffe_pb2.BlobProto()
data = open( meanfile, 'rb' ).read()
blob.ParseFromString(data)
mean = np.array( caffe.io.blobproto_to_array(blob) )

print "trans setting"
# create transformer for the input called 'data'
transformer = caffe.io.Transformer({'data1': net.blobs['data1'].data.shape})
transformer.set_transpose('data1', (2,0,1))  # move image channels to outermost dimension
transformer.set_mean('data1', mean[0])            # subtract the dataset-mean value in each channel
transformer.set_channel_swap('data1', (2,1,0))  # swap channels from RGB to BGR
transformer.set_input_scale('data1', 0.00390625)

print "let's start"
#hash map initialize
dataset = []
# 345 classes x 5 stroke stages; for each (stage, class) pair collect up to
# 2000 binarized 'ip1' feature maps of correctly classified sketches and
# pickle them to one file per pair.
for j in tqdm(range(345)):
    for s in range(0,5):
        dataset = []
        failed_dataset = []
        f=open(list_folder+str(s)+"_"+str(j)+".txt",'r')
        count = 0
        for i in tqdm(range(20000)):
            line = f.readline()
            label = j
            # Strip the fixed 8-char prefix and the trailing ",<label>\n" suffix
            # to recover the image filename from the list line.
            filename = line[8:len(line)-len(str(j))-2]
            image = cv2.imread(img_folder + filename)
            transformed_image = transformer.preprocess('data1', image)
            net.blobs['data1'].data[...] = transformed_image
            # Clip signal for the recurrent part: 0 resets state on the first
            # stroke stage, 1 carries state on later stages.
            if s==0:
                net.blobs['raw_clip_data'].data[...] = 0
            else:
                net.blobs['raw_clip_data'].data[...] = 1
            out = net.forward()
            predicts = out['prob']
            predict = predicts.argmax()
            if int(label) == int(predict):
                # Binarize the fc feature map by sign (positive -> 1, else 0).
                feature_map = net.blobs['ip1'].data
                x = np.sign(feature_map)
                x_bit = np.maximum(x,0,x).astype(np.bool_)
                x_binary = np.packbits(x_bit)
                # NOTE(review): x_binary (packed bits) is computed but x_bit is
                # what gets stored — confirm which form was intended.
                temp = {'filename': filename, 'feature_map':x_bit}
                dataset.append(temp)
                count += 1
            else:
                feature_map = net.blobs['ip1'].data
                x = np.sign(feature_map)
                x_bit = np.maximum(x,0,x).astype(np.bool_)
                x_binary = np.packbits(x_bit)
                temp = {'filename': filename, 'feature_map':x_bit}
                failed_dataset.append(temp)
            if count > 2000:
                break
        if count < 2000:
            print count, "need more data"
            # Pad with misclassified samples to reach 2000 entries.
            dataset = dataset + failed_dataset[0:2000-count]
        f.close()
        with open(save_folder+str(s)+"_"+str(label)+".list", 'ab') as sf:
            pickle.dump(dataset, sf, protocol=pickle.HIGHEST_PROTOCOL)
| [
11748,
299,
32152,
355,
45941,
220,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
300,
9132,
65,
198,
6738,
350,
4146,
1330,
7412,
198,
11748,
21121,
198,
11748,
269,
85,
17,
198,
11748,
2298,
293,
198,
673... | 2.058748 | 1,566 |
# File to check that the two different action-value functions (MC estimate and the action-value function in the
# estimated MDP) are actually different functions, see Section 3.2.2 in "Evaluation of Safe Policy Improvement with
# Soft Baseline Bootstrapping" by Philipp Scholl.
import os
import sys
import numpy as np
import pandas as pd
import configparser
# Set directory as the path to Evaluation-of-Safe-Policy-Improvement-with-Baseline-Bootstrapping
# directory = os.path.dirname(os.path.dirname(os.path.expanduser(__file__)))
directory = r'C:\Users\phili\PycharmProjects\Evaluation-of-Safe-Policy-Improvement-with-Baseline-Bootstrapping'
sys.path.append(directory)
path_config = configparser.ConfigParser()
path_config.read(os.path.join(directory, 'paths.ini'))
spibb_path = path_config['PATHS']['spibb_path']
sys.path.append(spibb_path)
import garnets
import spibb_utils
import spibb
import modelTransitions
# Experiment grid: number of sampled trajectories and baseline-performance
# ratios (see Section 3.2.2 of the referenced paper).
nb_trajectories_list = [10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000]
delta = 0.05
ratios = [0.1, 0.3, 0.5, 0.7, 0.9]  # [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
seed = 1234
np.random.seed(seed)
gamma = 0.7
nb_states = 50
nb_actions = 4
nb_next_state_transition = 4
env_type = 1  # 1 for one terminal state, 2 for two terminal states
self_transitions = 0

# For each ratio, compare the MC estimate of q_pi_b against the action-value
# function of pi_b in the estimated MDP, as a function of the batch size.
results = []
for ratio in ratios:
    garnet = garnets.Garnets(nb_states, nb_actions, nb_next_state_transition,
                             env_type=env_type, self_transitions=self_transitions)

    # Baseline targets: the softmax policy aims halfway between the baseline
    # ratio and optimal performance.
    softmax_target_perf_ratio = (ratio + 1) / 2
    baseline_target_perf_ratio = ratio
    pi_b, q_pi_b, pi_star_perf, pi_b_perf, pi_rand_perf = \
        garnet.generate_baseline_policy(gamma,
                                        softmax_target_perf_ratio=softmax_target_perf_ratio,
                                        baseline_target_perf_ratio=baseline_target_perf_ratio, log=False)

    reward_current = garnet.compute_reward()
    current_proba = garnet.transition_function
    r_reshaped = spibb_utils.get_reward_model(current_proba, reward_current)
    results_traj = []
    for nb_trajectories in nb_trajectories_list:
        # Generate trajectories, both stored as trajectories and (s,a,s',r) transition samples
        trajectories, batch_traj = spibb_utils.generate_batch(nb_trajectories, garnet, pi_b)

        # Computation of the transition errors
        model = modelTransitions.ModelTransitions(batch_traj, nb_states, nb_actions)
        reward_model = spibb_utils.get_reward_model(model.transitions, reward_current)

        # q_pi_b_est is the MC estimation of the action-value function
        q_pi_b_est = spibb_utils.compute_q_pib_est_episodic(gamma=gamma, nb_actions=nb_actions, nb_states=nb_states,
                                                            batch=trajectories)

        # q_m_hat is the action-value function in the estimated MDP.
        _, q_m_hat = spibb.policy_evaluation_exact(pi_b, reward_model, model.transitions, gamma)
        # L1 distance between the two q-functions for this batch size.
        distance = np.linalg.norm(q_pi_b_est - q_m_hat, ord=1)
        results_traj.append(distance)
        print(
            f'For ratio {ratio} and {nb_trajectories} trajectories, the L1 distance in the two calculations of q of '
            f'pi_b is {distance}.')
        # f'(Unvisited state action pairs: {nb_not_visited_state_1action_pairs})')
    results.append(results_traj)
| [
2,
9220,
284,
2198,
326,
262,
734,
1180,
2223,
12,
8367,
5499,
357,
9655,
8636,
290,
262,
2223,
12,
8367,
2163,
287,
262,
198,
2,
6108,
337,
6322,
8,
389,
1682,
1180,
5499,
11,
766,
7275,
513,
13,
17,
13,
17,
287,
366,
36,
2100,... | 2.35014 | 1,428 |
# -*- coding: utf-8 -*-
"""
Created on Fri May 17 20:19:13 2019
@author: Parikshith.H
"""
# NOTE(review): `str` shadows the builtin string type throughout this demo —
# acceptable in a slicing tutorial, but avoid in real code.
str = "Python"
print(str)
# Indexing returns a one-character string; ch holds str[1].
print(str[0])
ch = str[1]
print(ch)
# =============================================================================
# #output:
# Python
# P
# y
# =============================================================================

# Slicing s[start:stop] returns a new string; the stop index is exclusive.
str = "Python"
print(str[1:4])
# =============================================================================
# #output:
# yth
# =============================================================================

str2 = str[1:5]
print(str2)
# =============================================================================
# #output:
# ytho
# =============================================================================

# Strings are immutable: "changing" one rebinds the name to a new object.
str = "Python"
print(str)
str = "python.py" #entire string is changed
print(str)
print(str[1:4])
print(str) #original string remains same
# =============================================================================
# #output:
# Python
# python.py
# yth
# python.py
# =============================================================================

# Full slice syntax is s[start:stop:step]; a negative step walks backwards.
s = "good morning"
print(s[1:])
print(s[:8])
print(s[:10])
print(s[0:8])
print(s[0:8:2]) #here increment by 2
print(s[9:1:-1]) #starting index is 9,ending index is 2,-1 is reverse order
print(s[9:1:-3])
print(s[: : -1])
# =============================================================================
# #output:
# ood morning
# good mor
# good morni
# good mor
# go o
# inrom do
# iod
# gninrom doog
# =============================================================================
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
19480,
1737,
1596,
1160,
25,
1129,
25,
1485,
13130,
198,
198,
31,
9800,
25,
2547,
72,
50133,
342,
13,
39,
198,
37811,
628,
220,
198,
2536,
796,... | 3.933661 | 407 |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import traceback
from functools import reduce
from typing import Any, Optional, Sequence, Union
import numpy as np
import oneflow
import oneflow._oneflow_internal
import oneflow._oneflow_internal.oneflow.core.register.logical_blob_id as lbi_util
import oneflow.core.job.sbp_parallel_pb2 as sbp_parallel_pb
import oneflow.core.operator.interface_blob_conf_pb2 as inter_face_blob_conf_util
import oneflow.core.operator.op_conf_pb2 as op_conf_util
import oneflow.framework.c_api_util as c_api_util
import oneflow.framework.compile_context as compile_context
import oneflow.framework.distribute as distribute_util
import oneflow.framework.id_util as id_util
import oneflow.framework.placement_context as placement_ctx
import oneflow.framework.remote_blob as remote_blob_util
| [
37811,
198,
15269,
12131,
383,
1881,
37535,
46665,
13,
1439,
2489,
10395,
13,
198,
198,
26656,
15385,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
5832,
743,
407,
779,
428,
2393,
2845,
287,
11846,
... | 3.503817 | 393 |
import asyncio
import sys
import time
import socketio
loop = asyncio.get_event_loop()
sio = socketio.AsyncClient()
start_timer = None
guild_id = 607637793107345431
@sio.event
# await sio.emit('pool_all_request', {'type': 'member', 'guildId': guild_id, '_id': 2})
#await sio.emit('pool_all_request', {'type': 'role', 'guildId': guild_id, '_id': 3})
#await sio.emit('pool_all_request', {'type': 'autoResponse', 'guildId': guild_id, '_id': 5})
#await sio.emit('pool_request', {'type': 'member', 'guildId': guild_id, 'ids': ['214037134477230080', '1111111111111'], '_id': 7})
#await sio.emit('pool_request', {'type': 'user', 'guildId': None, 'ids': ['214037134477230080', '109462069484486656'], '_id': 9})
#await sio.emit('pool_request', {'type': 'customEmoji', 'guildId': guild_id, 'ids': ['711543547145097377'], '_id': 9})
#await sio.emit('pool_all_request', {'type': 'customEmoji', 'guildId': guild_id, '_id': 11})
# await th
@sio.event
@sio.event
@sio.event
@sio.event
@sio.event
if __name__ == '__main__':
loop.run_until_complete(start_server())
| [
11748,
30351,
952,
198,
11748,
25064,
198,
11748,
640,
198,
11748,
17802,
952,
198,
198,
26268,
796,
30351,
952,
13,
1136,
62,
15596,
62,
26268,
3419,
198,
82,
952,
796,
17802,
952,
13,
42367,
11792,
3419,
198,
9688,
62,
45016,
796,
6... | 2.273859 | 482 |
import numpy as np
'''
A periodic domain with the left-most cell and the left-most face having index 0.
'''
| [
11748,
299,
32152,
355,
45941,
198,
198,
7061,
6,
198,
32,
27458,
7386,
351,
262,
1364,
12,
1712,
2685,
290,
262,
1364,
12,
1712,
1986,
1719,
6376,
657,
13,
198,
7061,
6,
628
] | 3.333333 | 33 |
# Global
import mxnet as mx
from typing import Optional
# Local
from ivy.functional.backends.mxnet import _handle_flat_arrays_in_out
@_handle_flat_arrays_in_out
| [
2,
8060,
198,
11748,
285,
87,
3262,
355,
285,
87,
198,
6738,
19720,
1330,
32233,
198,
198,
2,
10714,
198,
6738,
21628,
88,
13,
45124,
13,
1891,
2412,
13,
36802,
3262,
1330,
4808,
28144,
62,
38568,
62,
3258,
592,
62,
259,
62,
448,
... | 2.813559 | 59 |
"""Makes 1 epoch fit to 1k steps in TensorBoard::
from teebee import Teebee
# Initialize a Teebee object with the per-epoch length (same with the
# length of the loader) and the path where TensorBoard logs will be stored.
tb = Teebee(len(loader), '/tmp/tb/run-20190219')
for epoch in range(1000):
# Let tb follow the current epoch.
tb.epoch(epoch)
# Report at the exact epoch.
tb.scalar('lr', lr)
for step, batch in enumerate(loader):
...
# Let tb follow the current in-epoch step. It will return True
# if logs should be reported at this step.
if tb.step(step):
# Calculate data to report in this block.
tb.scalar('loss', loss.item())
"""
import logging
from tensorboardX import SummaryWriter
from teebee.__about__ import __version__ # noqa
__all__ = ['Teebee']
class Teebee:
"""A TensorBoard writer that tracks training epochs and steps. It reports
1 epoch as 1k global steps in TensorBoard.
It disallows inverted steps. Always increase epoch and step.
Note:
The name of "Teebee" came from simply "TB" which is an acronym for
"TensorBoard".
"""
__slots__ = (
'epoch_length',
'writer',
'_epoch',
'_step',
'_global_step_increased',
)
def close(self) -> None:
"""Closes the underlying tensorboardX writer."""
if self.writer is not None:
self.writer.close()
def epoch(self, epoch: int) -> None:
"""Sets the current epoch and resets the step::
for epoch in range(epochs):
tb.epoch(epoch)
...
Raises:
ValueError: If the given epoch is less than the previous epoch.
"""
if epoch < self._epoch:
raise ValueError('already passed epoch: %d (new) < %d (old)'
'' % (epoch, self._epoch))
self._epoch = epoch
self._step = -1
def step(self, step: int) -> bool:
"""Sets the current step in an epoch and returns whether it increases
the global step.
for step, inputs in enumerate(loader):
...
if tb.step(step):
tb.scalar('loss', loss.item())
The interval of steps is ``[0, epoch_length-1)``.
If you report something when it returns ``False``, multiple points will
be overlapped at the same global step::
# DO NOT DO LIKE IT. ALWAYS USE "if tb.step(step):".
tb.step(step)
tb.scalar('loss', loss.item())
Raises:
ValueError: If the given step is greater than or equals to the
epoch length.
ValueError: If the given step is less than the previous step.
"""
if step >= self.epoch_length:
raise OverflowError('step is out of epoch length: '
'%d (step) >= %d (epoch length)'
'' % (step, self.epoch_length))
if step < self._step:
raise ValueError('already passed step: %d (new) < %d (old)'
'' % (step, self._step))
if self._step == step:
# When the step not changed, return the cached result.
return self._global_step_increased
if self._step == -1:
# First step() per epoch always returns True.
prev_global_step = -1
else:
prev_global_step = self.global_step()
self._step = step
self._global_step_increased = (self.global_step() != prev_global_step)
return self._global_step_increased
def global_step(self) -> int:
"""Calculates the current global step. 1k of global step means 1
epoch.
Raises:
ValueError: If :meth:`epoch` has never been called.
"""
if self._epoch < 0:
raise ValueError('epoch never set')
epoch_f = float(self._epoch)
if self._step >= 0:
epoch_f += (self._step+1) / self.epoch_length
return int(epoch_f * 1000)
@property
def scalar(self, name: str, value: float) -> None:
"""Writes scalar data."""
if self.writer is None:
self._l.debug('[%d] %s: %.5f', self.global_step(), name, value)
else:
self.writer.add_scalar(name, value, self.global_step())
def text(self, name: str, text: str) -> None:
"""Writes text data."""
if self.writer is None:
self._l.debug('[%d] %s:\n%s', self.global_step(), name, text)
else:
self.writer.add_text(name, text, self.global_step())
| [
37811,
44,
1124,
352,
36835,
4197,
284,
352,
74,
4831,
287,
309,
22854,
29828,
3712,
628,
220,
220,
220,
422,
30479,
20963,
1330,
49350,
20963,
628,
220,
220,
220,
1303,
20768,
1096,
257,
49350,
20963,
2134,
351,
262,
583,
12,
538,
53... | 2.175583 | 2,187 |
from spaceone.monitoring.connector.google_cloud_connector import GoogleCloudConnector | [
6738,
2272,
505,
13,
41143,
278,
13,
8443,
273,
13,
13297,
62,
17721,
62,
8443,
273,
1330,
3012,
18839,
34525
] | 4.25 | 20 |
#!/usr/bin/env python3
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
sns.set(color_codes=True)
# Simple plot of linear, quadratic, and cubic curves
x = np.linspace(0, 2, 100)
plt.plot(x, x, label='linear')
plt.plot(x, x**2, label='quadratic')
plt.plot(x, x**3, label='cubic')
plt.xlabel('x label')
plt.ylabel('y label')
plt.title("Simple Plot")
plt.legend(loc="best")
plt.show()
# Histogram
x = np.random.normal(size=1000)
sns.distplot(x, bins=20, kde=True, rug=False, label="Histogram w/o Density")
sns.axlabel("Value", "Frequency")
plt.title("Histogram of a Random Sample from a Normal Distribution")
plt.legend()
plt.show()
# Scatter plot
mean, cov = [5, 10], [(1, .5), (.5, 1)]
data = np.random.multivariate_normal(mean, cov, 200)
data_frame = pd.DataFrame(data, columns=["x", "y"])
sns.jointplot(x="x", y="y", data=data_frame, kind="reg").set_axis_labels("x", "y")
plt.suptitle("Joint Plot of Two Variables with Bivariate and Univariate Graphs")
plt.show()
# Pairwise bivariate
#iris = sns.load_dataset("iris")
#sns.pairplot(iris)
#plt.show()
# Linear regression model
tips = sns.load_dataset("tips")
#sns.lmplot(x="total_bill", y="tip", data=tips)
sns.lmplot(x="size", y="tip", data=tips, x_jitter=.15, ci=None)
#sns.lmplot(x="size", y="tip", data=tips, x_estimator=np.mean, ci=None)
plt.show()
# Box plots
sns.boxplot(x="day", y="total_bill", hue="time", data=tips)
#sns.factorplot(x="time", y="total_bill", hue="smoker",
# col="day", data=tips, kind="box", size=4, aspect=.5)
plt.show()
# Bar plots
titanic = sns.load_dataset("titanic")
#sns.barplot(x="sex", y="survived", hue="class", data=titanic)
#sns.countplot(y="deck", hue="class", data=titanic, palette="Greens_d")
#plt.show()
# Non-linear regression model
anscombe = sns.load_dataset("anscombe")
# polynomial
#sns.lmplot(x="x", y="y", data=anscombe.query("dataset == 'II'"),
# order=2, ci=False, scatter_kws={"s": 80})
#plt.show()
# robust to outliers
#sns.lmplot(x="x", y="y", data=anscombe.query("dataset == 'III'"),
# robust=True, ci=False, scatter_kws={"s": 80})
#plt.show()
# logistic
#tips["big_tip"] = (tips.tip / tips.total_bill) > .15
#sns.lmplot(x="total_bill", y="big_tip", data=tips, logistic=True, y_jitter=.03).set_axis_labels("Total Bill", "Big Tip")
#plt.title("Logistic Regression of Big Tip vs. Total Bill")
#plt.show()
# lowess smoother
#sns.lmplot(x="total_bill", y="tip", data=tips, lowess=True)
#plt.show()
# Condition on other variables
#sns.lmplot(x="total_bill", y="tip", hue="smoker", data=tips,
# markers=["o", "x"], palette="Set1")
#sns.lmplot(x="total_bill", y="tip", hue="smoker",
# col="time", row="sex", data=tips)
#plt.show()
# Control shape and size of plot
#sns.lmplot(x="total_bill", y="tip", col="day", data=tips, col_wrap=2, size=3)
#sns.lmplot(x="total_bill", y="tip", col="day", data=tips, aspect=.5)
#plt.show()
# Plotting regression in other contexts
#sns.jointplot(x="total_bill", y="tip", data=tips, kind="reg")
#sns.pairplot(tips, x_vars=["total_bill", "size"], y_vars=["tip"],
# size=5, aspect=.8, kind="reg")
#sns.pairplot(tips, x_vars=["total_bill", "size"], y_vars=["tip"],
# hue="smoker", size=5, aspect=.8, kind="reg")
#plt.show()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
11748,
384,
397,
1211,
355,
3013,
82,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
... | 2.30265 | 1,434 |
default_app_config = (
'oscar.apps.dashboard.reports.config.ReportsDashboardConfig')
| [
12286,
62,
1324,
62,
11250,
796,
357,
201,
198,
220,
220,
220,
705,
418,
7718,
13,
18211,
13,
42460,
3526,
13,
48922,
13,
11250,
13,
37844,
43041,
3526,
16934,
11537,
201,
198
] | 2.84375 | 32 |
import numpy as np
from ..nbcompat import numba
from ..order import Solver
from ..util import classproperty
from .core import AdaptiveRungeKutta, RungeKutta
@numba.njit
class LIRK(RungeKutta, abstract=True):
"""Linearly Implicit Runge-Kutta (LIRK) methods.
K = h f(t + h C, y + A @ K) + dC h^2 df/dt + h df/dy G @ K
"""
Alpha: np.ndarray
Gamma: np.ndarray
B: np.ndarray
B2: np.ndarray
@classproperty
def Alphai(cls):
"""Time coefficients."""
return np.tril(cls.Alpha, k=-1).sum(1)
@classproperty
def Gammai(cls):
"""Time Jacobian coefficients."""
return np.tril(cls.Gamma).sum(1)
@classproperty
@classproperty
@classproperty
@classproperty
@classproperty
@property
@classmethod
class ROS3P(SLIRK, Solver):
"""ROS3P
Lang, J., Verwer, J. ROS3P—An Accurate Third-Order Rosenbrock Solver Designed for Parabolic Problems.
BIT Numerical Mathematics 41, 731–738 (2001). https://doi.org/10.1023/A:1021900219772
"""
gamma = (3 + np.sqrt(3)) / 6
Alpha = np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0]])
Gamma = np.array(
[[gamma, 0, 0], [-1, gamma, 0], [-gamma, 1 / 2 - 2 * gamma, gamma]]
)
B = np.array([2, 0, 1]) / 3
if __name__ == "__main__":
rhs = numba.njit(lambda t, y: -y)
t, y = 0, np.array([1.0])
# s = ROS3P(rhs, t, y, h=0.1)
s = AdaptiveROS3P(rhs, t, y, h=0.1)
s.step()
| [
11748,
299,
32152,
355,
45941,
198,
198,
6738,
11485,
46803,
5589,
265,
1330,
997,
7012,
198,
6738,
11485,
2875,
1330,
4294,
332,
198,
6738,
11485,
22602,
1330,
1398,
26745,
198,
6738,
764,
7295,
1330,
30019,
425,
10987,
469,
42,
315,
8... | 2.166419 | 673 |
from flask import Flask, render_template, url_for, request, redirect
import pickle
import math
app = Flask(__name__)
predicted_score = None
@app.route('/')
@app.route('/predict', methods=['GET', 'POST'])
if __name__ == '__main__':
app.run(debug=True, port=8000)
| [
6738,
42903,
1330,
46947,
11,
8543,
62,
28243,
11,
19016,
62,
1640,
11,
2581,
11,
18941,
198,
11748,
2298,
293,
198,
11748,
10688,
198,
1324,
796,
46947,
7,
834,
3672,
834,
8,
628,
198,
28764,
5722,
62,
26675,
796,
6045,
198,
198,
3... | 2.791667 | 96 |
from django.conf.urls.defaults import *
from socialize.client.resources import ApplicationResource, \
ApiUserResource, CommentResource, EntityResource, LikeResource,\
ShareResource, ViewResource
from tastypie.api import Api
v1_api = Api(api_name='v1')
v1_api.register(ApplicationResource())
v1_api.register(ApiUserResource())
v1_api.register(EntityResource())
# Activites
v1_api.register(CommentResource())
v1_api.register(LikeResource())
v1_api.register(ShareResource())
v1_api.register(ViewResource())
urlpatterns = patterns('',
(r'^', include(v1_api.urls)),
) | [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
13,
12286,
82,
1330,
1635,
198,
6738,
1919,
1096,
13,
16366,
13,
37540,
1330,
15678,
26198,
11,
3467,
198,
220,
220,
220,
5949,
72,
12982,
26198,
11,
18957,
26198,
11,
20885,
26198,
11,
452... | 2.969072 | 194 |
from _scoping_utils import *
A = Var("A")
make(
Patch(
{A: 1},
Thread(
A.should_be(1),
A.assign(2),
),
A.should_be(2),
),
)
make(
PatchLocal(
{A: 1},
Thread(
A.should_empty(),
A.assign(2),
),
A.should_be(1),
),
)
make(
A.assign(1),
Shield(
Thread(
A.should_be(1),
A.assign(2),
),
A.should_be(2),
),
A.should_be(1),
)
make(
A.assign(1),
ShieldLocal(
Thread(
A.should_empty(),
A.assign(2),
),
A.should_be(2),
A.assign(3),
),
A.should_be(2),
)
make(
A.assign(1),
Isolate(
Thread(
A.should_empty(),
A.assign(2),
),
A.should_be(2),
),
A.should_be(1),
)
make(
A.assign(1),
IsolateLocal(
Thread(
A.should_be(1),
A.assign(2),
),
A.should_empty(),
A.assign(3),
),
A.should_be(2),
)
| [
6738,
4808,
1416,
15816,
62,
26791,
1330,
1635,
198,
198,
32,
796,
12372,
7203,
32,
4943,
198,
198,
15883,
7,
198,
220,
220,
220,
17106,
7,
198,
220,
220,
220,
220,
220,
220,
220,
1391,
32,
25,
352,
5512,
198,
220,
220,
220,
220,
... | 1.525424 | 708 |
#!/usr/bin/env
# -*- coding: utf-8 -*-
# Copyright (C) Victor M. Mendiola Lau - All Rights Reserved
# Unauthorized copying of this file, via any medium is strictly prohibited
# Proprietary and confidential
# Written by Victor M. Mendiola Lau <ryuzakyl@gmail.com>, July 2017
import os
import re
import pandas as pd
# importing decorator to be used in all data set loading utilities
from .decorators import load_data_from_pickle
# ------------------------------------------------------------------------
# For more info on parsing numbers in scientific notation see:
# . http://stackoverflow.com/questions/638565/parsing-scientific-notation-sensibly
# regular expression to get only data from files
# JASCO_DATA_REGEX = r'^([-+]?\d*\.\d+|\d+)\s+([+-]?\d+.?\d*(?:[Ee]-\d+)?)'
JASCO_DATA_REGEX = r'^([-+]?\d*\.\d+|\d+)\s+([+\-]?(?:0|[1-9]\d*)(?:\.\d*)?(?:[eE][+\-]?\d+)?)'
# condition that indicates that the content matched is not of interest
JASCO_NOT_MATCH_COND = lambda match: len(match) != 1 or len(match[0]) != 2
# ------------------------------------------------------------------------
# regular expression to get only data from files
SCAN_DATA_REGEX = r'([+-]?\d+.?\d*),\s*([+-]?\d+.?\d*(?:[Ee]-\d+)?)'
# condition that indicates that the content matched is not of interest
SCAN_NOT_MATCH_COND = lambda match: len(match) != 1 or len(match[0]) != 2
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
def build_data_set(data, samples_labels, features_labels, extra_cols=None):
"""Builds a data set from raw data information.
Args:
data: The samples data (vector of features).
samples_labels: The samples names or labels.
features_labels: Labels for every feature in the feature vector.
extra_cols: Extra columns for the data set (e.g. classes, properties, etc.)
Returns:
DataFrame: A Pandas DataFrame with the data set (samples as rows and features as columns).
"""
# validating features labels
features_count = len(features_labels)
if features_count <= 0:
raise ValueError('The amount of features must be positive.')
# validating data
if len(data) <= 0 or not all(len(li) == features_count for li in data):
raise ValueError('All samples must have the same amount of features.')
# creating the data frame
df = pd.DataFrame(data, index=samples_labels, columns=features_labels)
# checking for extra_cols param
if extra_cols is None:
return df
# validating extra columns
cols = extra_cols.values()
if len(extra_cols) < 1 or not all(len(x) == len(list(cols)[0]) for x in cols):
raise ValueError('Invalid extra columns.')
# appending extra columns to data frame
for c_new in extra_cols.keys():
df[c_new] = pd.Series(extra_cols[c_new], index=df.index)
# returning the built data set
return df
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
# dictionary with all implemented file types parsers
file_parsers_dict = {
'jasco': parse_jasco_file,
'scan': parse_scan_file,
'list': parse_list_file,
}
# dictionary with all implemented content types parsers
content_parsers_dict = {
'jasco': parse_jasco_content,
'scan': parse_scan_content,
'list': parse_list_content,
}
# ------------------------------------------------------------------------
# list of valid cannabis samples for all 3 analytical techniques (UV, TLC, GC) used for this substance
cannabis_white_list = [
'J016R', 'F014R', 'J005R', 'F002R', 'F010R', 'A001R', 'J001R', 'F006R', 'J009R', 'M003R',
'J010R', 'J014R', 'J007R', 'J012R', 'L004S', 'M008R', 'C008X', 'J021R', 'M006R', 'C004X',
'C002X', 'F004R', 'F008R', 'J020R', 'J002R', 'F003R', 'F007R', 'F011R', 'A002R', 'F001R',
'M002R', 'J013R', 'L002S', 'J008R', 'M004R', 'J011R', 'J004R', 'J015R', 'J006R', 'K002R',
'F009R', 'L001S', 'C007X', 'C005X', 'F005R', 'C009X', 'M005R', 'C003X', 'K001R', 'L003R',
]
# ------------------------------------------------------------------------
| [
2,
48443,
14629,
14,
8800,
14,
24330,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
357,
34,
8,
12622,
337,
13,
20442,
30292,
40014,
532,
1439,
6923,
33876,
198,
2,
791,
19721,
23345,
286,
428,
2393... | 2.867608 | 1,488 |
import unittest
from enumerate_markdown import headers_finder, header
| [
11748,
555,
715,
395,
198,
6738,
27056,
378,
62,
4102,
2902,
1330,
24697,
62,
22805,
11,
13639,
628,
198
] | 3.789474 | 19 |
# Copyright IBM Corp. 2016 All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
import sys
import logging
import random
import os
import tarfile
import io
from google.protobuf.message import DecodeError
from google.protobuf.timestamp_pb2 import Timestamp
from hfc.protos.common import common_pb2, configtx_pb2
from hfc.protos.msp import identities_pb2
from hfc.protos.peer import proposal_pb2, chaincode_pb2
from hfc.protos.utils import create_tx_payload
CC_INSTALL = "install"
CC_TYPE_GOLANG = "GOLANG"
_logger = logging.getLogger(__name__)
proto_b = \
sys.version_info[0] < 3 and (lambda x: x) or (
lambda x: x.encode('latin1'))
def create_serialized_identity(user):
"""Create serialized identity from user.
Args:
user (user object): The user object that should be serialized.
Returns:
serialized_identity: Protobuf SerializedIdentity of
the given user object.
"""
serialized_identity = identities_pb2.SerializedIdentity()
serialized_identity.mspid = user.msp_id
serialized_identity.id_bytes = user.enrollment.cert
return serialized_identity.SerializeToString()
def build_header(creator, channel_header, nonce):
"""This function will build the common header.
Args:
creator (protobuf SerializedIdentity):
Serialized identity of the creator.
channel_header (protobuf ChannelHeader): ChannelHeader
nonce (str): Nonce that has been used for the tx_id.
Returns:
header: Returns created protobuf common header.
"""
signature_header = common_pb2.SignatureHeader()
signature_header.creator = creator
signature_header.nonce = nonce
header = common_pb2.Header()
header.signature_header = signature_header.SerializeToString()
header.channel_header = channel_header.SerializeToString()
return header
def build_channel_header(type, tx_id, channel_id,
timestamp, epoch=0, extension=None,
tls_cert_hash=None):
"""Build channel header.
Args:
type (common_pb2.HeaderType): type
tx_id (str): transaction id
channel_id (str): channel id
timestamp (grpc.timestamp): timestamp
epoch (int): epoch
extension: extension
Returns:
common_proto.Header instance
"""
channel_header = common_pb2.ChannelHeader()
channel_header.type = type
channel_header.version = 1
channel_header.channel_id = proto_str(channel_id)
channel_header.tx_id = proto_str(tx_id)
channel_header.epoch = epoch
channel_header.timestamp.CopyFrom(timestamp)
if tls_cert_hash:
channel_header.tls_cert_hash = tls_cert_hash
if extension:
channel_header.extension = extension
return channel_header
def string_to_signature(string_signatures):
"""Check if signatures are already in protobuf format.
Args:
string_signatures (list): An list of protobuf ConfigSignatures either
represented as or serialized as byte strings.
Returns:
list: List of protobuf ConfigSignatures.
"""
signatures = []
for signature in string_signatures:
if signature and hasattr(signature, 'header') \
and hasattr(signature, 'signature'):
_logger.debug('_string_to_signature - signature is protobuf')
config_signature = signature
else:
_logger.debug('_string_to_signature - signature is string')
config_signature = configtx_pb2.ConfigSignature()
config_signature.ParseFromString(signature)
signatures.append(config_signature)
return signatures
def current_timestamp():
"""Get current timestamp.
Returns:
Current timestamp.
"""
timestamp = Timestamp()
timestamp.GetCurrentTime()
return timestamp
def extract_channel_config(configtx_proto_envelope):
""" Extracts the protobuf 'ConfigUpdate' object out ouf the 'ConfigEnvelope'.
Args:
configtx_proto_envelope (common_pb2.Envelope): The encoded bytes of the
ConfigEnvelope protofbuf.
Returns:
config_update (configtx_pb2.ConfigUpadeEnvelope.config_update):
The encoded bytes of the ConfigUpdate protobuf, ready to be signed
Raises:
ValueError: If there is an error in protobuf_decode due to a wrong or
not valid profobuf file a ValueError is raised.
"""
_logger.debug('extract_channel_config - start')
try:
envelope = common_pb2.Envelope()
envelope.ParseFromString(configtx_proto_envelope)
payload = common_pb2.Payload()
payload.ParseFromString(envelope.payload)
configtx = configtx_pb2.ConfigUpdateEnvelope()
configtx.ParseFromString(payload.data)
except DecodeError as e:
_logger.error('extract_channel_config - an error occurred decoding'
' the configtx_proto_envelope: {}'.format(e))
raise ValueError('The given configtx_proto_envelope was not valid: {}'
.format(e))
return configtx.config_update
def build_cc_proposal(cci_spec, header, transient_map):
""" Create an chaincode transaction proposal
Args:
transient_map: transient data map
cci_spec: The spec
header: header of the proposal
Returns: The created proposal
"""
cc_payload = proposal_pb2.ChaincodeProposalPayload()
cc_payload.input = cci_spec.SerializeToString()
if transient_map:
cc_payload.TransientMap = transient_map
proposal = proposal_pb2.Proposal()
proposal.header = header.SerializeToString()
proposal.payload = cc_payload.SerializeToString()
return proposal
def sign_proposal(tx_context, proposal):
""" Sign a proposal
Args:
tx_context: transaction context
proposal: proposal to sign on
Returns: Signed proposal
"""
proposal_bytes = proposal.SerializeToString()
sig = tx_context.sign(proposal_bytes)
signed_proposal = proposal_pb2.SignedProposal()
signed_proposal.signature = sig
signed_proposal.proposal_bytes = proposal_bytes
return signed_proposal
def send_transaction_proposal(proposal, tx_context, peers):
"""Send transaction proposal
Args:
header: header
tx_context: transaction context
proposal: transaction proposal
peers: peers
Returns: a list containing all the proposal response
"""
signed_proposal = sign_proposal(tx_context, proposal)
send_executions = [peer.send_proposal(signed_proposal)
for peer in peers]
return send_executions
def send_transaction(orderers, tran_req, tx_context):
"""Send a transaction to the chain's orderer service (one or more
orderer endpoints) for consensus and committing to the ledger.
This call is asynchronous and the successful transaction commit is
notified via a BLOCK or CHAINCODE event. This method must provide a
mechanism for applications to attach event listeners to handle
'transaction submitted', 'transaction complete' and 'error' events.
Args:
tx_context: transaction context
orderers: orderers
tran_req (TransactionRequest): The transaction object
Returns:
result (EventEmitter): an handle to allow the application to
attach event handlers on 'submitted', 'complete', and 'error'.
"""
if not tran_req:
_logger.warning("Missing input request object on the transaction "
"request")
raise ValueError(
"Missing input request object on the transaction request"
)
if not tran_req.responses or len(tran_req.responses) < 1:
_logger.warning("Missing 'proposalResponses' parameter in transaction "
"request")
raise ValueError(
"Missing 'proposalResponses' parameter in transaction request"
)
if not tran_req.proposal:
_logger.warning("Missing 'proposalResponses' parameter in transaction "
"request")
raise ValueError(
"Missing 'proposalResponses' parameter in transaction request"
)
if len(orderers) < 1:
_logger.warning("Missing orderer objects on this chain")
raise ValueError(
"Missing orderer objects on this chain"
)
endorsements = map(lambda res: res.endorsement, tran_req.responses)
tran_payload_bytes = create_tx_payload(endorsements, tran_req)
envelope = sign_tran_payload(tx_context, tran_payload_bytes)
if sys.version_info < (3, 0):
orderer = random.choice(orderers.values())
else:
orderer = random.choice(list(orderers.values()))
return orderer.broadcast(envelope)
def sign_tran_payload(tx_context, tran_payload_bytes):
"""Sign a transaction payload
Args:
signing_identity: id to sign with
tran_payload: transaction payload to sign on
Returns: Envelope
"""
sig = tx_context.sign(tran_payload_bytes)
envelope = common_pb2.Envelope()
envelope.signature = sig
envelope.payload = tran_payload_bytes
return envelope
def build_tx_req(responses):
""" Check the endorsements from peers
Args:
reponses: ProposalResponse from endorsers
Return: transaction request or None for endorser failure
"""
response, proposal, header = responses
return TXRequest(response, proposal, header)
def send_install_proposal(tx_context, peers):
"""Send install chaincode proposal
Args:
tx_context: transaction context
peers: peers to install chaincode
Returns: a set of proposal response
"""
if not tx_context:
raise ValueError("InstallProposalRequest is empty.")
if not peers:
raise ValueError("Please specify the peer.")
cc_deployment_spec = chaincode_pb2.ChaincodeDeploymentSpec()
cc_deployment_spec.chaincode_spec.type = \
chaincode_pb2.ChaincodeSpec.Type.Value(
proto_str(tx_context.tx_prop_req.cc_type))
cc_deployment_spec.chaincode_spec.chaincode_id.name = \
proto_str(tx_context.tx_prop_req.cc_name)
cc_deployment_spec.chaincode_spec.chaincode_id.path = \
proto_str(tx_context.tx_prop_req.cc_path)
cc_deployment_spec.chaincode_spec.chaincode_id.version = \
proto_str(tx_context.tx_prop_req.cc_version)
if not tx_context.tx_prop_req.packaged_cc:
cc_deployment_spec.code_package = \
package_chaincode(
tx_context.tx_prop_req.cc_path,
tx_context.tx_prop_req.cc_type)
else:
cc_deployment_spec.code_package = \
tx_context.tx_prop_req.packaged_cc
channel_header_extension = proposal_pb2.ChaincodeHeaderExtension()
channel_header_extension.chaincode_id.name = \
proto_str("lscc")
channel_header = build_channel_header(
common_pb2.ENDORSER_TRANSACTION,
tx_context.tx_id,
'',
current_timestamp(),
tx_context.epoch,
channel_header_extension.SerializeToString()
)
header = build_header(tx_context.identity,
channel_header,
tx_context.nonce)
cci_spec = chaincode_pb2.ChaincodeInvocationSpec()
cci_spec.chaincode_spec.type = \
chaincode_pb2.ChaincodeSpec.Type.Value(CC_TYPE_GOLANG)
cci_spec.chaincode_spec.chaincode_id.name = proto_str("lscc")
cci_spec.chaincode_spec.input.args.extend(
[proto_b(CC_INSTALL), cc_deployment_spec.SerializeToString()])
proposal = build_cc_proposal(
cci_spec, header,
tx_context.tx_prop_req.transient_map)
signed_proposal = sign_proposal(tx_context, proposal)
response = [peer.send_proposal(signed_proposal)
for peer in peers]
return response, proposal, header
def package_chaincode(cc_path, cc_type):
""" Package all chaincode env into a tar.gz file
Args:
cc_path: path to the chaincode
Returns: The chaincode pkg path or None
"""
_logger.debug('Packaging chaincode path={}, chaincode type={}'.format(
cc_path, cc_type))
if cc_type == CC_TYPE_GOLANG:
go_path = os.environ['GOPATH']
if not cc_path:
raise ValueError("Missing chaincode path parameter "
"in install proposal request")
if not go_path:
raise ValueError("No GOPATH env variable is found")
proj_path = go_path + '/src/' + cc_path
_logger.debug('Project path={}'.format(proj_path))
with io.BytesIO() as temp:
with tarfile.open(fileobj=temp, mode='w|gz') as code_writer:
for dir_path, _, file_names in os.walk(proj_path):
if not file_names:
raise ValueError("No chaincode file found!")
for filename in file_names:
file_path = os.path.join(dir_path, filename)
_logger.debug("The file path {}".format(file_path))
code_writer.add(
file_path,
arcname=os.path.relpath(file_path, go_path))
temp.seek(0)
code_content = temp.read()
if code_content:
return code_content
else:
raise ValueError('No chaincode found')
else:
raise ValueError('Currently only support install GOLANG chaincode')
| [
2,
15069,
19764,
11421,
13,
1584,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
... | 2.50213 | 5,634 |
character = "Scorpion"
letter = input("Enter a character: ")
if letter in character:
print("{} is in {}".format(letter, character))
else: # letter not in character
print("{} is not in {}".format(letter, character))
activity = input("What would you like to do today?")
if "Dominos" not in activity.casefold(): # try with activity as Dominos
print("But pizza ?!")
| [
22769,
796,
366,
3351,
16300,
295,
1,
198,
198,
9291,
796,
5128,
7203,
17469,
257,
2095,
25,
366,
8,
198,
198,
361,
3850,
287,
2095,
25,
198,
220,
220,
220,
3601,
7203,
90,
92,
318,
287,
23884,
1911,
18982,
7,
9291,
11,
2095,
4008... | 3.166667 | 120 |
from httplib2 import Http
import random
import time
import urllib
import hmac
import hashlib
import binascii
import base64
import requests
import os
| [
6738,
1841,
489,
571,
17,
1330,
367,
29281,
198,
11748,
4738,
198,
11748,
640,
198,
11748,
2956,
297,
571,
198,
11748,
289,
20285,
198,
11748,
12234,
8019,
198,
11748,
9874,
292,
979,
72,
198,
11748,
2779,
2414,
198,
11748,
7007,
198,
... | 3.409091 | 44 |
#!/usr/bin/env python3
# Copyright (c) 2018 Anton Semjonov
# Licensed under the MIT License
import argparse, sys
from signal import signal, SIGINT
from random_art import metadata, crypto
from random_art.randomart import draw, drunkenwalk, TRANSLATION
# exit on ctrl-c
signal(SIGINT, lambda *a: sys.exit(1))
# initialize argument parser
parser = argparse.ArgumentParser(
description=metadata.get("description"),
epilog="%%(prog)s version %s" % metadata.get("version")
)
# the input file to be hashed
parser.add_argument("file",
type=argparse.FileType("rb"),
default="/dev/stdin",
nargs="?",
help="input file (default: stdin)",
)
# print frame in ascii characters
parser.add_argument("--ascii",
action="store_true",
help="use ascii frame",
)
# additionally display base64 encoded hash
parser.add_argument("--hash",
action="store_true",
help="print base64 encoded hash",
)
# parse commandline
args = parser.parse_args()
# hash the file
digest = crypto.digest(args.file)
# maybe print encoded digest
if args.hash:
from base64 import b64encode
print("%s:%s" % (crypto.HASHNAME, b64encode(digest).decode()))
# generate randomart
art = draw(drunkenwalk(digest), name=crypto.HASHNAME)
# maybe translate to ascii
if args.ascii:
art = art.translate(TRANSLATION)
# print randomart
print(art, end="")
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
15069,
357,
66,
8,
2864,
9261,
12449,
46286,
709,
198,
2,
49962,
739,
262,
17168,
13789,
198,
198,
11748,
1822,
29572,
11,
25064,
198,
6738,
6737,
1330,
6737,
11,
33993,
... | 2.840671 | 477 |
from controllers.error_controller import ErrorController
from controllers.type_checker import TypeChecker
from controllers.symbol_table import SymbolTable
from controllers import data_mode
from views.data_window import DataWindow
| [
6738,
20624,
13,
18224,
62,
36500,
1330,
13047,
22130,
198,
6738,
20624,
13,
4906,
62,
9122,
263,
1330,
5994,
9787,
263,
198,
6738,
20624,
13,
1837,
23650,
62,
11487,
1330,
38357,
10962,
198,
6738,
20624,
1330,
1366,
62,
14171,
198,
673... | 4.62 | 50 |
# Find the sum of all the multiples of 3 or 5 below 1000.
if __name__ == "__main__":
print(f"The sum of all the multiples of 3 or 5 below 1000 is {count_mult2()}")
| [
2,
9938,
262,
2160,
286,
477,
262,
5021,
2374,
286,
513,
393,
642,
2174,
8576,
13,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
3601,
7,
69,
1,
464,
2160,
286,
477,
262,
5021,
2374,
... | 2.85 | 60 |
import xlrd
from optparse import make_option
# from pprint import pprint as print
from django.core.management.base import BaseCommand
from django.utils.text import slugify
from web.models import Person, Country, GeneralExpertise, OERExpertise, OpenAccessExpertise, MOOCExpertise, Region
| [
11748,
2124,
75,
4372,
198,
6738,
2172,
29572,
1330,
787,
62,
18076,
198,
2,
422,
279,
4798,
1330,
279,
4798,
355,
3601,
198,
198,
6738,
42625,
14208,
13,
7295,
13,
27604,
13,
8692,
1330,
7308,
21575,
198,
6738,
42625,
14208,
13,
2679... | 3.493976 | 83 |
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.rnn import LSTMCell
from tensorflow.contrib.rnn import GRUCell
import time
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
6738,
11192,
273,
11125,
13,
3642,
822,
13,
81,
20471,
1330,
406,
2257,
9655,
695,
198,
6738,
11192,
273,
11125,
13,
3642,
822,
13,
81,
204... | 3.24 | 50 |
# Generated by Django 2.0.5 on 2018-05-22 18:44
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
13,
20,
319,
2864,
12,
2713,
12,
1828,
1248,
25,
2598,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.818182 | 44 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.891892 | 37 |
import sys
import tarfile
import tempfile
import shutil
import click
import requests
import stups_cli.config
import zign.api
from clickclick import AliasedGroup, OutputFormat, UrlType, error, fatal_error, print_table, ok
from requests import RequestException
import pierone
from .api import PierOne, DockerMeta, docker_login_with_credhelper, get_latest_tag, parse_time, request
from .exceptions import PieroneException, ArtifactNotFound
from .types import DockerImage
from .ui import DetailsBox, format_full_image_name, markdown_2_cli
from .utils import get_registry
from .validators import validate_team
# Service name under which credentials are stored in the system keyring.
KEYRING_KEY = 'pierone'

# Let click accept -h in addition to the default --help.
CONTEXT_SETTINGS = {'help_option_names': ['-h', '--help']}

# Reusable click option decorators shared by the subcommands below.
output_option = click.option('-o', '--output', type=click.Choice(['text', 'json', 'tsv']), default='text',
                             help='Use alternative output format')
url_option = click.option('--url', help='Pier One URL', metavar='URI')
def set_pierone_url(config: dict, url: str) -> str:
    '''Resolve, validate and persist the Pier One URL.

    Resolution order: the *url* CLI argument, then the stored config value,
    then an interactive prompt.  Prompted URLs are probed with a short HTTP
    request and re-asked until one is reachable.  Scheme-less URLs are
    accepted (issue 63) and upgraded to https before use.

    The resulting URL is written back into ``config['url']`` and returned.
    '''
    url = url or config.get('url')
    while not url:
        url = click.prompt('Please enter the Pier One URL', type=UrlType())
        if '://' not in url:
            # issue 63: gracefully handle URLs without scheme; normalizing
            # before the probe, otherwise requests raises MissingSchema and
            # a scheme-less answer could never pass the reachability check
            url = 'https://{}'.format(url)
        try:
            # quick reachability probe; only network-level failures re-prompt
            requests.get(url, timeout=5)
        except RequestException:
            error('Could not reach {}'.format(url))
            url = None
    if '://' not in url:
        # issue 63: gracefully handle URLs without scheme (CLI/config path)
        url = 'https://{}'.format(url)
    validate_pierone_url(url)
    config['url'] = url
    return url
@click.group(cls=AliasedGroup, context_settings=CONTEXT_SETTINGS)
@click.option('-V', '--version', is_flag=True, callback=print_version, expose_value=False, is_eager=True,
help='Print the current version number and exit.')
@click.pass_context
@cli.command()
@url_option
@click.pass_obj
def login(config, url):
    '''Login to Pier One Docker registry (generates docker configuration in ~/.docker/config.json)'''
    # Remember whether the URL was given on the command line before resolving it.
    explicit_url = url
    url = set_pierone_url(config, explicit_url)
    if not explicit_url:
        # URL came from config/prompt -> persist it for the next invocation
        stups_cli.config.store_config(config, 'pierone')

    # The docker credential helper must be reachable on PATH for login to work.
    if shutil.which("docker-credential-pierone") is None:
        fatal_error("docker-credential-pierone executable is not available. "
                    "If you've installed `pierone` to a virtual environment, make sure to add it to to the PATH.")

    docker_login_with_credhelper(url)
    ok("Authentication configured for {}, you don't need to run pierone login anymore!".format(url))
@cli.command()
@url_option
@output_option
@click.pass_obj
def teams(config, output, url):
    '''List all teams having artifacts in Pier One'''
    # Resolve the registry URL (may prompt), then fetch and render the listing.
    base_url = set_pierone_url(config, url)
    token = get_token()
    response = request(base_url, '/teams', token)
    rows = [{'name': name} for name in sorted(response.json())]
    with OutputFormat(output):
        print_table(['name'], rows)
@cli.command()
@click.argument('team', callback=validate_team)
@url_option
@output_option
@click.pass_obj
def artifacts(config, team, url, output):
    """List all team artifacts"""
    # Query Pier One for every artifact registered under the given team.
    api = PierOne(set_pierone_url(config, url))
    names = sorted(api.get_artifacts(team))
    rows = [{'team': team, 'artifact': name} for name in names]
    with OutputFormat(output):
        print_table(['team', 'artifact'], rows)
@cli.command()
@click.argument('team', callback=validate_team)
@click.argument('artifact', nargs=-1)
@url_option
@output_option
@click.option('-l', '--limit', type=int, help='Limit number of versions to show per artifact')
@click.pass_obj
def tags(config, team: str, artifact, url, output, limit):
    '''List all tags for a given team'''
    registry = set_pierone_url(config, url)
    api = PierOne(registry)

    if limit is None:
        # show 20 rows if artifact was given, else show only 3
        limit = 20 if artifact else 3

    if not artifact:
        # No artifact given on the command line: list tags for all of them.
        artifact = api.get_artifacts(team)
        if not artifact:
            raise click.UsageError('The Team you are looking for does not exist or '
                                   'we could not find any artifacts registered in Pierone! '
                                   'Please double check for spelling mistakes.')

    rows = []
    for name in artifact:
        image = DockerImage(registry=registry, team=team, artifact=name, tag=None)
        try:
            fetched = api.get_image_tags(image)
        except ArtifactNotFound:
            raise click.UsageError("Artifact or Team does not exist! "
                                   "Please double check for spelling mistakes.")
        # keep only the newest `limit` entries per artifact
        rows.extend(fetched[-limit:])

    # sorts are guaranteed to be stable, i.e. tags will be sorted by time (as returned from REST service)
    rows.sort(key=lambda row: (row['team'], row['artifact']))

    with OutputFormat(output):
        titles = {
            "created_time": "Created",
            "created_by": "By",
        }
        print_table(
            ["team", "artifact", "tag", "created_time", "created_by", "status", "status_reason"],
            rows,
            titles=titles,
        )
@cli.command()
@click.argument("team", callback=validate_team)
@click.argument("artifact")
@click.argument("tag")
@url_option
@output_option
@click.pass_obj
def cves(config, team, artifact, tag, url, output):
    """DEPRECATED"""
    # Emit a yellow ANSI warning on stderr; the command itself is a no-op.
    warning_text = "\x1b[1;33m!! THIS FUNCTIONALITY IS DEPRECATED !!\x1b[0m"
    print(warning_text, file=sys.stderr)
@cli.command("mark-production-ready")
@click.argument("incident")
@click.argument("team", callback=validate_team)
@click.argument("artifact")
@click.argument("tag")
@url_option
@click.pass_obj
def mark_production_ready(config, incident, team, artifact, tag, url):
"""
Manually mark image as production ready.
"""
pierone_url = set_pierone_url(config, url)
registry = get_registry(pierone_url)
image = DockerImage(registry, team, artifact, tag)
if incident.startswith("INC-"):
# if it's a JIRA ticket, mark image as production ready in Pierone
api = PierOne(pierone_url)
api.mark_production_ready(image, incident)
else:
meta = DockerMeta()
meta.mark_production_ready(image, incident)
if team in ["ci", "automata", "torch"]:
click.echo("🧙 ", nl=False)
click.echo(
"Marked {} as `production_ready` due to incident {}.".format(
format_full_image_name(image), incident
)
)
@cli.command()
@click.argument("team", callback=validate_team)
@click.argument("artifact")
@click.argument("tag")
@url_option
@click.pass_obj
def describe(config, team, artifact, tag, url):
"""Describe docker image."""
url = set_pierone_url(config, url)
registry = get_registry(url)
api = PierOne(url)
meta = DockerMeta()
image = DockerImage(registry=registry, team=team, artifact=artifact, tag=tag)
tag_info = api.get_tag_info(image)
image_metadata = meta.get_image_metadata(image)
ci_info = image_metadata.get("ci")
compliance = image_metadata.get("compliance")
base_image_info = image_metadata.get("base_image")
status_details = markdown_2_cli(compliance.get("checker", {}).get("details", ""))
details_box = DetailsBox()
details_box.set("General Information", "Team", team)
details_box.set("General Information", "Artifact", artifact)
details_box.set("General Information", "Tag", tag)
details_box.set("General Information", "Author", tag_info["created_by"])
details_box.set("General Information", "Created in", tag_info["created"])
if ci_info:
details_box.set("Commit Information", "Repository", ci_info["url"])
details_box.set("Commit Information", "Hash", ci_info["revision"])
details_box.set("Commit Information", "Time", ci_info["created"])
details_box.set("Commit Information", "Author", ci_info["author"])
else:
details_box.set("Compliance Information", "Valid SCM Source", "No SCM Source")
details_box.set(
"Compliance Information",
"Effective Status",
compliance.get("status", "Not Processed"),
)
details_box.set(
"Compliance Information",
"Checker Status",
compliance.get("checker", {}).get("status", "Not Processed"),
)
details_box.set(
"Compliance Information",
"Checker Status Date",
compliance.get("checker", {}).get("received_at", "NOT SET"),
)
details_box.set(
"Compliance Information",
"Checker Status Reason",
compliance.get("checker", {}).get("reason", "NOT SET"),
)
# TODO make markdown function return a string
details_box.set(
"Compliance Information",
"Checker Status Details",
status_details if status_details else "",
)
if compliance.get("user"):
user_status = compliance["user"]
details_box.set("Compliance Information", "User Status", user_status["status"])
details_box.set(
"Compliance Information",
"User Status Date",
user_status["received_at"],
)
details_box.set(
"Compliance Information",
"User Status Reason",
user_status["reason"],
)
details_box.set(
"Compliance Information",
"User Status Issue",
# TODO make non-optional after PR merge
user_status.get("incident", "NOT SET"),
)
details_box.set(
"Compliance Information",
"User Status Set by",
user_status["set_by"],
)
else:
details_box.set("Compliance Information", "User Status", "Not Set")
if compliance.get("emergency"):
emergency_status = compliance["emergency"]
details_box.set(
"Compliance Information", "Emergency Status", emergency_status["status"]
)
details_box.set(
"Compliance Information",
"Emergency Status Date",
emergency_status["received_at"],
)
details_box.set(
"Compliance Information",
"Emergency Status Reason",
emergency_status["reason"],
)
else:
details_box.set("Compliance Information", "Emergency Status", "Not Set")
base_image = base_image_info.get("name") or "UNKNOWN"
details_box.set("Compliance Information", "Base Image Name", base_image)
details_box.set(
"Compliance Information",
"Base Image Allowed",
"Yes" if base_image_info["allowed"] else "No",
)
details_box.set(
"Compliance Information", "Base Image Details", base_image_info["message"]
)
details_box.render()
@cli.command()
@click.argument('team', callback=validate_team)
@click.argument('artifact')
@url_option
@output_option
@click.pass_obj
def latest(config, team, artifact, url, output):
    '''Get latest tag/version of a specific artifact'''
    set_pierone_url(config, url)
    # validate that the token exists!
    access_token = get_token()
    registry_host = get_registry(config.get('url'))
    docker_image = DockerImage(registry=registry_host, team=team,
                               artifact=artifact, tag=None)
    newest_tag = get_latest_tag(docker_image, access_token)
    # Guard clause: fail loudly when nothing is tagged for this artifact.
    if not newest_tag:
        raise PieroneException('Latest tag not found')
    print(newest_tag)
@cli.command('scm-source')
@click.argument('team', callback=validate_team)
@click.argument('artifact')
@click.argument('tag', nargs=-1)
@url_option
@output_option
@click.pass_obj
def scm_source(config, team, artifact, tag, url, output):
'''Show SCM source information such as GIT revision'''
url = set_pierone_url(config, url)
api = PierOne(url)
token = get_token()
tags = get_tags(url, team, artifact, token)
if not tags:
raise click.UsageError('Artifact or Team does not exist! '
'Please double check for spelling mistakes.')
if not tag:
tag = [t['name'] for t in tags]
rows = []
for t in tag:
image = DockerImage(url, team, artifact, t)
try:
scm_source = api.get_scm_source(image)
row = scm_source
except ArtifactNotFound:
row = {}
row['tag'] = t
matching_tag = [d for d in tags if d['name'] == t]
row['created_by'] = ''.join([d['created_by'] for d in matching_tag])
if matching_tag:
row['created_time'] = parse_time(''.join([d['created'] for d in matching_tag]))
rows.append(row)
rows.sort(key=lambda row: (row['tag'], row.get('created_time')))
with OutputFormat(output):
print_table(['tag', 'author', 'url', 'revision', 'status', 'created_time', 'created_by'], rows,
titles={'tag': 'Tag', 'created_by': 'By', 'created_time': 'Created',
'url': 'URL', 'revision': 'Revision', 'status': 'Status'},
max_column_widths={'revision': 10})
@cli.command('image')
@click.argument('image')
@url_option
@output_option
@click.pass_obj
def image(config, image, url, output):
    """
    List tags that point to this image
    NOTE: this is broken for large namespaces
    """
    # TODO reimplement with `GET /v2/_catalog` and `GET /v2/<name>/tags/list`
    set_pierone_url(config, url)
    access_token = get_token()
    try:
        response = request(config.get('url'), '/tags/{}'.format(image), access_token)
    except requests.HTTPError as http_error:
        # Translate the two expected error codes into friendly messages;
        # anything else is re-raised untouched.
        code = http_error.response.status_code
        if code == 404:
            click.echo('Image {} not found'.format(image))
            return
        if code == 412:
            click.echo('Prefix {} matches more than one image.'.format(image))
            return
        raise http_error
    matching_tags = response.json()
    with OutputFormat(output):
        print_table(['team', 'artifact', 'name'], matching_tags,
                    titles={'name': 'Tag', 'artifact': 'Artifact', 'team': 'Team'})
@cli.command('inspect-contents')
@click.argument('team', callback=validate_team)
@click.argument('artifact')
@click.argument('tag', nargs=-1)
@click.option('-l', '--limit', type=int, default=1)
@url_option
@output_option
@click.pass_obj
def inspect_contents(config, team, artifact, tag, url, output, limit):
'''List image contents (files in tar layers)'''
set_pierone_url(config, url)
token = get_token()
tags = get_tags(config.get('url'), team, artifact, token)
if not tag:
tag = [t['name'] for t in tags]
CHUNK_SIZE = 8192
TYPES = {b'5': 'D', b'0': ' '}
rows = []
for t in tag:
row = request(config.get('url'), '/v2/{}/{}/manifests/{}'.format(team, artifact, t),
token).json()
if row.get('layers'):
layers = reversed([lay.get('digest') for lay in row.get('layers')])
else:
layers = [lay.get('blobSum') for lay in row.get('fsLayers')]
if layers:
found = 0
for i, layer in enumerate(layers):
layer_id = layer
if layer_id:
response = request(config.get('url'), '/v2/{}/{}/blobs/{}'.format(team, artifact, layer_id), token)
with tempfile.NamedTemporaryFile(prefix='tmp-layer-', suffix='.tar') as fd:
for chunk in response.iter_content(CHUNK_SIZE):
fd.write(chunk)
fd.flush()
with tarfile.open(fd.name) as archive:
has_member = False
for member in archive.getmembers():
rows.append({'layer_index': i, 'layer_id': layer_id, 'type': TYPES.get(member.type),
'mode': oct(member.mode)[-4:],
'name': member.name, 'size': member.size, 'created_time': member.mtime})
has_member = True
if has_member:
found += 1
if found >= limit:
break
rows.sort(key=lambda row: (row['layer_index'], row['name']))
with OutputFormat(output):
print_table(['layer_index', 'layer_id', 'mode', 'name', 'size', 'created_time'], rows,
titles={'created_time': 'Created', 'layer_index': 'Idx'},
max_column_widths={'layer_id': 16})
| [
11748,
25064,
198,
11748,
13422,
7753,
198,
11748,
20218,
7753,
198,
11748,
4423,
346,
198,
198,
11748,
3904,
198,
11748,
7007,
198,
11748,
336,
4739,
62,
44506,
13,
11250,
198,
11748,
1976,
570,
13,
15042,
198,
6738,
3904,
12976,
1330,
... | 2.343486 | 7,092 |
from preprocessorCreate import polyX, rescaleX
import numpy as np
from sklearn import linear_model
import matplotlib.pyplot as plt
from CvEstimate import cv_estimate
from sklearn.model_selection import cross_validate
from sklearn import preprocessing
# Generate the training and test sets
n_samples = 21
xtrain = np.linspace(0,20,n_samples)[:,np.newaxis]
xtest = np.arange(0,20,0.1)[:,np.newaxis]
w = np.array([-1.5, 1/9])
fun = lambda x:w[0]*x + w[1]*x**2
sigma = 4
ytrain = fun(xtrain) + np.random.randn(len(xtrain),1)*sigma**0.5
ytestNoisefree = fun(xtest)
ytestNoisey = ytestNoisefree + np.random.randn(len(xtest),1)*sigma**0.5
# Center the y data by subtracting its mean; the only effect is a vertical
# shift along the y axis.
ytrain = ytrain - np.mean(ytrain)
ytest = ytestNoisey - np.mean(ytestNoisey)
# Expand the features polynomially (degree 14) and rescale them
pp_poly = polyX(degree=14)
pp_resc = rescaleX()
pp_list = [pp_resc, pp_poly]
Xtrain = xtrain.copy()
Xtest = xtest.copy()
# The same preprocessing must be applied to both the training and the test set
for pp in pp_list:
    Xtrain = pp.preprocess(Xtrain)
    Xtest = pp.preprocess(Xtest)
# Fit an unregularized linear regression model
lr = linear_model.LinearRegression()
lr.fit(Xtrain, ytrain)
print(lr.coef_[0])
#print("{:.3f}".format(lr.coef_[0]))
print(list("{0:.3f}".format(coff) for coff in lr.coef_[0]))
# Predict on the test set
ypredTest = lr.predict(Xtest)
# Plot the (overfitted) least-squares fit
plt.scatter(xtrain, ytrain, color='blue')
plt.plot(xtest, ypredTest, color='black', linewidth=3)
plt.show()
### Ridge regression: sweep the regularization strength
lambdas = np.logspace(-10, 1.3, 10)
NL = len(lambdas)
printNdx = [1, 5]
testMse = np.zeros(NL)
trainMse = np.zeros(NL)
for k in range(NL):
    _lambda = lambdas[k]
    ridge_lr = linear_model.Ridge(alpha=_lambda)
    ridge_lr.fit(Xtrain, ytrain)
    ypredTest = ridge_lr.predict(Xtest)
    ypredTrain = ridge_lr.predict(Xtrain)
    testMse[k] = np.mean(((ypredTest-ytest).flatten())**2)
    trainMse[k] = np.mean(((ypredTrain-ytrain).flatten())**2)
ndx = np.log10(lambdas)
plt.plot(ndx, trainMse, 'bs:', linewidth=2, markersize=12, label='train')
plt.plot(ndx, testMse, 'rx-', linewidth=2, markersize=12, label='test')
plt.xlabel('log10 lambda')
plt.legend()
plt.show()
# Plot two of the ridge fits (indices chosen in printNdx above)
for k in printNdx:
    _lambda = lambdas[k]
    print(np.log10(_lambda))
    ridge_lr = linear_model.Ridge(alpha = _lambda)
    ridge_lr.fit(Xtrain, ytrain)
    print(list("{0:.3f}".format(coff) for coff in ridge_lr.coef_[0]))
    ypredTest = ridge_lr.predict(Xtest)
    ypredTrain = ridge_lr.predict(Xtrain)
    modelSigm = np.sum((ypredTrain-ytrain)**2)/ytrain.shape[0] # training-set residual variance (MSE) of the model
    plt.scatter(xtrain, ytrain, color='black')
    plt.plot(xtest, ypredTest, color='k', linewidth=3)
    plt.plot(xtest, ypredTest-modelSigm, 'b:', linewidth=3)
    plt.plot(xtest, ypredTest+modelSigm, 'b:', linewidth=3)
    plt.title('lambda={:.2}'.format(np.log10(_lambda)))
    plt.show()
### Cross-validation: estimate test MSE per lambda with 5-fold CV
mse = []
for k in range(NL):
    _lambda = lambdas[k]
    ridge_lr = linear_model.Ridge(alpha=_lambda)
    nfolds = 5
    scores = cross_validate(ridge_lr, Xtrain, ytrain.flatten(), scoring='neg_mean_squared_error', cv=5)
    mse.append(-np.mean(scores['test_score']))
new_mse = np.log(mse)
plt.plot(np.log10(lambdas), new_mse, 's-', color='b', linewidth=3, markersize=8, markeredgewidth=3, markerfacecolor='white')
plt.show()
| [
6738,
662,
41341,
16447,
1330,
7514,
55,
11,
6811,
1000,
55,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
1341,
35720,
1330,
14174,
62,
19849,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
327,
85,
22362,
... | 1.911961 | 1,647 |
#!/usr/bin/env python
""" Update collection in Nuxeo """
import sys, os, requests
from lxml import etree
import pprint
from pynux import utils
from urlparse import urlparse
metadata_dir = "/apps/content/metadata/UCSF/JapaneseWoodblocks/ucb/"
pp = pprint.PrettyPrinter()
nx = utils.Nuxeo()
nuxeo_limit = 24
nuxeo_basepath = '/asset-library/UCSF/JapaneseWoodblocks'
nsmap = {'mets': 'http://www.loc.gov/METS/', 'mods': 'http://www.loc.gov/mods/v3', 'rts': 'http://cosimo.stanford.edu/sdr/metsrights/'}
toolong = []
def get_properties(document, label, is_parent):
    """ get properties (metadata) for adding to object component in Nuxeo

    Returns a dict of Nuxeo properties built from the matching MODS element,
    or a dict containing only 'dc:title' set to the label when none is found.
    """
    # NOTE(review): 'is_parent' is accepted but never used in this body.
    # get properties
    item_dict = {}
    # NOTE(review): get_mods_element is not defined in this portion of the
    # file -- presumably defined elsewhere in the module; verify.
    mods = get_mods_element(document, label)
    if mods is not None:
        item_dict = xml_to_dict(mods)
    else:
        item_dict['dc:title'] = label
    return item_dict
def get_struct_element_info(struct_element, parent_path, document, doc_id):
    """ assemble a dict containing data we want for a given mets:structMap/mets:div element

    Keys produced: 'label', 'raw_filename', 'raw_path', 'path'.
    """
    element_info = {}
    # label
    label = struct_element.get('LABEL')
    element_info['label'] = label
    # raw filename and path (chosen master file for this component)
    raw_filename, raw_path = get_master_file(document, struct_element)
    element_info['raw_filename'] = raw_filename
    element_info['raw_path'] = raw_path
    # generic path: fall back to the document id when no master file was found
    if raw_filename:
        path = os.path.join(parent_path, raw_filename)
    else:
        path = os.path.join(parent_path, doc_id)
    element_info['path'] = path
    return element_info
def get_master_file(document, mets_div):
    """ choose the master file for this component for import into Nuxeo. This is the raw_file info, with filename not yet truncated or upper-cased.

    Returns ('', '') when no file matching the wanted substrings is found.
    """
    master_filename = ''
    master_path = ''
    files = {}
    # get list of files for this component
    for fptr in mets_div.iterfind('mets:fptr', namespaces=nsmap):
        file_id = fptr.get('FILEID')
        dir, filename = get_raw_filename(document, file_id)
        #size = os.path.getsize(os.path.join(dir, filename))
        files[filename] = os.path.join(dir, filename)
    # determine master file: first file whose name contains one of the
    # preferred substrings. NOTE(review): .iteritems() is Python 2 only, and
    # dict iteration order is arbitrary there, so with several '.tif' files
    # the one chosen is unspecified (the loop does not break on first match).
    substrings = ['.tif']
    for sub in substrings:
        if master_filename:
            break
        else:
            for key, value in files.iteritems():
                if sub in key:
                    master_filename = key
                    master_path = value
    return master_filename, master_path
def get_raw_filename(document, mets_file_id):
    """ given the FILEID from mets, find the name of the corresponding file we grabbed from UCB server

    Returns (dir, filename) for the first matching mets:file/FLocat.
    NOTE(review): implicitly returns None when no match is found; callers
    that unpack the result would then raise TypeError -- verify inputs.
    """
    for metsfile in document.iterfind('mets:fileSec/mets:fileGrp/mets:file', namespaces=nsmap):
        if metsfile.get('ID') == mets_file_id:
            for flocat in metsfile.iterfind('mets:FLocat', namespaces=nsmap):
                # only consider locations hosted on the UCB media server
                if flocat.get('{http://www.w3.org/1999/xlink}href').startswith('http://nma.berkeley.edu'):
                    ucb_url = flocat.get('{http://www.w3.org/1999/xlink}href')
                    dir, filename = get_local_filepath(ucb_url)
                    return dir, filename
def get_local_filepath(ucb_url):
    """ given UCB URL, get filepath of local file that we grabbed from UCB server

    If the file is not already cached under content_dir, it is downloaded
    (following redirects) and stored there. Returns (dir, filename).
    """
    content_dir = "/apps/content/raw_files/UCSF/JapaneseWoodblocks/"
    # example: http://nma.berkeley.edu/ark:/28722/bk0000m7z5r
    parsed_url = urlparse(ucb_url)
    ark = parsed_url.path.split('/ark:/')[1]
    dir = os.path.join(content_dir, ark)
    try:
        # first file found anywhere under the ark directory
        filename = [files for root, dirs, files in os.walk(dir)][0][0]
    except:
        # NOTE(review): bare except -- any failure (missing dir, empty dir)
        # triggers a download; consider narrowing to IndexError.
        r = requests.get(ucb_url, allow_redirects=True)
        # final redirected URL carries the real filename
        path, filename = os.path.split(urlparse(r.url).path)
        dest_dir = os.path.join(content_dir, ark)
        dest_path = os.path.join(dest_dir, filename)
        _mkdir(dest_dir)
        with open(dest_path, 'wb') as fd:
            fd.write(r.content)
        print "Grabbed file:", filename
    return dir, filename
# http://code.activestate.com/recipes/82465-a-friendly-mkdir/
def _mkdir(newdir):
    """works the way a good mkdir should :)
        - already exists, silently complete
        - regular file in the way, raise an exception
        - parent directory(ies) does not exist, make them as well
    """
    if os.path.isdir(newdir):
        # nothing to do
        pass
    elif os.path.isfile(newdir):
        raise OSError("a file with the same name as the desired " \
                      "dir, '%s', already exists." % newdir)
    else:
        # recursively ensure the parent exists, then create the leaf
        head, tail = os.path.split(newdir)
        if head and not os.path.isdir(head):
            _mkdir(head)
        #print "_mkdir %s" % repr(newdir)
        if tail:
            os.mkdir(newdir)
def xml_to_dict(mods):
    """Convert MODS XML into a Nuxeo-friendly python dict."""
    # Extract raw [name, value] pairs, then fold them into a property dict.
    raw_pairs = extract_properties(mods)
    return format_properties(raw_pairs)
def format_properties(properties_list):
    """Aggregate raw [name, value] pairs into a dict keyed by property name.

    Repeatable properties collect a list of formatted values; all other
    properties are joined into a single '. '-separated string.
    """
    repeatables = ("ucldc_schema:alternativetitle", "ucldc_schema:collection", "ucldc_schema:campusunit", "ucldc_schema:subjecttopic", "ucldc_schema:contributor", "ucldc_schema:creator", "ucldc_schema:date", "ucldc_schema:formgenre", "ucldc_schema:localidentifier", "ucldc_schema:language", "ucldc_schema:place", "ucldc_schema:relatedresource", "ucldc_schema:rightsholder", "ucldc_schema:subjectname", "ucldc_schema:publisher")
    # Group values by property name, keeping the order of first appearance.
    grouped = {}
    for pair in properties_list:
        grouped.setdefault(pair[0], []).append(pair[1])
    # Format each group according to whether the property is repeatable.
    properties = {}
    for name, values in grouped.items():
        if name in repeatables:
            properties[name] = [get_formatted_value(value) for value in values]
        else:
            properties[name] = '. '.join(values)
    return properties
def get_formatted_value(values):
    """Normalize a raw value for Nuxeo.

    Lists of [key, value] pairs become a dict; any other value passes
    through unchanged.
    """
    if not isinstance(values, list):
        return values
    return {pair[0]: pair[1] for pair in values}
def extract_properties(mods):
""" extract a list of properties from the XML """
properties_raw = []
# type
properties_raw.append(['ucldc_schema:type', 'image'])
# campusunit
properties_raw.append(['ucldc_schema:campusunit', 'https://registry.cdlib.org/api/v1/repository/25/'])
# collection
properties_raw.append(['ucldc_schema:collection', 'https://registry.cdlib.org/api/v1/collection/108/'])
# get metadata from MODS
# title
for title_info in mods.iterfind('mods:titleInfo', namespaces=nsmap):
if title_info.get('type') == 'alternative':
for title in title_info.iterfind('mods:title', namespaces=nsmap):
properties_raw.append(['ucldc_schema:alternativetitle', title.text])
else:
for title in title_info.iterfind('mods:title', namespaces=nsmap):
properties_raw.append(['dc:title', title.text])
# creator
for name in mods.iterfind('mods:name', namespaces=nsmap):
for roleTerm in name.iterfind('mods:role/mods:roleTerm', namespaces=nsmap):
if roleTerm.get('type') == 'text':
role = roleTerm.text
for namePart in name.iterfind('mods:namePart', namespaces=nsmap):
name_text = namePart.text
creator_properties = []
creator_properties.append(['role', role])
creator_properties.append(['nametype', 'persname']) # all personal
creator_properties.append(['name', name_text])
properties_raw.append(['ucldc_schema:creator', creator_properties])
# place
for place_term in mods.iterfind('mods:originInfo/mods:place/mods:placeTerm', namespaces=nsmap):
place = place_term.text
properties_raw.append(['ucldc_schema:publisher', place])
# date created
date_properties = []
for date in mods.iterfind('mods:originInfo/mods:dateCreated', namespaces=nsmap):
if len(date.attrib) == 0:
date_properties.append(['date', date.text])
elif date.get('keyDate') == 'yes':
date_properties.append(['single', date.text])
if len(date_properties) > 0:
date_properties.append(['datetype', 'created'])
properties_raw.append(['ucldc_schema:date', date_properties])
# form/genre
for genre in mods.iterfind('mods:genre', namespaces=nsmap):
source = genre.get('authority')
properties_raw.append(['ucldc_schema:formgenre', [['heading', genre.text], ['source', source]]])
# language
for language_term in mods.iterfind('mods:language/mods:languageTerm', namespaces=nsmap):
language_code = language_term.text
if language_code == 'jpn':
language = 'Japanese'
elif language_code == 'eng':
language = 'English'
elif language_code == 'chi':
language = 'Chinese'
language_properties = []
language_properties.append(['language', language])
language_properties.append(['languagecode', language_code])
properties_raw.append(['ucldc_schema:language', language_properties])
# physical description
for physDesc in mods.iterfind('mods:physicalDescription/mods:extent', namespaces=nsmap):
properties_raw.append(['ucldc_schema:physdesc', physDesc.text])
# description
for abstract in mods.iterfind('mods:abstract', namespaces=nsmap):
properties_raw.append(['ucldc_schema:description', abstract.text])
# subject
for subject in mods.iterfind('mods:subject', namespaces=nsmap):
subject_source = subject.get('authority')
# topic
for topic in subject.iterfind('mods:topic', namespaces=nsmap):
topic_heading = topic.text
properties_raw.append(['ucldc_schema:subjecttopic', [['heading', topic.text], ['headingtype', 'topic'], ['source', subject_source]]])
# name
for subjectname in subject.iterfind('mods:name', namespaces=nsmap):
for name_part in subjectname.iterfind('mods:namePart', namespaces=nsmap):
name = name_part.text
if subjectname.get('type') == 'personal':
nametype = 'persname'
elif subjectname.get('type') == 'corporate':
nametype = 'corpname'
name_source = subjectname.get('authority')
name_properties = []
name_properties.append(['name', name])
name_properties.append(['nametype', nametype])
name_properties.append(['source', name_source])
#properties_raw.append(['ucldc_schema:subjectname', name_properties])
# places
for geographic in subject.iterfind('mods:geographic', namespaces=nsmap):
place = geographic.text
geo_properties = []
geo_properties.append(['name', place])
geo_properties.append(['source', subject_source])
properties_raw.append(['ucldc_schema:place', geo_properties])
# rights status
properties_raw.append(['ucldc_schema:rightsstatus', 'copyrighted'])
# rights statement
properties_raw.append(['ucldc_schema:rightsstatement', 'Transmission or reproduction of materials protected by copyright beyond that allowed by fair use requires the written permission of the copyright owners. Works not in the public domain cannot be commercially exploited without permission of the copyright owner. Responsibility for any use rests exclusively with the user.'])
return properties_raw
if __name__ == "__main__":
sys.exit(main())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
10133,
4947,
287,
399,
18095,
78,
37227,
198,
11748,
25064,
11,
28686,
11,
7007,
198,
6738,
300,
19875,
1330,
2123,
631,
198,
11748,
279,
4798,
198,
6738,
279,
2047,
2821,
1330,
... | 2.469588 | 4,998 |
out = [
0,
# 1 = Arousal max
[
-1,
{"i":1},
{"i":100,"d":250,"r":1,"e":"Sinusoidal.In"},
{"i":0},
{"i":0,"d":500}
],
# 2 = Rocket
[
-1,
{},
{"i":255,"d":1000,"e":"Exponential.In"},
{"i":1,"d":1000,"e":"Exponential.In"},
{"i":150,"d":100,"r":8},
{"i":50,"d":100},
{"i":50,"d":600},
{"i":200,"d":0,"r":8},
{"i":200,"d":200,"r":8},
{"i":1,"d":0},
{"i":1,"d":200},
{"i":200,"d":0},
{"i":200,"d":200},
{"i":50,"d":0},
{"i":20,"d":300},
{"i":200,"d":0},
{"i":100,"d":400},
{"i":50,"d":0},
{"i":50,"d":200},
{"i":200,"d":500,"e":"Exponential.In"},
{"i":1,"d":500,"e":"Exponential.In"}
],
# 3 = Pain small
[
1,
{"i":200,"d":0},
{"i":200,"d":50},
{"i":1},
{"d":100}
],
# 4 = Pain Large
[
0,
{"i":255,"d":0},
{"i":255,"d":100},
{"i":0,"d":1000, "e":"Sinusoidal.In"}
],
# 5 = Ars small
[
0,
{"i":70, "d":250},
{"d":250}
],
# 6 = Ars large
[
0,
{"i":0},
{"i":125, "y":True, "e":"Sinusoidal.InOut", "d":250, "r":1},
{"d":750, "e":"Sinusoidal.InOut"}
],
# 7 = Jade Rod
[
-1,
{"i":1},
{"i":100,"d":2000,"e":"Sinusoidal.InOut","r":1,"y":1}
],
# 8 = Small tickles
[
-1,
{"i":1},
{"i":50,"d":100,"e":"Sinusoidal.Out","r":1,"y":1}
],
# 9 = Idle ooze
[
-1,
{"i":1},
{"i":20,"d":5000},
{"i":150,"d":100,"e":"Sinusoidal.Out","r":3,"y":1},
{"i":1,"d":3000},
{"i":25,"d":6000},
{"i":250,"d":100,"e":"Sinusoidal.Out","r":1,"y":1},
{"i":1,"d":10000},
],
#10 = Pulsating mushroom
[
-1,
{"i":1},
{"i":100,"d":250, "e":"Sinusoidal.In", "r":3,"y":1},
{"i":0,"d":1000},
{"i":50,"d":2000, "e":"Bounce.InOut", "r":1, "y":1},
{"i":0,"d":2000},
],
#11 = Pulsating mushroom small
[
-1,
{"i":1},
{"i":20,"d":10000, "e":"Sinusoidal.InOut"},
{"i":1,"d":1000},
{"i":100,"d":2000, "e":"Bounce.InOut", "r":1, "y":1},
{"i":1,"d":2000},
{"i":10,"d":30000, "e":"Sinusoidal.InOut"},
{"i":50,"d":250, "e":"Bounce.InOut", "r":5, "y":1},
{"i":1, "d":10000}
],
#12 = Shara's fel rod
[
-1,
{"i":255},
{"i":False,"d":{"min":250,"max":1000}},
{},
{"d":{"min":100,"max":1000}},
{"i":255},
{"i":255,"d":{"min":100,"max":500}},
{"i":10},
{"i":60, "d":{"min":2000,"max":5000}},
{},
{"i":{"min":50,"max":200},"d":{"min":100,"max":600},"r":{"min":1,"max":6}},
{},
{"d":{"min":100,"max":3000}},
{"i":255},
{"i":255,"d":{"min":500,"max":1000}},
{"i":{"min":10,"max":50}},
{"i":False,"d":{"min":1000,"max":5000}},
],
#13 = Shattering song
[
-1,
{},
{"i":150,"d":150}
],
#14 = PULSATING_MANA_GEM
[
-1,
{},
{"i":100,"d":500,"e":"Sinusoidal.In"}
],
#15 = PULSATING_MANA_GEM_NIGHTBORNE
[
-1,
{},
{"i":255,"d":250,"e":"Sinusoidal.In"}
],
#16 = SMALL_TICKLE_RANDOM
[
-1,
{},
{"i":{"min":20,"max":80}},
{"i":False,"d":{"min":50,"max":200}},
{},
{"d":{"min":50,"max":200}}
],
#17 = GROIN_RUMBLE_TOTEM
[
-1,
{},
{"i":{"min":100,"max":255},"d":{"min":100,"max":1000}}
],
#18 = VINE_THONG
[
-1,
{"d":{"min":500,"max":3000}},
{"i":{"min":30,"max":255},"d":{"min":500,"max":3000}}
],
#19 = THONG_OF_VALOR
[
-1,
{},
{"i":100,"d":500,"e":"Quartic.In"},
{},
{"i":255,"d":500,"e":"Quartic.In"},
{},
{"d":1000}
]
] | [
448,
796,
685,
198,
197,
15,
11,
198,
197,
2,
352,
796,
317,
7596,
282,
3509,
198,
197,
58,
198,
197,
197,
12,
16,
11,
198,
197,
197,
4895,
72,
1298,
16,
5512,
198,
197,
197,
4895,
72,
1298,
3064,
553,
67,
1298,
9031,
553,
81,... | 1.766575 | 1,825 |
import os
from pyctogram.instagram_client import client
from pyctogram.tests import account
# Manual smoke test: log in with the test account and upload a sample video.
if __name__ == '__main__':
    insta_client = client.InstagramClient(account.username, account.password)
    insta_client.login()
    # 'example.mp4' is expected in a 'data' directory next to this script
    video_path = os.path.join(os.path.dirname(__file__), 'data', 'example.mp4')
    insta_client.upload_video(video_path)
| [
11748,
28686,
198,
6738,
12972,
310,
21857,
13,
8625,
6713,
62,
16366,
1330,
5456,
198,
6738,
12972,
310,
21857,
13,
41989,
1330,
1848,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
916,
64,
6... | 2.85124 | 121 |
from django.urls import path
from .views import snippet_detail_view
from .views import snippet_create_view
from .views import snippet_update_view
from .views import snippet_delete_view
from .views import snippet_list_view
from .search import snippet_search
app_name = "snippets"
# Route table for the snippets app: list, CRUD detail views, and search.
urlpatterns = [
    path("", snippet_list_view, name="snippets"),
    path("add/", snippet_create_view, name="create"),
    path("del/<int:pk>/", snippet_delete_view, name="delete"),
    path("info/<int:pk>/", snippet_detail_view, name="snippet"),
    path("upd/<int:pk>/", snippet_update_view, name="update"),
    path("search", snippet_search, name="search"),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
198,
6738,
764,
33571,
1330,
39442,
62,
49170,
62,
1177,
198,
6738,
764,
33571,
1330,
39442,
62,
17953,
62,
1177,
198,
6738,
764,
33571,
1330,
39442,
62,
19119,
62,
1177,
198,
6738,
7... | 2.004684 | 427 |
from asyncore import loop
# Read a number, show the base menu, then print the chosen conversion.
# NOTE(review): the file-level `from asyncore import loop` import above is
# unused and asyncore is deprecated; consider removing it.
numero = int(input('Digite um número: '))
print('''Escolha uma das bases para conversão: 
[ 1 ] Converter para BINÁRIO
[ 2 ] Converter para OCTAL
[ 3 ] Converter para HEXADECIMAL''')
opcao = int(input('Sua opção: '))
# Dispatch table: option -> (message template, conversion builtin).
conversoes = {
    1: ('O número {} convertido para binário é igual a {}', bin),
    2: ('O número {} convertido para octal é igual a {}', oct),
    3: ('O número {} convertido para hexadecimal é igual a {}', hex),
}
if opcao in conversoes:
    mensagem, converter = conversoes[opcao]
    # strip the '0b'/'0o'/'0x' prefix emitted by the builtin
    print(mensagem.format(numero, converter(numero)[2:]))
else:
    print('OPÇÃO INVÁLIDA')
| [
6738,
355,
2047,
7295,
1330,
9052,
628,
198,
22510,
3529,
796,
493,
7,
15414,
10786,
19511,
578,
23781,
299,
21356,
647,
78,
25,
705,
4008,
198,
4798,
7,
7061,
6,
47051,
349,
3099,
334,
2611,
288,
292,
12536,
31215,
3453,
28749,
25,
... | 2.405512 | 254 |
# https://leetcode.com/problems/closest-binary-search-tree-value/
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
| [
2,
3740,
1378,
293,
316,
8189,
13,
785,
14,
1676,
22143,
14,
565,
418,
395,
12,
39491,
12,
12947,
12,
21048,
12,
8367,
14,
198,
2,
30396,
329,
257,
13934,
5509,
10139,
13,
198,
2,
1398,
12200,
19667,
25,
198,
2,
220,
220,
220,
2... | 2.345455 | 110 |
from threading import Lock, BoundedSemaphore
from polog.handlers.abstract.base import BaseHandler
from polog.core.utils.read_only_singleton import ReadOnlySingleton
class memory_saver(ReadOnlySingleton, BaseHandler):
    """
    Stub handler class for tests.

    Plugged in like a regular handler, but it does not write or send logs
    anywhere; it only keeps them in memory. Logs stored by this class are
    LogItem instances. The most recent log is always in the 'last'
    attribute; all logs are in the 'all' attribute.
    """
    # most recently handled log entry
    last = None
    # guards concurrent access to the shared log list
    all_semaphore = BoundedSemaphore(value=1)
    def __call__(self, log):
        """
        Record a log entry: append it to the list of all logs and remember
        it as the most recent one.
        """
        # NOTE(review): Lock() creates a brand-new lock on every call, so it
        # synchronizes nothing; the semaphore below is the real mutex.
        with Lock():
            with self.all_semaphore:
                # NOTE(review): 'all' is not defined in this class body --
                # presumably provided by ReadOnlySingleton/BaseHandler; verify.
                self.all.append(log)
                self.last = log
    def clean(self):
        """
        Clear previously stored records.

        The semaphore prevents clearing the list at the same moment another
        thread is appending to it.
        """
        with self.all_semaphore:
            self.all = []
            self.last = None
| [
6738,
4704,
278,
1330,
13656,
11,
347,
6302,
13900,
6570,
382,
198,
198,
6738,
755,
519,
13,
4993,
8116,
13,
397,
8709,
13,
8692,
1330,
7308,
25060,
198,
6738,
755,
519,
13,
7295,
13,
26791,
13,
961,
62,
8807,
62,
12215,
10565,
1330... | 1.313789 | 921 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import logging
import traceback
import random
import string
import re
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
__fn = os.path.abspath(__file__)
__n = os.path.splitext(os.path.basename(__fn))[0]
__mod = os.path.dirname(__fn)
__pmod = os.path.dirname(__mod)
log = logging.getLogger(__n)
__metaclass__ = type
with open(os.path.join(__mod, '../api/cops_load.python')) as fic:
exec(fic.read(), globals(), locals())
import copsf_api # noqa
import copsf_json # noqa
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
def rand_stored_value(self, variables, key,
length=None, force=False, value=None,
*args, **kwargs):
'''
Generate and store a password.
At soon as one is stored with a specific key, it will never be renegerated
unless you set force to true.
'''
reg = load(self, variables, 'local_passwords', registry_format='json')
sav = False
curval = reg.get(key, None)
if not curval:
force = True
if value is not None:
curval = value
elif force:
curval = copsf_api.rand_value(length=length)
if reg.get(key, None) != curval:
reg[key] = curval
sav = True
if sav:
encode(self, variables, 'local_passwords', reg, registry_format='json')
return reg[key]
__funcs__ = {
'json_dump': json_dump,
'json_load': json_load,
'registry_load': load,
'registry_encode': encode,
'rand_stored_value': rand_stored_value,
}
# vim:set et sts=4 ts=4 tw=80:
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
... | 2.470668 | 733 |
from CLIF_Framework.framework import event
from CLIF_Framework.framework import tools
from CLIF_Framework.framework import console
from CLIF_Framework.framework import module
from os import system
try:
import readline # noqa: F401
except Exception:
pass
event = event()
tools = tools()
| [
6738,
7852,
5064,
62,
21055,
6433,
13,
30604,
1330,
1785,
198,
6738,
7852,
5064,
62,
21055,
6433,
13,
30604,
1330,
4899,
198,
6738,
7852,
5064,
62,
21055,
6433,
13,
30604,
1330,
8624,
198,
6738,
7852,
5064,
62,
21055,
6433,
13,
30604,
... | 3.54878 | 82 |
from django.db import models
from django.core.validators import RegexValidator
from django import forms
from django.contrib import admin
#Model for user ie. participant
#Model for question
ANSWER_CHOICES = [
('A','A'),
('B','B'),
('C','C'),
('D','D'),
]
#Model for Answer which contains options and foreign ket to question
#Model to store answers given by participant. Stored in the form of comma-separated string
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
7295,
13,
12102,
2024,
1330,
797,
25636,
47139,
1352,
198,
6738,
42625,
14208,
1330,
5107,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
2,
17633,
3... | 3.310078 | 129 |
"""Module containing all the player classes and the bot strategies."""
from random import choice
class Player:
"""A class that holds the information about the player."""
score = 0
current_turn_score = 0
def __init__(self, name):
"""Configure the instance with it's name."""
self.name = name
def change_name(self, new_name):
"""Change the name to the value of new_name."""
self.name = new_name
def __str__(self):
"""Return a string representing the object."""
return f"<{self.name}, score: {self.score}>"
def finish_turn(self):
"""Finishe a player's turn."""
self.score += self.current_turn_score
self.current_turn_score = 0
class HumanPlayer(Player):
"""Separate class to identify human players."""
pass
class BotPlayer(Player):
"""A bot player. Has a strategy that dictates it's decisions."""
strategy = None
def __init__(self, name, strategy=None):
"""Configure the bot, with a name and strategy."""
super().__init__(name)
# if no strategy is provided, one is chosen at random
if strategy is None:
strategy = choice(
[LowRiskStrategy(), MediumRiskStrategy(), HighRiskStrategy()]
)
self.strategy = strategy
class Strategy:
"""Class used to take decisions for the bots."""
threshold = 0
def should_roll(self, current_score):
"""Return true if the decision is to play, false otherwise."""
return current_score <= self.threshold
class LowRiskStrategy(Strategy):
"""LowRiskStrategy for a bot, with the threshold of 6."""
threshold = 6
class MediumRiskStrategy(Strategy):
"""MediumRiskStrategy for a bot, with the threshold of 12."""
threshold = 12
class HighRiskStrategy(Strategy):
"""HighRiskStrategy for a bot, with the threshold of 18."""
threshold = 18
| [
37811,
26796,
7268,
477,
262,
2137,
6097,
290,
262,
10214,
10064,
526,
15931,
198,
6738,
4738,
1330,
3572,
628,
198,
4871,
7853,
25,
198,
220,
220,
220,
37227,
32,
1398,
326,
6622,
262,
1321,
546,
262,
2137,
526,
15931,
628,
220,
220,... | 2.761429 | 700 |
import sys, requests, time
from json import load, dumps
# Input your cloudflare zone id. It can be change the value.
# It need to input the zone id you want to.
ZONE_ID = 'a0bb2d04c8e6e66a2aecc9bbd1741f51'
# Input the cloudflare api key. (It will be Private key)
# It needs to input your key.
API_KEY = '0000000000000000000000000000000000000000'
# Getting your information from email address.
EMAIL = 'ruskonert@gmail.com'
CONTENT_TYPE = 'application/json'
CLOUDFLARE_API_ZONE_URL = 'https://api.cloudflare.com/client/v4/zones/{zone_id}/dns_records/{record_identifier}'
def update_dns_record(record_id, dns_record_type, name, content, optional = None):
"""
DNS records for a zone update dns record, It refers to here:
PUT zones/:zone_identifier/dns_records/:identifier
"""
header = get_response_header()
data = {'type': dns_record_type, 'name': name, 'content': content }
json_data = dumps(data)
response = requests.put(CLOUDFLARE_API_ZONE_URL.format(zone_id = ZONE_ID, record_identifier = record_id), headers = header, data=json_data)
result = response.json()['success']
if result is None:
raise requests.RequestException("Request failed")
else:
return result
if __name__ == "__main__":
main()
| [
11748,
25064,
11,
7007,
11,
640,
198,
198,
6738,
33918,
1330,
3440,
11,
45514,
198,
198,
2,
23412,
534,
6279,
2704,
533,
6516,
4686,
13,
632,
460,
307,
1487,
262,
1988,
13,
198,
2,
632,
761,
284,
5128,
262,
6516,
4686,
345,
765,
2... | 2.717021 | 470 |
#! /usr/bin/env python
from __future__ import print_function
import sys
import json
from collections import Counter
if __name__ == '__main__':
main()
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
25064,
198,
11748,
33918,
198,
6738,
17268,
1330,
15034,
628,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12... | 3.137255 | 51 |
from multiprocessing.sharedctypes import Value
import networkx as nx
import numpy as np
from awpy.data import NAV, NAV_GRAPHS
from scipy.spatial import distance
def point_in_area(map_name, area_id, point):
"""Returns if the point is within a nav area for a map.
Args:
map_name (string): Map to search
area_id (int): Area ID as an integer
point (list): Point as a list [x,y,z]
Returns:
True if area contains the point, false if not
"""
if map_name not in NAV.keys():
raise ValueError("Map not found.")
if area_id not in NAV[map_name].keys():
raise ValueError("Area ID not found.")
if len(point) != 3:
raise ValueError("Point must be a list [X,Y,Z]")
contains_x = (
min(NAV[map_name][area_id]["northWestX"], NAV[map_name][area_id]["southEastX"])
< point[0]
< max(
NAV[map_name][area_id]["northWestX"], NAV[map_name][area_id]["southEastX"]
)
)
contains_y = (
min(NAV[map_name][area_id]["northWestY"], NAV[map_name][area_id]["southEastY"])
< point[1]
< max(
NAV[map_name][area_id]["northWestY"], NAV[map_name][area_id]["southEastY"]
)
)
if contains_x and contains_y:
return True
else:
return False
def find_closest_area(map_name, point):
"""Finds the closest area in the nav mesh. Searches through all the areas by comparing point to area centerpoint.
Args:
map_name (string): Map to search
point (list): Point as a list [x,y,z]
Returns:
A dict containing info on the closest area
"""
if map_name not in NAV.keys():
raise ValueError("Map not found.")
if len(point) != 3:
raise ValueError("Point must be a list [X,Y,Z]")
closest_area = {"mapName": map_name, "areaId": None, "distance": 999999}
for area in NAV[map_name].keys():
avg_x = (
NAV[map_name][area]["northWestX"] + NAV[map_name][area]["southEastX"]
) / 2
avg_y = (
NAV[map_name][area]["northWestY"] + NAV[map_name][area]["southEastY"]
) / 2
avg_z = (
NAV[map_name][area]["northWestZ"] + NAV[map_name][area]["southEastZ"]
) / 2
dist = np.sqrt(
(point[0] - avg_x) ** 2 + (point[1] - avg_y) ** 2 + (point[2] - avg_z) ** 2
)
if dist < closest_area["distance"]:
closest_area["areaId"] = area
closest_area["distance"] = dist
return closest_area
def area_distance(map_name, area_a, area_b, dist_type="graph"):
"""Returns the distance between two areas. Dist type an be graph or geodesic.
Args:
map_name (string): Map to search
area_a (int): Area id
area_b (int): Area id
dist_type (string): String indicating the type of distance to use (graph or geodesic)
Returns:
A dict containing info on the path between two areas.
"""
if map_name not in NAV.keys():
raise ValueError("Map not found.")
if (area_a not in NAV[map_name].keys()) or (area_b not in NAV[map_name].keys()):
raise ValueError("Area ID not found.")
if dist_type not in ["graph", "geodesic"]:
raise ValueError("dist_type can only be graph or geodesic")
G = NAV_GRAPHS[map_name]
distance_obj = {"distanceType": dist_type, "distance": None, "areas": []}
if dist_type == "graph":
discovered_path = nx.shortest_path(G, area_a, area_b)
distance_obj["distance"] = len(discovered_path) - 1
distance_obj["areas"] = discovered_path
return distance_obj
if dist_type == "geodesic":
geodesic_path = nx.astar_path(G, area_a, area_b, heuristic=dist)
geodesic_cost = 0
for i, area in enumerate(geodesic_path):
if i > 0:
geodesic_cost += G.nodes()[area]["size"]
distance_obj["distance"] = geodesic_cost
distance_obj["areas"] = geodesic_path
return distance_obj
def point_distance(map_name, point_a, point_b, dist_type="graph"):
"""Returns the distance between two points.
Args:
map_name (string): Map to search
point_a (list): Point as a list (x,y,z)
point_b (list): Point as a list (x,y,z)
dist_type (string): String indicating the type of distance to use. Can be graph, geodesic, euclidena, manhattan, canberra or cosine.
Returns:
A dict containing info on the distance between two points.
"""
distance_obj = {"distanceType": dist_type, "distance": None, "areas": []}
if dist_type == "graph":
if map_name not in NAV.keys():
raise ValueError("Map not found.")
if len(point_a) != 3 or len(point_b) != 3:
raise ValueError(
"When using graph or geodesic distance, point must be X/Y/Z"
)
area_a = find_closest_area(map_name, point_a)["areaId"]
area_b = find_closest_area(map_name, point_b)["areaId"]
return area_distance(map_name, area_a, area_b, dist_type=dist_type)
elif dist_type == "geodesic":
if map_name not in NAV.keys():
raise ValueError("Map not found.")
if len(point_a) != 3 or len(point_b) != 3:
raise ValueError(
"When using graph or geodesic distance, point must be X/Y/Z"
)
area_a = find_closest_area(map_name, point_a)["areaId"]
area_b = find_closest_area(map_name, point_b)["areaId"]
return area_distance(map_name, area_a, area_b, dist_type=dist_type)
elif dist_type == "euclidean":
distance_obj["distance"] = distance.euclidean(point_a, point_b)
return distance_obj
elif dist_type == "manhattan":
distance_obj["distance"] = distance.cityblock(point_a, point_b)
return distance_obj
elif dist_type == "canberra":
distance_obj["distance"] = distance.canberra(point_a, point_b)
return distance_obj
elif dist_type == "cosine":
distance_obj["distance"] = distance.cosine(point_a, point_b)
return distance_obj
def generate_position_token(map_name, frame):
"""Generates the position token for a game frame.
Args:
map_name (string): Map to search
frame (dict): A game frame
Returns:
A dict containing the T token, CT token and combined token (T + CT concatenated)
"""
if map_name not in NAV.keys():
raise ValueError("Map not found.")
if (len(frame["ct"]["players"]) == 0) or (len(frame["t"]["players"]) == 0):
raise ValueError("CT or T players has length of 0")
# Create map area list
map_area_names = []
for area_id in NAV[map_name]:
if NAV[map_name][area_id]["areaName"] not in map_area_names:
map_area_names.append(NAV[map_name][area_id]["areaName"])
map_area_names.sort()
# Create token
ct_token = np.zeros(len(map_area_names), dtype=np.int8)
for player in frame["ct"]["players"]:
if player["isAlive"]:
closest_area = find_closest_area(
map_name, [player["x"], player["y"], player["z"]]
)
ct_token[
map_area_names.index(NAV[map_name][closest_area["areaId"]]["areaName"])
] += 1
t_token = np.zeros(len(map_area_names), dtype=np.int8)
for player in frame["t"]["players"]:
if player["isAlive"]:
closest_area = find_closest_area(
map_name, [player["x"], player["y"], player["z"]]
)
t_token[
map_area_names.index(NAV[map_name][closest_area["areaId"]]["areaName"])
] += 1
# Create payload
token = {}
token["tToken"] = (
str(t_token).replace("'", "").replace("[", "").replace("]", "").replace(" ", "")
)
token["ctToken"] = (
str(ct_token)
.replace("'", "")
.replace("[", "")
.replace("]", "")
.replace(" ", "")
)
token["token"] = token["ctToken"] + token["tToken"]
return token
| [
6738,
18540,
305,
919,
278,
13,
28710,
310,
9497,
1330,
11052,
198,
11748,
3127,
87,
355,
299,
87,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
3253,
9078,
13,
7890,
1330,
47140,
11,
47140,
62,
10761,
2969,
7998,
198,
6738,
629... | 2.238943 | 3,595 |
"""CFNgin lookup registry."""
import logging
import warnings
from six import string_types
from runway.lookups.handlers import ssm
from runway.util import load_object_from_string
from ..exceptions import FailedVariableLookup, UnknownLookupType
from .handlers import ami, default, dynamodb, envvar
from .handlers import file as file_handler
from .handlers import hook_data, kms, output, rxref, split, ssmstore, xref
CFNGIN_LOOKUP_HANDLERS = {}
def register_lookup_handler(lookup_type, handler_or_path):
"""Register a lookup handler.
Args:
lookup_type (str): Name to register the handler under.
handler_or_path (Union[Callable, str]): A function or a path to a
handler.
"""
handler = handler_or_path
if isinstance(handler_or_path, string_types):
handler = load_object_from_string(handler_or_path)
CFNGIN_LOOKUP_HANDLERS[lookup_type] = handler
if not isinstance(handler, type):
# Hander is a not a new-style handler
logger = logging.getLogger(__name__)
logger.warning("Registering lookup `%s`: Please upgrade to use the "
"new style of Lookups.", lookup_type)
warnings.warn(
# For some reason, this does not show up...
# Leaving it in anyway
"Lookup `%s`: Please upgrade to use the new style of Lookups"
"." % lookup_type,
DeprecationWarning,
stacklevel=2,
)
def unregister_lookup_handler(lookup_type):
"""Unregister the specified lookup type.
This is useful when testing various lookup types if you want to unregister
the lookup type after the test runs.
Args:
lookup_type (str): Name of the lookup type to unregister.
"""
CFNGIN_LOOKUP_HANDLERS.pop(lookup_type, None)
def resolve_lookups(variable, context, provider):
"""Resolve a set of lookups.
Args:
variable (:class:`runway.cfngin.variables.Variable`): The variable
resolving it's lookups.
context (:class:`runway.cfngin.context.Context`): Context instance.
provider (:class:`runway.cfngin.providers.base.BaseProvider`): Provider
instance.
Returns:
Dict[str, Any]: Lookup -> resolved value
"""
resolved_lookups = {}
for lookup in variable.lookups:
try:
handler = CFNGIN_LOOKUP_HANDLERS[lookup.type]
except KeyError:
raise UnknownLookupType(lookup)
try:
resolved_lookups[lookup] = handler(
value=lookup.input,
context=context,
provider=provider,
)
except Exception as err:
raise FailedVariableLookup(variable.name, lookup, err)
return resolved_lookups
register_lookup_handler(ami.TYPE_NAME, ami.AmiLookup)
register_lookup_handler(default.TYPE_NAME, default.DefaultLookup)
register_lookup_handler(dynamodb.TYPE_NAME, dynamodb.DynamodbLookup)
register_lookup_handler(envvar.TYPE_NAME, envvar.EnvvarLookup)
register_lookup_handler(file_handler.TYPE_NAME, file_handler.FileLookup)
register_lookup_handler(hook_data.TYPE_NAME, hook_data.HookDataLookup)
register_lookup_handler(kms.TYPE_NAME, kms.KmsLookup)
register_lookup_handler(output.TYPE_NAME, output.OutputLookup)
register_lookup_handler(rxref.TYPE_NAME, rxref.RxrefLookup)
register_lookup_handler(split.TYPE_NAME, split.SplitLookup)
register_lookup_handler(ssm.TYPE_NAME, ssm.SsmLookup)
register_lookup_handler(ssmstore.TYPE_NAME, ssmstore.SsmstoreLookup)
register_lookup_handler(xref.TYPE_NAME, xref.XrefLookup)
| [
37811,
22495,
45,
1655,
35847,
20478,
526,
15931,
198,
11748,
18931,
198,
11748,
14601,
198,
198,
6738,
2237,
1330,
4731,
62,
19199,
198,
198,
6738,
23443,
13,
5460,
4739,
13,
4993,
8116,
1330,
264,
5796,
198,
6738,
23443,
13,
22602,
13... | 2.490291 | 1,442 |
from types import SimpleNamespace
from main import main, get_parser
for lg in ["en", "id", "sv"]:
for model in ["poincare", "euclidean"]:
try:
args = get_parser()
args.data_dir = f"data/dbpedia/{lg}/"
# args.model_reload_path = "dumps/dbpedia/en/euclidean_model.pt"
args.model_reload_path = f"dumps/dbpedia/{lg}/{model}_model.pt"
args.model = model
args.batch_size = 2048
args.dim = 40
args.cuda = True
args.eval_only = True
main(args)
except:
continue
| [
6738,
3858,
1330,
17427,
36690,
10223,
198,
6738,
1388,
1330,
1388,
11,
651,
62,
48610,
198,
198,
1640,
300,
70,
287,
14631,
268,
1600,
366,
312,
1600,
366,
21370,
1,
5974,
198,
220,
220,
220,
329,
2746,
287,
14631,
7501,
1939,
533,
... | 1.896875 | 320 |
# -*- coding: utf-8 -*-
from django.test.client import Client
from django.test.utils import override_settings
from openslides.utils.test import TestCase
@override_settings(INSTALLED_PLUGINS=('tests.plugin_api.test_plugin_one',))
@override_settings(INSTALLED_PLUGINS=('tests.plugin_api.test_plugin_two',))
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
42625,
14208,
13,
9288,
13,
16366,
1330,
20985,
198,
6738,
42625,
14208,
13,
9288,
13,
26791,
1330,
20957,
62,
33692,
198,
198,
6738,
9808,
75,
1460,
13,
2679... | 2.827273 | 110 |
"""
Tankity-tank-tank-tank
Example program for Qt Animation using QGraphicsScene
"""
import sys
from PyQt5 import QtWidgets as qtw
from PyQt5 import QtGui as qtg
from PyQt5 import QtCore as qtc
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
BORDER_HEIGHT = 100
if __name__ == '__main__':
app = qtw.QApplication(sys.argv)
# it's required to save a reference to MainWindow.
# if it goes out of scope, it will be destroyed.
mw = MainWindow()
sys.exit(app.exec())
| [
37811,
198,
32978,
414,
12,
28451,
12,
28451,
12,
28451,
198,
198,
16281,
1430,
329,
33734,
23535,
1262,
1195,
18172,
36542,
198,
37811,
198,
198,
11748,
25064,
198,
6738,
9485,
48,
83,
20,
1330,
33734,
54,
312,
11407,
355,
10662,
4246,... | 2.642857 | 182 |
from tkinter import *
from gamemodes.locales.localeManager import *
import platform
| [
6738,
256,
74,
3849,
1330,
1635,
201,
198,
6738,
9106,
368,
4147,
13,
17946,
2040,
13,
17946,
1000,
13511,
1330,
1635,
201,
198,
11748,
3859,
201
] | 3.307692 | 26 |
from flask_restful import Resource
from lyrebird.mock import context, plugin_manager
from flask import request, jsonify, abort
from lyrebird import application
class Conf(Resource):
"""
Lyrebird 及 插件 配置文件获取和修改
"""
class ResetConf(Resource):
"""
Lyrebird 及 插件 配置文件重置
"""
| [
6738,
42903,
62,
2118,
913,
1330,
20857,
198,
6738,
22404,
260,
16944,
13,
76,
735,
1330,
4732,
11,
13877,
62,
37153,
198,
6738,
42903,
1330,
2581,
11,
33918,
1958,
11,
15614,
198,
6738,
22404,
260,
16944,
1330,
3586,
628,
198,
4871,
... | 2.098592 | 142 |
#!/usr/bin/env python
from __future__ import print_function
import sys
from pynauty import autgrp
# List of graphs for testing
#
# Structure:
# [[name, Graph, numorbit, grpsize, generators]]
#
# numorbit, grpsize, generators was calculated by dreadnut
#
from data_graphs import graphs
if __name__ == '__main__':
print('Testing pynauty.autgrp()')
print('Python version: ' + sys.version)
print('Starting ...')
passed = 0
failed = 0
for gname, g, numorbit, grpsize, gens in graphs:
print('%-17s ...' % gname, end=' ')
sys.stdout.flush()
generators, order, o2, orbits, orbit_no = autgrp(g)
if generators == gens and orbit_no == numorbit and order == grpsize:
print('OK')
passed += 1
else:
print('failed')
failed +=1
print('... done.')
if failed > 0:
print('passed = %d failed = %d' % (passed, failed))
else:
print('All tests passed.')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
25064,
198,
6738,
279,
2047,
2306,
88,
1330,
1960,
2164,
79,
198,
198,
2,
7343,
286,
28770,
329,
4856,
198,
2,
198,
2,... | 2.369565 | 414 |
OCTICON_ISSUE_OPENED = """
<svg class="octicon octicon-opened" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 16 16" width="16" height="16"><path fill-rule="evenodd" d="M8 1.5a6.5 6.5 0 100 13 6.5 6.5 0 000-13zM0 8a8 8 0 1116 0A8 8 0 010 8zm9 3a1 1 0 11-2 0 1 1 0 012 0zm-.25-6.25a.75.75 0 00-1.5 0v3.5a.75.75 0 001.5 0v-3.5z"></path></svg>
"""
| [
198,
46,
4177,
2149,
1340,
62,
16744,
8924,
62,
3185,
1677,
1961,
796,
37227,
198,
27,
21370,
70,
1398,
2625,
38441,
4749,
19318,
4749,
12,
26350,
1,
35555,
5907,
2625,
4023,
1378,
2503,
13,
86,
18,
13,
2398,
14,
11024,
14,
21370,
7... | 1.917127 | 181 |
#!/usr/bin/python3
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
201
] | 2.111111 | 9 |
"""
Abstract training class
"""
from abc import ABC as AbstractBaseClass
from abc import abstractmethod
| [
37811,
198,
23839,
3047,
1398,
198,
37811,
198,
6738,
450,
66,
1330,
9738,
355,
27741,
14881,
9487,
198,
6738,
450,
66,
1330,
12531,
24396,
628
] | 4.2 | 25 |
import torchvision.transforms as transforms
| [
11748,
28034,
10178,
13,
7645,
23914,
355,
31408,
628,
198
] | 4.6 | 10 |
from __future__ import annotations
from typing import TYPE_CHECKING
from easybill_rest.helper import Helper
from easybill_rest.resources.resource_abstract import ResourceAbstract
if TYPE_CHECKING:
from easybill_rest import Client
| [
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
6738,
19720,
1330,
41876,
62,
50084,
2751,
198,
198,
6738,
2562,
35546,
62,
2118,
13,
2978,
525,
1330,
5053,
525,
198,
6738,
2562,
35546,
62,
2118,
13,
37540,
13,
31092,
62,
397,
8709,
1... | 3.71875 | 64 |
import torch
from torch import nn, optim
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision import datasets
from logger import Logger
batch_size = 128
learning_rate = 1e-2
num_epoches = 20
train_dataset = datasets.MNIST(
root='./data', train=True, transform=transforms.ToTensor(), download=True)
test_dataset = datasets.MNIST(
root='./data', train=False, transform=transforms.ToTensor())
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
model = Cnn(1, 10)
use_gpu = torch.cuda.is_available()
if use_gpu:
model = model.cuda()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=learning_rate)
logger = Logger('./logs')
for epoch in range(num_epoches):
print('epoch {}'.format(epoch + 1))
print('*' * 10)
running_loss = 0.0
running_acc = 0.0
for i, data in enumerate(train_loader, 1):
img, label = data
if use_gpu:
img = img.cuda()
label = label.cuda()
img = Variable(img)
label = Variable(label)
out = model(img)
loss = criterion(out, label)
running_loss += loss.data * label.size(0)
_, pred = torch.max(out, 1)
num_correct = (pred == label).sum()
accuracy = (pred == label).float().mean()
running_acc += num_correct.data
optimizer.zero_grad()
loss.backward()
optimizer.step()
# ========================= Log ======================
step = epoch * len(train_loader) + i
# (1) Log the scalar values
info = {'loss': loss.data, 'accuracy': accuracy.data}
for tag, value in info.items():
logger.scalar_summary(tag, value, step)
# (2) Log values and gradients of the parameters (histogram)
for tag, value in model.named_parameters():
tag = tag.replace('.', '/')
logger.histo_summary(tag, to_np(value), step)
logger.histo_summary(tag + '/grad', to_np(value.grad), step)
# (3) Log the images
info = {'images': to_np(img.view(-1, 28, 28)[:10])}
for tag, images in info.items():
logger.image_summary(tag, images, step)
if i % 300 == 0:
print('[{}/{}] Loss: {:.6f}, Acc: {:.6f}'.format(
epoch + 1, num_epoches, running_loss / (batch_size * i),
running_acc / (batch_size * i)))
print('Finish {} epoch, Loss: {:.6f}, Acc: {:.6f}'.format(
epoch + 1, running_loss / (len(train_dataset)), running_acc / (len(
train_dataset))))
model.eval()
eval_loss = 0
eval_acc = 0
for data in test_loader:
img, label = data
if use_gpu:
img = Variable(img, volatile=True).cuda()
label = Variable(label, volatile=True).cuda()
else:
img = Variable(img, volatile=True)
label = Variable(label, volatile=True)
out = model(img)
loss = criterion(out, label)
eval_loss += loss.data * label.size(0)
_, pred = torch.max(out, 1)
num_correct = (pred == label).sum()
eval_acc += num_correct.data
print('Test Loss: {:.6f}, Acc: {:.6f}'.format(eval_loss / (len(
test_dataset)), eval_acc / (len(test_dataset))))
print()
torch.save(model.state_dict(), './cnn.pth')
| [
198,
11748,
28034,
198,
6738,
28034,
1330,
299,
77,
11,
6436,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
6738,
28034,
13,
2306,
519,
6335,
1330,
35748,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
6060,
17401,
198,
6738,
... | 2.275129 | 1,552 |
# coding:utf-8
#!/usr/bin/python
#
# Copyright (c) Contributors to the Open 3D Engine Project.
# For complete copyright and license terms please see the LICENSE at the root of this distribution.
#
# SPDX-License-Identifier: Apache-2.0 OR MIT
#
#
# -------------------------------------------------------------------------
"""! @brief
DCCsi/Tools/DCC/Blender/__init__.py
This init allows us to treat Blender setup as a DCCsi tools python package
"""
# -------------------------------------------------------------------------
# standard imports
import os
import site
from pathlib import Path
import logging as _logging
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# global scope
_PACKAGENAME = 'Tools.DCC.Blender'
__all__ = ['config',
'constants',
'setup',
'start']
_LOGGER = _logging.getLogger(_PACKAGENAME)
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# set up access to this Blender folder as a pkg
_MODULE_PATH = Path(__file__) # To Do: what if frozen?
_DCCSI_TOOLS_BLENDER_PATH = Path(_MODULE_PATH.parent)
# we need to set up basic access to the DCCsi
_PATH_DCCSI_TOOLS_DCC = Path(_DCCSI_TOOLS_BLENDER_PATH.parent)
_PATH_DCCSI_TOOLS_DCC = Path(os.getenv('PATH_DCCSI_TOOLS_DCC', _PATH_DCCSI_TOOLS_DCC.as_posix()))
site.addsitedir(_PATH_DCCSI_TOOLS_DCC.as_posix())
# we need to set up basic access to the DCCsi
_PATH_DCCSI_TOOLS = Path(_PATH_DCCSI_TOOLS_DCC.parent)
_PATH_DCCSI_TOOLS = Path(os.getenv('PATH_DCCSI_TOOLS', _PATH_DCCSI_TOOLS.as_posix()))
# we need to set up basic access to the DCCsi
_PATH_DCCSIG = Path.joinpath(_DCCSI_TOOLS_BLENDER_PATH, '../../..').resolve()
_PATH_DCCSIG = Path(os.getenv('PATH_DCCSIG', _PATH_DCCSIG.as_posix()))
site.addsitedir(_PATH_DCCSIG.as_posix())
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# now we have access to the DCCsi code and azpy
from azpy.env_bool import env_bool
from azpy.constants import ENVAR_DCCSI_GDEBUG
from azpy.constants import ENVAR_DCCSI_DEV_MODE
from azpy.constants import ENVAR_DCCSI_LOGLEVEL
from azpy.constants import ENVAR_DCCSI_GDEBUGGER
from azpy.constants import FRMT_LOG_LONG
# global space
_DCCSI_GDEBUG = env_bool(ENVAR_DCCSI_GDEBUG, False)
_DCCSI_DEV_MODE = env_bool(ENVAR_DCCSI_DEV_MODE, False)
_DCCSI_GDEBUGGER = env_bool(ENVAR_DCCSI_GDEBUGGER, 'WING')
# default loglevel to info unless set
_DCCSI_LOGLEVEL = int(env_bool(ENVAR_DCCSI_LOGLEVEL, _logging.INFO))
if _DCCSI_GDEBUG:
# override loglevel if runnign debug
_DCCSI_LOGLEVEL = _logging.DEBUG
_logging.basicConfig(level=_DCCSI_LOGLEVEL,
format=FRMT_LOG_LONG,
datefmt='%m-%d %H:%M')
_LOGGER = _logging.getLogger(_PACKAGENAME)
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
def attach_debugger():
"""!
This will attemp to attch the WING debugger
To Do: other IDEs for debugging not yet implemented.
This should be replaced with a plugin based dev package."""
_DCCSI_GDEBUG = True
os.environ["DYNACONF_DCCSI_GDEBUG"] = str(_DCCSI_GDEBUG)
_DCCSI_DEV_MODE = True
os.environ["DYNACONF_DCCSI_DEV_MODE"] = str(_DCCSI_DEV_MODE)
from azpy.test.entry_test import connect_wing
_debugger = connect_wing()
return _debugger
if _DCCSI_DEV_MODE:
attach_debugger()
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# message collection
_LOGGER.debug(f'Initializing: {_PACKAGENAME}')
_LOGGER.debug(f'_MODULE_PATH: {_MODULE_PATH}')
_LOGGER.debug(f'PATH_DCCSIG: {_PATH_DCCSIG}')
_LOGGER.debug(f'PATH_DCCSI_TOOLS: {_PATH_DCCSI_TOOLS}')
_LOGGER.debug(f'PATH_DCCSI_TOOLS_DCC: {_PATH_DCCSI_TOOLS_DCC}')
_LOGGER.debug(f'DCCSI_TOOLS_BLENDER_PATH: {_DCCSI_TOOLS_BLENDER_PATH}')
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
if _DCCSI_DEV_MODE:
# If in dev mode this will test imports of __all__
_LOGGER.debug(f'Testing Imports from {_PACKAGENAME}')
test_imports(__all__)
# -------------------------------------------------------------------------
###########################################################################
# Main Code Block, runs this script as main (testing)
# -------------------------------------------------------------------------
if __name__ == '__main__':
"""Run as main, perform debug and tests"""
pass
| [
2,
19617,
25,
40477,
12,
23,
198,
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
198,
2,
15069,
357,
66,
8,
25767,
669,
284,
262,
4946,
513,
35,
7117,
4935,
13,
198,
2,
1114,
1844,
6634,
290,
5964,
2846,
3387,
766,
262,
38559,
24... | 3.057108 | 1,646 |
import numpy as np
import pandas as pd | [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67
] | 3.166667 | 12 |
import pytest
from models.base import Database
@pytest.fixture()
| [
11748,
12972,
9288,
198,
198,
6738,
4981,
13,
8692,
1330,
24047,
628,
198,
31,
9078,
9288,
13,
69,
9602,
3419,
198
] | 3.238095 | 21 |
import numpy as np
import pandas as pd
from sklearn import preprocessing as prep
from sklearn.preprocessing import MinMaxScaler
from collections import deque
from quik import prices
import random
if __name__ == "__main__":
X,y = get_training_data(lag = 500)
print(X,y)
| [
11748,
299,
32152,
355,
45941,
201,
198,
11748,
19798,
292,
355,
279,
67,
201,
198,
6738,
1341,
35720,
1330,
662,
36948,
355,
3143,
201,
198,
6738,
1341,
35720,
13,
3866,
36948,
1330,
1855,
11518,
3351,
36213,
201,
198,
6738,
17268,
133... | 2.11976 | 167 |
import networkx as nx
import numpy as np
import scphylo as scp
| [
11748,
3127,
87,
355,
299,
87,
198,
11748,
299,
32152,
355,
45941,
198,
198,
11748,
629,
746,
2645,
78,
355,
629,
79,
628,
198
] | 2.75 | 24 |
from lib.data_helper import DataHelper as dh
# single trail sc
test_text_1 = "this is my text)"
test_result_1 = dh.strip_if_not_none(test_text_1, ")., ")
test_result_1s = test_text_1.strip(")., ")
test_result_1r = dh.remove_multiple_outbound_chars(test_text_1)
# multi trail sc
test_text_2 = "this is my text)..."
test_result_2 = dh.strip_if_not_none(test_text_2, ")., ")
test_result_2s = test_text_2.strip(")., ")
test_result_2r = dh.remove_multiple_outbound_chars(test_text_2)
# single start sc multi trail sc
test_text_3 = ")this is my text)..."
test_result_3 = dh.strip_if_not_none(test_text_3, ")., ")
test_result_3s = test_text_3.strip(")., ")
test_result_3r = dh.remove_multiple_outbound_chars(test_text_3)
# multi start sc multi trail sc
test_text_4 = ")....this is my text)..."
test_result_4 = dh.strip_if_not_none(test_text_4, ")., ")
test_result_4s = test_text_4.strip(")., ")
test_result_4r = dh.remove_multiple_outbound_chars(test_text_4)
# with spaces
test_text_5 = ").. ..this is my text). .."
test_result_5 = dh.strip_if_not_none(test_text_5, ")., ")
test_result_5s = test_text_5.strip(")., ")
test_result_5r = dh.remove_multiple_outbound_chars(test_text_5)
# non-pattern break
test_text_6 = ").(...this is my text).(.."
test_result_6 = dh.strip_if_not_none(test_text_6, ")., ")
test_result_6s = test_text_6.strip(")., ")
test_result_6r = dh.remove_multiple_outbound_chars(test_text_6)
print("done")
# strip for comparison
test_strip = " u."
test_strip_1 = test_strip.strip(". ")
print("done2")
| [
6738,
9195,
13,
7890,
62,
2978,
525,
1330,
6060,
47429,
355,
34590,
628,
198,
2,
2060,
8025,
629,
198,
9288,
62,
5239,
62,
16,
796,
366,
5661,
318,
616,
2420,
16725,
198,
9288,
62,
20274,
62,
16,
796,
34590,
13,
36311,
62,
361,
62... | 2.392801 | 639 |
import requests
client_id = "..."
client_secret = "..."
try:
auth = requests.auth.HTTPBasicAuth(client_id, client_secret);
resp = requests.post('https://stepik.org/oauth2/token/', data={'grant_type': 'client_credentials'},auth=auth)
token = resp.json()['access_token']
except:
print('problems getting token')
api_url = "https://stepik.org/api/";
author_courses = {}
page_ind = 1
print('Please, wait a bit while list of courses is being processed')
# in this cycle we get all courses' titles and their authors
while True:
page = requests.get(api_url + 'courses' + '?page=' + str(page_ind) + '&language=ru', headers={'Authorization': 'Bearer ' + token}).json()
for course in page['courses']:
# ignore 'dead' courses
if course['discussions_count'] == 0:
continue
for author in course['authors']:
if not author in author_courses:
author_courses.update({author : set()})
# for each author we will have all courses that were created with his participation
author_courses[author].add(course['title'])
if not page['meta']['has_next']:
break
page_ind += 1
for user_id,titles in author_courses.items():
user = requests.get(api_url + 'users/' + str(user_id)).json()['users'][0]
print()
print(user['first_name'], user['last_name'], 'took part in creating ', end='')
if len(titles) == 1:
print('this course:')
else:
print('these courses:')
for title in titles:
print(title)
| [
11748,
7007,
198,
198,
16366,
62,
312,
796,
366,
9313,
198,
16366,
62,
21078,
796,
366,
9313,
198,
28311,
25,
198,
220,
220,
220,
6284,
796,
7007,
13,
18439,
13,
40717,
26416,
30515,
7,
16366,
62,
312,
11,
5456,
62,
21078,
1776,
198... | 2.565724 | 601 |
'''
Function:
天眼查
Author:
Charles
微信公众号:
Charles的皮卡丘
'''
import os
import requests
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5 import QtWidgets, QtGui
'''天眼查''' | [
7061,
6,
198,
22203,
25,
198,
220,
220,
220,
36469,
102,
40367,
120,
162,
253,
98,
198,
13838,
25,
198,
220,
220,
220,
7516,
198,
36181,
106,
46479,
94,
17739,
105,
27670,
245,
20998,
115,
25,
198,
220,
220,
220,
7516,
21410,
19021,... | 1.774775 | 111 |
max_num = 90
num = 1
while True:
if num > 1:
result1 = num * 10 + num - 1
if result1 > max_num:
break
print(result1)
result2 = num * 10 + num + 1
if result2 > max_num:
break
print(result2)
num += 1
| [
9806,
62,
22510,
796,
4101,
201,
198,
22510,
796,
352,
201,
198,
201,
198,
4514,
6407,
25,
201,
198,
220,
220,
220,
611,
997,
1875,
352,
25,
201,
198,
220,
220,
220,
220,
220,
220,
220,
1255,
16,
796,
997,
1635,
838,
1343,
997,
... | 1.847682 | 151 |
"""
Various utilities around voxel grids.
"""
import logging
import numpy as np
logger = logging.getLogger(__name__)
def convert_atom_to_voxel(coordinates, atom_index, box_width, voxel_width):
"""Converts atom coordinates to an i,j,k grid index.
This function offsets molecular atom coordinates by
(box_width/2, box_width/2, box_width/2) and then divides by
voxel_width to compute the voxel indices.
Parameters
-----------
coordinates: np.ndarray
Array with coordinates of all atoms in the molecule, shape
(N, 3).
atom_index: int
Index of an atom in the molecule.
box_width: float
Size of the box in Angstroms.
voxel_width: float
Size of a voxel in Angstroms
Returns
-------
A list containing a numpy array of length 3 with `[i, j, k]`, the
voxel coordinates of specified atom. This is returned a list so it
has the same API as convert_atom_pair_to_voxel
"""
indices = np.floor(
(coordinates[atom_index] + box_width / 2.0) / voxel_width).astype(int)
if ((indices < 0) | (indices >= box_width / voxel_width)).any():
logger.warning('Coordinates are outside of the box (atom id = %s,'
' coords xyz = %s, coords in box = %s' %
(atom_index, coordinates[atom_index], indices))
return [indices]
def convert_atom_pair_to_voxel(coordinates_tuple, atom_index_pair, box_width,
voxel_width):
"""Converts a pair of atoms to a list of i,j,k tuples.
Parameters
----------
coordinates_tuple: tuple
A tuple containing two molecular coordinate arrays of shapes `(N, 3)` and `(M, 3)`.
atom_index_pair: tuple
A tuple of indices for the atoms in the two molecules.
box_width: float
Size of the box in Angstroms.
voxel_width: float
Size of a voxel in Angstroms
Returns
-------
A list containing two numpy array of length 3 with `[i, j, k]`, the
voxel coordinates of specified atom.
"""
indices_list = []
indices_list.append(
convert_atom_to_voxel(coordinates_tuple[0], atom_index_pair[0], box_width,
voxel_width)[0])
indices_list.append(
convert_atom_to_voxel(coordinates_tuple[1], atom_index_pair[1], box_width,
voxel_width)[0])
return (indices_list)
def voxelize(get_voxels,
box_width,
voxel_width,
hash_function,
coordinates,
feature_dict=None,
feature_list=None,
nb_channel=16,
dtype="np.int8"):
"""Helper function to voxelize inputs.
This helper function helps convert a hash function which
specifies spatial features of a molecular complex into a voxel
tensor. This utility is used by various featurizers that generate
voxel grids.
Parameters
----------
get_voxels: function
Function that voxelizes inputs
box_width: float, optional (default 16.0)
Size of a box in which voxel features are calculated. Box
is centered on a ligand centroid.
voxel_width: float, optional (default 1.0)
Size of a 3D voxel in a grid in Angstroms.
hash_function: function
Used to map feature choices to voxel channels.
coordinates: np.ndarray
Contains the 3D coordinates of a molecular system.
feature_dict: dict
Keys are atom indices or tuples of atom indices, the values are
computed features. If `hash_function is not None`, then the values
are hashed using the hash function into `[0, nb_channels)` and
this channel at the voxel for the given key is incremented by `1`
for each dictionary entry. If `hash_function is None`, then the
value must be a vector of size `(n_channels,)` which is added to
the existing channel values at that voxel grid.
feature_list: list
List of atom indices or tuples of atom indices. This can only be
used if `nb_channel==1`. Increments the voxels corresponding to
these indices by `1` for each entry.
nb_channel: int (Default 16)
The number of feature channels computed per voxel. Should
be a power of 2.
dtype: type
The dtype of the numpy ndarray created to hold features.
Returns
-------
Tensor of shape (voxels_per_edge, voxels_per_edge,
voxels_per_edge, nb_channel),
"""
# Number of voxels per one edge of box to voxelize.
voxels_per_edge = int(box_width / voxel_width)
if dtype == "np.int8":
feature_tensor = np.zeros(
(voxels_per_edge, voxels_per_edge, voxels_per_edge, nb_channel),
dtype=np.int8)
else:
feature_tensor = np.zeros(
(voxels_per_edge, voxels_per_edge, voxels_per_edge, nb_channel),
dtype=np.float16)
if feature_dict is not None:
for key, features in feature_dict.items():
voxels = get_voxels(coordinates, key, box_width, voxel_width)
for voxel in voxels:
if ((voxel >= 0) & (voxel < voxels_per_edge)).all():
if hash_function is not None:
feature_tensor[voxel[0], voxel[1], voxel[2],
hash_function(features, nb_channel)] += 1.0
else:
feature_tensor[voxel[0], voxel[1], voxel[2], 0] += features
elif feature_list is not None:
for key in feature_list:
voxels = get_voxels(coordinates, key, box_width, voxel_width)
for voxel in voxels:
if ((voxel >= 0) & (voxel < voxels_per_edge)).all():
feature_tensor[voxel[0], voxel[1], voxel[2], 0] += 1.0
return feature_tensor
| [
37811,
198,
40009,
20081,
1088,
410,
1140,
417,
50000,
13,
198,
37811,
198,
11748,
18931,
198,
11748,
299,
32152,
355,
45941,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
628,
198,
4299,
10385,
62,
37696,
... | 2.512891 | 2,172 |
import numpy as np
from ..initialization import DefaultInitializer
from ..operators import Operators
from ..evaluation import SimpleFitness
from ..utility.history import History
| [
11748,
299,
32152,
355,
45941,
198,
198,
6738,
11485,
36733,
1634,
1330,
15161,
24243,
7509,
198,
6738,
11485,
3575,
2024,
1330,
6564,
2024,
198,
6738,
11485,
18206,
2288,
1330,
17427,
37,
3659,
198,
6738,
11485,
315,
879,
13,
23569,
1330... | 4.285714 | 42 |
import os
import time
import random
import datetime
import re
from nonebot import on_command, on_regex
from nonebot.typing import T_State
from nonebot.params import State
from nonebot.adapters.onebot.v11.bot import Bot
from nonebot.adapters.onebot.v11.event import Event
# 今天是几号星期
WEEK = ['一', '二', '三', '四', '五', '六', '日']
week_day = on_regex('(.*)是?(星期|周)几', block=False)
@week_day.handle()
day = on_regex('(今天)是?几号', block=False)
@day.handle() | [
11748,
28686,
198,
11748,
640,
198,
11748,
4738,
198,
11748,
4818,
8079,
198,
11748,
302,
198,
6738,
4844,
13645,
1330,
319,
62,
21812,
11,
319,
62,
260,
25636,
198,
198,
6738,
4844,
13645,
13,
774,
13886,
1330,
309,
62,
9012,
198,
67... | 2.281407 | 199 |
from django.urls import path
from case_admin.views import common, case, comment, tag, user, question
app_name = "case_admin"
urlpatterns = [
path("users/", user.view_admin_user, name='users'),
path("users/review", user.view_admin_user_review, name='users_review'),
path("users/<int:user_id>", user.api_admin_user, name='api_users'),
path("cases/", case.view_admin_case, name='cases'),
path("cases/review", case.view_admin_case_review, name='cases_review'),
path("cases/<int:case_id>", case.api_admin_case, name='api_cases'),
path("questions/", question.view_admin_question, name='questions'),
path("questions/import", question.api_admin_question_import, name='tquestion_import'),
path("questions/<int:question_id>", question.api_admin_question, name='api_questions'),
path("tags/", tag.view_admin_tag, name='tags'),
path("tags/import", tag.api_admin_tag_import, name='tag_import'),
path("tags/<int:tag_id>", tag.api_admin_tag, name='api_tags'),
path("comments/", comment.view_admin_comment, name='comments'),
path("comments/review", comment.view_admin_comment_review, name='comments_review'),
path("comments/<int:comment_id>", comment.api_admin_comment, name='api_comments'),
path("", common.view_landing, name='default'),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
198,
6738,
1339,
62,
28482,
13,
33571,
1330,
2219,
11,
1339,
11,
2912,
11,
7621,
11,
2836,
11,
1808,
198,
198,
1324,
62,
3672,
796,
366,
7442,
62,
28482,
1,
198,
198,
6371,
33279,
... | 2.820961 | 458 |
n = int(input())
print(dest(n, n)) | [
198,
77,
796,
493,
7,
15414,
28955,
198,
4798,
7,
16520,
7,
77,
11,
299,
4008
] | 2.1875 | 16 |
import SimpleNN as nn
import ConvNN as cnn
import os
parameters = [[512],[256],[128], [96], [64], [48], [32], [24], [16], [12], [8], [4]]
parameters = parameters[::-1]
nn.feedFashionDataset()
cnn.feedFashionDataset()
root_directory = 'Saved Models/Fashion/'
for epoch in [1,2,3,4,5,10,15,20,25,30]:
for test in [1,2,3]:
model_save_directory = root_directory + "Variable Units - "+str(epoch)+" epochs/Test" + str(test) + "/"
scores_list = []
for l in parameters:
if not os.path.exists(model_save_directory):
os.makedirs(model_save_directory)
test_acc,train_acc = nn.makeAndRunModel(l, model_save_directory, e=epoch)
scores_list.append("Nodes by Layer"+str(l)+"\nTest accuracy: " + str(test_acc) + "\n")
f = open(model_save_directory+"scores.txt","w+")
for s in scores_list:
f.write(s)
f.close()
| [
11748,
17427,
6144,
355,
299,
77,
198,
11748,
34872,
6144,
355,
269,
20471,
198,
11748,
28686,
198,
198,
17143,
7307,
796,
16410,
25836,
38430,
11645,
38430,
12762,
4357,
685,
4846,
4357,
685,
2414,
4357,
685,
2780,
4357,
685,
2624,
4357,... | 2.355114 | 352 |
"""Networking Module."""
IPPROTO_IPV4 = 0x0800
| [
37811,
7934,
16090,
19937,
526,
15931,
198,
198,
4061,
4805,
26631,
62,
4061,
53,
19,
796,
657,
87,
2919,
405,
198
] | 2.285714 | 21 |
try:
from tempfile import TemporaryDirectory
except ImportError:
from shutil import rmtree
from contextlib import contextmanager
from tempfile import mkdtemp
@contextmanager
import functools
from sys import platform
import pytest
skip_windows = functools.partial(pytest.mark.skipif, platform == 'win32')
| [
28311,
25,
198,
220,
220,
220,
422,
20218,
7753,
1330,
46042,
43055,
198,
16341,
17267,
12331,
25,
198,
220,
220,
220,
422,
4423,
346,
1330,
374,
16762,
631,
198,
220,
220,
220,
422,
4732,
8019,
1330,
4732,
37153,
198,
220,
220,
220,
... | 3.313131 | 99 |
from django.db import models
from django.utils import timezone
from django.urls import reverse
from django.contrib.auth.models import User
# Create your models here.
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
26791,
1330,
640,
11340,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
198,
2,
... | 3.541667 | 48 |
"""Analizar los siguientes ejercicios de condiciones, representarlos mediante algoritmos en Python. """
#3.-Se tiene que evaluar cuatro notas de un alumno, como resultado se visualiza el
# promedio del alumno junto con su condición de APROBADO o DESAPROBADO,
# sieste aprobado y con 18 o más, saldrá el siguiente mensaje “Certificado en MSOFFICE”.
Nota01=int(input("Ingresa Nota N°01 : "))
Nota02=int(input("Ingresa Nota N°02 : "))
Nota03=int(input("Ingresa Nota N°03 : "))
Nota04=int(input("Ingresa Nota N°04 : "))
Promedio=(Nota01+Nota02+Nota03+Nota04)/4
if Promedio >=18:
print("“Certificado en MSOFFICE”")
elif Promedio>=11:
print("APROBADO")
else:
print("DESAPROBADO") | [
37811,
2025,
282,
528,
283,
22346,
43237,
84,
1153,
274,
304,
73,
2798,
291,
4267,
390,
1779,
47430,
274,
11,
2380,
7063,
418,
1117,
3014,
68,
435,
7053,
270,
16785,
551,
11361,
13,
37227,
198,
2,
18,
7874,
4653,
46668,
1734,
8358,
... | 2.328814 | 295 |
# [Shaolin Temple] For Shaolin Temple
ZHEUNG_GUAN = 9310046
WISE_CHIEF_PRIEST = 9310053
ELDER_JUNG = 9310049
HAIFENG_FASHI = 9310051
sm.removeEscapeButton()
sm.setSpeakerID(ELDER_JUNG)
sm.setBoxChat()
sm.sendNext("The mountain weeps this day. My old friend has become a demon... What pain must he have been hiding to have come to this?")
sm.flipBoxChat()
sm.flipBoxChatPlayerAsSpeaker()
sm.sendNext("It goes to show... A tiny grain of greed can snowball into something huge and monstrous")
sm.removeEscapeButton()
sm.setSpeakerID(ELDER_JUNG)
sm.setBoxChat()
sm.sendNext("My friend's body has disappeared, but his spirit remains. "
"It must be exorcised, over and over, until the temple is clean. "
"You can encounter <Boss: Chief Priest> by talking to #p"+ str(HAIFENG_FASHI) +"# at the Sutra Depository entrance. "
"We will reward you as best we can.")
sm.sendNext("I will remain here, working humbly and tirelessly to restore Shaolin Temple to its former purity. You are welcome at any time.")
sm.startQuest(parentID)
sm.completeQuest(parentID)
sm.completeQuestNoRewards(62044)
sm.giveExp(2525566)
sm.giveItem(1142912) # Shaolin Savior (Medal) | [
2,
685,
2484,
64,
24910,
10857,
60,
1114,
19413,
24910,
10857,
198,
198,
57,
13909,
4944,
38,
62,
38022,
1565,
796,
10261,
3064,
3510,
198,
54,
24352,
62,
3398,
10008,
37,
62,
4805,
40,
6465,
796,
10261,
3064,
4310,
198,
3698,
14418,
... | 2.837321 | 418 |
import pymongo
client = pymongo.MongoClient('localhost', 27017)
db = client.stranger_tele_bot
| [
11748,
279,
4948,
25162,
628,
198,
16366,
796,
279,
4948,
25162,
13,
44,
25162,
11792,
10786,
36750,
3256,
2681,
29326,
8,
198,
9945,
796,
5456,
13,
2536,
2564,
62,
46813,
62,
13645,
198
] | 2.909091 | 33 |
# Generated by Django 2.2.15 on 2020-09-24 20:57
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
1314,
319,
12131,
12,
2931,
12,
1731,
1160,
25,
3553,
198,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
426... | 3.038462 | 52 |
# -*- coding: utf-8 -*-
import apple
import canon
import casio
import fujifilm
import nikon
import olympus
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
17180,
198,
11748,
18061,
198,
11748,
6124,
952,
198,
11748,
14035,
73,
361,
346,
76,
198,
11748,
299,
1134,
261,
198,
11748,
267,
6760,
385,
198
] | 2.609756 | 41 |
import warnings
from collections import OrderedDict
import pandas as pd
from . import dtypes, utils
from .alignment import align
from .variable import IndexVariable, Variable, as_variable
from .variable import concat as concat_vars
def concat(
objs,
dim=None,
data_vars="all",
coords="different",
compat="equals",
positions=None,
indexers=None,
mode=None,
concat_over=None,
fill_value=dtypes.NA,
join="outer",
):
"""Concatenate xarray objects along a new or existing dimension.
Parameters
----------
objs : sequence of Dataset and DataArray objects
xarray objects to concatenate together. Each object is expected to
consist of variables and coordinates with matching shapes except for
along the concatenated dimension.
dim : str or DataArray or pandas.Index
Name of the dimension to concatenate along. This can either be a new
dimension name, in which case it is added along axis=0, or an existing
dimension name, in which case the location of the dimension is
unchanged. If dimension is provided as a DataArray or Index, its name
is used as the dimension to concatenate along and the values are added
as a coordinate.
data_vars : {'minimal', 'different', 'all' or list of str}, optional
These data variables will be concatenated together:
* 'minimal': Only data variables in which the dimension already
appears are included.
* 'different': Data variables which are not equal (ignoring
attributes) across all datasets are also concatenated (as well as
all for which dimension already appears). Beware: this option may
load the data payload of data variables into memory if they are not
already loaded.
* 'all': All data variables will be concatenated.
* list of str: The listed data variables will be concatenated, in
addition to the 'minimal' data variables.
If objects are DataArrays, data_vars must be 'all'.
coords : {'minimal', 'different', 'all' or list of str}, optional
These coordinate variables will be concatenated together:
* 'minimal': Only coordinates in which the dimension already appears
are included.
* 'different': Coordinates which are not equal (ignoring attributes)
across all datasets are also concatenated (as well as all for which
dimension already appears). Beware: this option may load the data
payload of coordinate variables into memory if they are not already
loaded.
* 'all': All coordinate variables will be concatenated, except
those corresponding to other dimensions.
* list of str: The listed coordinate variables will be concatenated,
in addition to the 'minimal' coordinates.
compat : {'equals', 'identical'}, optional
String indicating how to compare non-concatenated variables and
dataset global attributes for potential conflicts. 'equals' means
that all variable values and dimensions must be the same;
'identical' means that variable attributes and global attributes
must also be equal.
positions : None or list of integer arrays, optional
List of integer arrays which specifies the integer positions to which
to assign each dataset along the concatenated dimension. If not
supplied, objects are concatenated in the provided order.
fill_value : scalar, optional
Value to use for newly missing values
join : {'outer', 'inner', 'left', 'right', 'exact'}, optional
String indicating how to combine differing indexes
(excluding dim) in objects
- 'outer': use the union of object indexes
- 'inner': use the intersection of object indexes
- 'left': use indexes from the first object with each dimension
- 'right': use indexes from the last object with each dimension
- 'exact': instead of aligning, raise `ValueError` when indexes to be
aligned are not equal
- 'override': if indexes are of same size, rewrite indexes to be
those of the first object with that dimension. Indexes for the same
dimension must have the same size in all objects.
indexers, mode, concat_over : deprecated
Returns
-------
concatenated : type of objs
See also
--------
merge
auto_combine
"""
# TODO: add ignore_index arguments copied from pandas.concat
# TODO: support concatenating scalar coordinates even if the concatenated
# dimension already exists
from .dataset import Dataset
from .dataarray import DataArray
try:
first_obj, objs = utils.peek_at(objs)
except StopIteration:
raise ValueError("must supply at least one object to concatenate")
if dim is None:
warnings.warn(
"the `dim` argument to `concat` will be required "
"in a future version of xarray; for now, setting it to "
"the old default of 'concat_dim'",
FutureWarning,
stacklevel=2,
)
dim = "concat_dims"
if indexers is not None: # pragma: no cover
warnings.warn(
"indexers has been renamed to positions; the alias "
"will be removed in a future version of xarray",
FutureWarning,
stacklevel=2,
)
positions = indexers
if mode is not None:
raise ValueError(
"`mode` is no longer a valid argument to "
"xarray.concat; it has been split into the "
"`data_vars` and `coords` arguments"
)
if concat_over is not None:
raise ValueError(
"`concat_over` is no longer a valid argument to "
"xarray.concat; it has been split into the "
"`data_vars` and `coords` arguments"
)
if isinstance(first_obj, DataArray):
f = _dataarray_concat
elif isinstance(first_obj, Dataset):
f = _dataset_concat
else:
raise TypeError(
"can only concatenate xarray Dataset and DataArray "
"objects, got %s" % type(first_obj)
)
return f(objs, dim, data_vars, coords, compat, positions, fill_value, join)
def _calc_concat_dim_coord(dim):
"""
Infer the dimension name and 1d coordinate variable (if appropriate)
for concatenating along the new dimension.
"""
from .dataarray import DataArray
if isinstance(dim, str):
coord = None
elif not isinstance(dim, (DataArray, Variable)):
dim_name = getattr(dim, "name", None)
if dim_name is None:
dim_name = "concat_dim"
coord = IndexVariable(dim_name, dim)
dim = dim_name
elif not isinstance(dim, DataArray):
coord = as_variable(dim).to_index_variable()
dim, = coord.dims
else:
coord = dim
dim, = coord.dims
return dim, coord
def _calc_concat_over(datasets, dim, data_vars, coords):
"""
Determine which dataset variables need to be concatenated in the result,
and which can simply be taken from the first dataset.
"""
# Return values
concat_over = set()
equals = {}
if dim in datasets[0]:
concat_over.add(dim)
for ds in datasets:
concat_over.update(k for k, v in ds.variables.items() if dim in v.dims)
process_subset_opt(data_vars, "data_vars")
process_subset_opt(coords, "coords")
return concat_over, equals
def _dataset_concat(
datasets,
dim,
data_vars,
coords,
compat,
positions,
fill_value=dtypes.NA,
join="outer",
):
"""
Concatenate a sequence of datasets along a new or existing dimension
"""
from .dataset import Dataset
if compat not in ["equals", "identical"]:
raise ValueError(
"compat=%r invalid: must be 'equals' " "or 'identical'" % compat
)
dim, coord = _calc_concat_dim_coord(dim)
# Make sure we're working on a copy (we'll be loading variables)
datasets = [ds.copy() for ds in datasets]
datasets = align(
*datasets, join=join, copy=False, exclude=[dim], fill_value=fill_value
)
concat_over, equals = _calc_concat_over(datasets, dim, data_vars, coords)
# create the new dataset and add constant variables
result_vars = OrderedDict()
result_coord_names = set(datasets[0].coords)
result_attrs = datasets[0].attrs
result_encoding = datasets[0].encoding
for k, v in datasets[0].variables.items():
if k not in concat_over:
insert_result_variable(k, v)
# check that global attributes and non-concatenated variables are fixed
# across all datasets
for ds in datasets[1:]:
if compat == "identical" and not utils.dict_equiv(ds.attrs, result_attrs):
raise ValueError("dataset global attributes not equal")
for k, v in ds.variables.items():
if k not in result_vars and k not in concat_over:
raise ValueError("encountered unexpected variable %r" % k)
elif (k in result_coord_names) != (k in ds.coords):
raise ValueError(
"%r is a coordinate in some datasets but not " "others" % k
)
elif k in result_vars and k != dim:
# Don't use Variable.identical as it internally invokes
# Variable.equals, and we may already know the answer
if compat == "identical" and not utils.dict_equiv(
v.attrs, result_vars[k].attrs
):
raise ValueError("variable %s not identical across datasets" % k)
# Proceed with equals()
try:
# May be populated when using the "different" method
is_equal = equals[k]
except KeyError:
result_vars[k].load()
is_equal = v.equals(result_vars[k])
if not is_equal:
raise ValueError("variable %s not equal across datasets" % k)
# we've already verified everything is consistent; now, calculate
# shared dimension sizes so we can expand the necessary variables
dim_lengths = [ds.dims.get(dim, 1) for ds in datasets]
non_concat_dims = {}
for ds in datasets:
non_concat_dims.update(ds.dims)
non_concat_dims.pop(dim, None)
# stack up each variable to fill-out the dataset (in order)
for k in datasets[0].variables:
if k in concat_over:
vars = ensure_common_dims([ds.variables[k] for ds in datasets])
combined = concat_vars(vars, dim, positions)
insert_result_variable(k, combined)
result = Dataset(result_vars, attrs=result_attrs)
result = result.set_coords(result_coord_names)
result.encoding = result_encoding
if coord is not None:
# add concat dimension last to ensure that its in the final Dataset
result[coord.name] = coord
return result
| [
11748,
14601,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
6738,
764,
1330,
288,
19199,
11,
3384,
4487,
198,
6738,
764,
282,
16747,
1330,
10548,
198,
6738,
764,
45286,
1330,
12901,
... | 2.54051 | 4,394 |
import argparse
import hashlib
import sys
import os
import Cryptodome.Cipher.AES as AES
from getpass import getpass
from itertools import chain
from contextlib import ExitStack
from functools import wraps
from .src.cipher import EncryptionCipher, DecryptionCipher, configure_cipher
from .. import __version__ as version
try:
from .src import files
except ImportError:
sys.path.append(
os.path.dirname(
os.path.dirname(
os.path.dirname(
os.path.dirname(
os.path.abspath(
__file__
)
)
)
)
)
)
from agutil.security.src import files
if __name__ == '__main__':
main()
| [
11748,
1822,
29572,
198,
11748,
12234,
8019,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
15126,
375,
462,
13,
34,
10803,
13,
32,
1546,
355,
34329,
198,
6738,
651,
6603,
1330,
651,
6603,
198,
6738,
340,
861,
10141,
1330,
6333,
198,... | 2.02611 | 383 |
import pygame
import json
from settings import *
from Button_Classes import Button
import image_choosing
import maingame_loop
import cv2
import numpy
import calibration
pygame.font.init()
MENU_OPTIONS = pygame.image.load(OPTIONS_BACKGROUND)
MENU_OPTIONS=pygame.transform.scale(MENU_OPTIONS,(WINDOWS_WIDTH,WINDOWS_HEIGHT))
COLOR = (0, 125, 200)
LEFT_CALLIBRATE_BUTTON = [0.05 * WINDOWS_WIDTH, 0.8 * WINDOWS_HEIGHT]
RIGHT_CALLIBRATE_BUTTON = [0.75 * WINDOWS_WIDTH, 0.8 * WINDOWS_HEIGHT]
PLAY_BUTTON = [0.3 * WINDOWS_WIDTH, 0.3 * WINDOWS_HEIGHT]
PLAYER_ONE_IMAGE = ((0.05 * WINDOWS_WIDTH, 0.1 * WINDOWS_HEIGHT, 0.2 * WINDOWS_WIDTH, 0.6 * WINDOWS_HEIGHT))
PLAYER_TWO_IMAGE = ((0.75 * WINDOWS_WIDTH, 0.1 * WINDOWS_HEIGHT, 0.2 * WINDOWS_WIDTH, 0.6 * WINDOWS_HEIGHT))
MENU_BUTTON = [0.45 * WINDOWS_WIDTH, 0.8 * WINDOWS_HEIGHT]
| [
11748,
12972,
6057,
198,
11748,
33918,
198,
198,
6738,
6460,
1330,
1635,
198,
6738,
20969,
62,
9487,
274,
1330,
20969,
198,
11748,
2939,
62,
6679,
2752,
198,
11748,
17266,
278,
480,
62,
26268,
198,
11748,
269,
85,
17,
198,
11748,
299,
... | 2.214854 | 377 |
n1 = int(input("Primeiro número: "))
n2 = int(input("Segundo número: "))
print("Soma: {}".format(n1+n2))
| [
77,
16,
796,
493,
7,
15414,
7203,
26405,
7058,
299,
21356,
647,
78,
25,
366,
4008,
198,
77,
17,
796,
493,
7,
15414,
7203,
41030,
41204,
299,
21356,
647,
78,
25,
366,
4008,
198,
198,
4798,
7203,
50,
6086,
25,
23884,
1911,
18982,
7,... | 2.078431 | 51 |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
from pipeline.utils.tools import extract_explicit_parameter
| [
2,
198,
2,
220,
15069,
13130,
383,
376,
6158,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
220,
345,
743,
407,
779,
428,
2393,
... | 3.675532 | 188 |
if __name__ == "__main__":
print("not so rad")
| [
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
3601,
7203,
1662,
523,
2511,
4943,
198
] | 2.428571 | 21 |
"""
Provide implementation of the FastRandomChoice interfaces.
"""
from abc import (
ABC,
abstractmethod,
)
class FastRandomChoiceInterface(ABC):
"""
Provide implementation of the FastRandomChoice interfaces.
"""
@abstractmethod
def choice(self, seq, p):
"""
Choose a random element from a non-empty sequence with probabilities list.
:param seq: non-empty sequence.
:param p: probabilities according to sequence.
:return: random element from sequence
"""
| [
37811,
198,
15946,
485,
7822,
286,
262,
12549,
29531,
46770,
20314,
13,
198,
37811,
198,
6738,
450,
66,
1330,
357,
198,
220,
220,
220,
9738,
11,
198,
220,
220,
220,
12531,
24396,
11,
198,
8,
628,
198,
4871,
12549,
29531,
46770,
39317,... | 2.988827 | 179 |
"""
Code Linting
"""
from invoke import task
@task
def lint(context):
"""
Runs quality checks through linter
"""
context.run('prospector')
| [
37811,
198,
10669,
406,
600,
278,
198,
37811,
198,
6738,
26342,
1330,
4876,
628,
198,
31,
35943,
198,
4299,
300,
600,
7,
22866,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
44743,
3081,
8794,
832,
300,
3849,
198,
220,
220,
2... | 2.754386 | 57 |