content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
11,
15720,
602,
198,
11748,
4818,
8079,
198,
6738,
42625,
... | 3.037736 | 53 |
import logging
import typing
from collections.abc import Iterator
from enum import Enum
from functools import lru_cache
from typing import Any, Dict, Mapping, Tuple, TypeVar, Union
import pydantic
import requests
from pydantic import Field, ValidationError, validator
from pydantic.class_validators import make_generic_validator
from pydantic.typing import get_origin # type: ignore [attr-defined]
try:
import requests_cache
except ImportError: # pragma: no cover
HAS_REQUESTS_CACHE = False
else:
HAS_REQUESTS_CACHE = True
KT = TypeVar("KT")
VT = TypeVar("VT")
log = logging.getLogger(__name__)
__all__ = [
"BaseModel",
"DictLike",
"compare",
"dictlike_field",
"only",
"summarize_dictlike",
"validate_dictlike",
"validator",
]
# Mapping from Resource value to class name.
CLASS_NAME = {
"dataflow": "DataflowDefinition",
"datastructure": "DataStructureDefinition",
}
# Inverse of :data:`CLASS_NAME`.
VALUE = {v: k for k, v in CLASS_NAME.items()}
class Resource(str, Enum):
"""Enumeration of SDMX-REST API resources.
============================= ======================================================
:class:`Enum` member :mod:`pandasdmx.model` class
============================= ======================================================
``actualconstraint`` :class:`.ContentConstraint`
``agencyscheme`` :class:`.AgencyScheme`
``allowedconstraint`` :class:`.ContentConstraint`
``attachementconstraint`` :class:`.AttachmentConstraint`
``categorisation`` :class:`.Categorisation`
``categoryscheme`` :class:`.CategoryScheme`
``codelist`` :class:`.Codelist`
``conceptscheme`` :class:`.ConceptScheme`
``contentconstraint`` :class:`.ContentConstraint`
``data`` :class:`.DataSet`
``dataflow`` :class:`.DataflowDefinition`
``dataconsumerscheme`` :class:`.DataConsumerScheme`
``dataproviderscheme`` :class:`.DataProviderScheme`
``datastructure`` :class:`.DataStructureDefinition`
``organisationscheme`` :class:`.OrganisationScheme`
``provisionagreement`` :class:`.ProvisionAgreement`
``structure`` Mixed.
----------------------------- ------------------------------------------------------
``customtypescheme`` Not implemented.
``hierarchicalcodelist`` Not implemented.
``metadata`` Not implemented.
``metadataflow`` Not implemented.
``metadatastructure`` Not implemented.
``namepersonalisationscheme`` Not implemented.
``organisationunitscheme`` Not implemented.
``process`` Not implemented.
``reportingtaxonomy`` Not implemented.
``rulesetscheme`` Not implemented.
``schema`` Not implemented.
``structureset`` Not implemented.
``transformationscheme`` Not implemented.
``userdefinedoperatorscheme`` Not implemented.
``vtlmappingscheme`` Not implemented.
============================= ======================================================
"""
actualconstraint = "actualconstraint"
agencyscheme = "agencyscheme"
allowedconstraint = "allowedconstraint"
attachementconstraint = "attachementconstraint"
categorisation = "categorisation"
categoryscheme = "categoryscheme"
codelist = "codelist"
conceptscheme = "conceptscheme"
contentconstraint = "contentconstraint"
customtypescheme = "customtypescheme"
data = "data"
dataconsumerscheme = "dataconsumerscheme"
dataflow = "dataflow"
dataproviderscheme = "dataproviderscheme"
datastructure = "datastructure"
hierarchicalcodelist = "hierarchicalcodelist"
metadata = "metadata"
metadataflow = "metadataflow"
metadatastructure = "metadatastructure"
namepersonalisationscheme = "namepersonalisationscheme"
organisationscheme = "organisationscheme"
organisationunitscheme = "organisationunitscheme"
process = "process"
provisionagreement = "provisionagreement"
reportingtaxonomy = "reportingtaxonomy"
rulesetscheme = "rulesetscheme"
schema = "schema"
structure = "structure"
structureset = "structureset"
transformationscheme = "transformationscheme"
userdefinedoperatorscheme = "userdefinedoperatorscheme"
vtlmappingscheme = "vtlmappingscheme"
@classmethod
def from_obj(cls, obj):
"""Return an enumeration value based on the class of `obj`."""
value = obj.__class__.__name__
return cls[VALUE.get(value, value)]
@classmethod
def class_name(cls, value: "Resource", default=None) -> str:
"""Return the name of a :mod:`pandasdmx.model` class from an enum value.
Values are returned in lower case.
"""
return CLASS_NAME.get(value.value, value.value)
@classmethod
#: Response codes defined by the SDMX-REST standard.
RESPONSE_CODE = {
200: "OK",
304: "No changes",
400: "Bad syntax",
401: "Unauthorized",
403: "Semantic error", # or "Forbidden"
404: "Not found",
406: "Not acceptable",
413: "Request entity too large",
414: "URI too long",
500: "Internal server error",
501: "Not implemented",
503: "Unavailable",
}
class BaseModel(pydantic.BaseModel):
"""Common settings for :class:`pydantic.BaseModel` in :mod:`pandasdmx`."""
class MaybeCachedSession(type):
"""Metaclass to inherit from :class:`requests_cache.CachedSession`, if available.
If :mod:`requests_cache` is not installed, returns :class:`requests.Session` as a
base class.
"""
KT = TypeVar("KT")
VT = TypeVar("VT")
log = logging.getLogger(__name__)
__all__ = [
"BaseModel",
"DictLike",
"compare",
"dictlike_field",
"only",
"summarize_dictlike",
"validate_dictlike",
"validator",
]
class BaseModel(pydantic.BaseModel):
"""Common settings for :class:`pydantic.BaseModel` in :mod:`sdmx`."""
class MaybeCachedSession(type):
"""Metaclass to inherit from :class:`requests_cache.CachedSession`, if available.
If :mod:`requests_cache` is not installed, returns :class:`requests.Session` as a
base class.
"""
class DictLike(dict, typing.MutableMapping[KT, VT]):
"""Container with features of a dict & list, plus attribute access."""
__slots__ = ("__dict__", "__field")
def __getitem__(self, key: Union[KT, int]) -> VT:
""":meth:`dict.__getitem__` with integer access."""
try:
return super().__getitem__(key)
except KeyError:
if isinstance(key, int):
# int() index access
return list(self.values())[key]
else:
raise
def __getstate__(self):
"""Exclude ``__field`` from items to be pickled."""
return {"__dict__": self.__dict__}
def __setitem__(self, key: KT, value: VT) -> None:
""":meth:`dict.__setitem` with validation."""
super().__setitem__(*self._validate_entry(key, value))
def copy(self):
"""Return a copy of the DictLike."""
return self.__copy__()
# pydantic compat
@classmethod
@classmethod
def _validate_whole(cls, v, field: pydantic.fields.ModelField):
"""Validate `v` as an entire DictLike object."""
# Convert anything that can be converted to a dict(). pydantic internals catch
# most other invalid types, e.g. set(); no need to handle them here.
result = cls(v)
# Reference to the pydantic.field.ModelField for the entries
result.__field = field
return result
def _validate_entry(self, key, value):
"""Validate one `key`/`value` pair."""
try:
# Use pydantic's validation machinery
v, error = self.__field._validate_mapping_like(
((key, value),), values={}, loc=(), cls=None
)
except AttributeError:
# .__field is not populated
return key, value
else:
if error:
raise ValidationError([error], self.__class__)
else:
return (key, value)
def compare(self, other, strict=True):
"""Return :obj:`True` if `self` is the same as `other`.
Two DictLike instances are identical if they contain the same set of keys, and
corresponding values compare equal.
Parameters
----------
strict : bool, optional
Passed to :func:`compare` for the values.
"""
if set(self.keys()) != set(other.keys()):
log.info(f"Not identical: {sorted(self.keys())} / {sorted(other.keys())}")
return False
for key, value in self.items():
if not value.compare(other[key], strict):
return False
return True
# Utility methods for DictLike
#
# These are defined in separate functions to avoid collisions with keys and the
# attribute access namespace, e.g. if the DictLike contains keys "summarize" or
# "validate".
def dictlike_field():
"""Shorthand for :class:`pydantic.Field` with :class:`.DictLike` default factory."""
return Field(default_factory=DictLike)
def summarize_dictlike(dl, maxwidth=72):
"""Return a string summary of the DictLike contents."""
value_cls = dl[0].__class__.__name__
count = len(dl)
keys = " ".join(dl.keys())
result = f"{value_cls} ({count}): {keys}"
if len(result) > maxwidth:
# Truncate the list of keys
result = result[: maxwidth - 3] + "..."
return result
def validate_dictlike(cls):
"""Adjust `cls` so that its DictLike members are validated.
This is necessary because DictLike is a subclass of :class:`dict`, and so
:mod:`pydantic` fails to call :meth:`~DictLike.__get_validators__` and register
those on BaseModels which include DictLike members.
"""
# Iterate over annotated members of `cls`; only those which are DictLike
for name, anno in filter(
lambda item: get_origin(item[1]) is DictLike, cls.__annotations__.items()
):
# Add the validator(s)
field = cls.__fields__[name]
field.post_validators = field.post_validators or []
field.post_validators.extend(
make_generic_validator(v) for v in DictLike.__get_validators__()
)
return cls
def compare(attr, a, b, strict: bool) -> bool:
"""Return :obj:`True` if ``a.attr`` == ``b.attr``.
If strict is :obj:`False`, :obj:`None` is permissible as `a` or `b`; otherwise,
"""
return getattr(a, attr) == getattr(b, attr) or (
not strict and None in (getattr(a, attr), getattr(b, attr))
)
# if not result:
# log.info(f"Not identical: {attr}={getattr(a, attr)} / {getattr(b, attr)}")
# return result
def only(iterator: Iterator) -> Any:
"""Return the only element of `iterator`, or :obj:`None`."""
try:
result = next(iterator)
flag = object()
assert flag is next(iterator, flag)
except (StopIteration, AssertionError):
return None # 0 or ≥2 matches
else:
return result
def parse_content_type(value: str) -> Tuple[str, Dict[str, Any]]:
"""Return content type and parameters from `value`.
Modified from :mod:`requests.util`.
"""
tokens = value.split(";")
content_type, params_raw = tokens[0].strip(), tokens[1:]
params = {}
to_strip = "\"' "
for param in params_raw:
k, *v = param.strip().split("=")
if not k and not v:
continue
params[k.strip(to_strip).lower()] = v[0].strip(to_strip) if len(v) else True
return content_type, params
@lru_cache()
def direct_fields(cls) -> Mapping[str, pydantic.fields.ModelField]:
"""Return the :mod:`pydantic` fields defined on `obj` or its class.
This is like the ``__fields__`` attribute, but excludes the fields defined on any
parent class(es).
"""
return {
name: info
for name, info in cls.__fields__.items()
if name not in set(cls.mro()[1].__fields__.keys())
}
try:
from typing import get_args # type: ignore [attr-defined]
except ImportError: # pragma: no cover
# For Python <3.8
| [
11748,
18931,
198,
11748,
19720,
198,
6738,
17268,
13,
39305,
1330,
40806,
1352,
198,
6738,
33829,
1330,
2039,
388,
198,
6738,
1257,
310,
10141,
1330,
300,
622,
62,
23870,
198,
6738,
19720,
1330,
4377,
11,
360,
713,
11,
337,
5912,
11,
... | 2.497591 | 4,982 |
"""
Test report metrics for a mapping job.
"""
import unittest
import logging
import numpy
from pbcore.io import AlignmentSet, ConsensusAlignmentSet
from pbcommand.models import FileTypes
from pysiv2.custom.base import TestReportStatistics, setUpFindAlignments
log = logging.getLogger(__name__)
class TestMappingStats(TestReportStatistics):
"""
Compare the contents of the mapping stats report against expected values.
This will work for both the SubreadSet and ConsensusReadSet variants of
the report (although some tests will be skipped in the latter case).
"""
REPORT_ID = set(["mapping_stats", "mapping_stats_ccs", "mapping_stats_hgap"]) # XXX yuck.
TEST_ID = "mapping_stats"
METRIC_IDS = [
"mapped_subread_bases_n",
"mapped_alignments_n",
"mapped_reads_n",
"mapped_subreads_n",
"mapped_readlength_mean",
"mapped_subread_concordance_mean",
"mapped_subread_readlength_mean",
]
MAX_RECORDS = 100000
def test_mapped_read_concordance_is_sequence_identity(self):
"""
Verify that the subread concordance in the report corresponds to the
sequence identity extracted by pbcore.io.
"""
# XXX workaround for CCS+mapping jobs
ds_type = AlignmentSet
stat_id = "mapped_subread_concordance_mean"
mean_id_report = self._get_stat(stat_id)
if mean_id_report is None:
raise unittest.SkipTest("mapped_subread_concordance_mean not "+
"found in report")
if self.alignment_file_name is None:
ft_id = FileTypes.DS_ALIGN_CCS.file_type_id
alignment_files = []
for file_info in self.datastore.get_file_dict().values():
if file_info.is_chunked:
continue
if file_info.file_type_id == ft_id:
if file_info.file_id.startswith("pbalign"):
self.alignment_file_name = file_info.path
break
if self.alignment_file_name is None:
assert len(alignment_files) == 1
self.alignment_file_name = alignment_files[0]
ds_type = ConsensusAlignmentSet
stat_id = "mapped_read_concordance_mean"
identities = []
with ds_type(self.alignment_file_name, skipCounts=True) as ds:
if ds.numRecords > self.MAX_RECORDS:
log.warn("{} exceeds size cutoff {}".format(ds.numRecords, self.MAX_RECORDS))
raise unittest.SkipTest("Exceeds size cutoff")
for bam in ds.resourceReaders():
identities.extend(list(bam.identity))
mean_id = numpy.round(numpy.array(identities).mean(), decimals=4)
mean_id_report = self._get_stat(stat_id)
self.assertAlmostEqual(mean_id, mean_id_report, places=4)
def test_all_movies_have_mapped_reads(self):
"""
Test that all movies included in the by-movie table have mapped reads.
"""
for column in self.report.tables[0].columns:
if column.id == "mapped_reads":
self.assertTrue(all([x>0 for x in column.values]),
"One or more movies has no mapped reads")
break
else:
self.fail("Can't find mapped reads column")
def test_number_of_mapped_movies(self):
"""
Test that the number of mapped movies as shown in the report is equal
to the specified value (optional).
"""
number_of_mapped_movies = self.expected_values.get("number_of_mapped_movies", None)
if number_of_mapped_movies is None:
raise unittest.SkipTest("number_of_mapped_movies not specified")
else:
col = self.report.tables[0].columns[0]
self.assertEqual(len(col.values), number_of_mapped_movies + 1)
| [
198,
37811,
198,
14402,
989,
20731,
329,
257,
16855,
1693,
13,
198,
37811,
198,
198,
11748,
555,
715,
395,
198,
11748,
18931,
198,
198,
11748,
299,
32152,
198,
198,
6738,
279,
65,
7295,
13,
952,
1330,
978,
16747,
7248,
11,
3515,
7314,... | 2.173961 | 1,805 |
import unittest
from Calculator import Calculator
from CsvReader import CsvReader
from pprint import pprint
if __name__ == '__main__':
unittest.main()
| [
11748,
555,
715,
395,
198,
6738,
43597,
1330,
43597,
198,
6738,
327,
21370,
33634,
1330,
327,
21370,
33634,
198,
6738,
279,
4798,
1330,
279,
4798,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
... | 3.22449 | 49 |
# INSTRUCTIONS
# Translate the text and write it between the "
# EXAMPLE: original -> "This text is in english: value {0}"
# translation -> "Aquest text està en anglès: valor {0}"
# So it would look like: "ORIGINAL_TEXT" : "TRANSLATED_TEXT",
# If you see sth like {0}, {1}, maintain it on the translated sentence
# Meke special attention to elements like ":", etc.
lang_3_5_0 = {
"Tooltip Appearance:": "",
"Tooltip's font, font size, font color and background": "",
"Disable tooltip's blurry background": "",
"Sync time with the internet": "",
"Internet date and time": "",
"Select internet time provider, change sync frequency": "",
"Enable internet time sync": "",
"Paste a URL from the world clock api or equivalent": "",
"Help": "",
"Internet sync frequency": "",
"10 minutes": "",
"30 minutes": "",
"1 hour": "",
"2 hours": "",
"4 hours": "",
"10 hours": "",
"24 hours": "",
}
lang_3_4_0 = lang_3_5_0 | {
"Show calendar": "Visa kalender",
"Disabled": "Inaktiverad",
"Open quick settings": "Öppna snabbinställningar",
"Show desktop": "Visa skrivbord",
"Open run dialog": "Öppna kör",
"Open task manager": "Öppna aktivitetshanteraren",
"Open start menu": "Öppna startmenyn",
"Open search menu": "Öppna sökmenyn",
"Change task": "Ändra uppgift",
"Change the action done when the clock is clicked": "Ändra vad som händer när klockan klickas",
}
lang_3_3_2 = lang_3_4_0 | {
"ElevenClock Updater": "ElevenClock uppdaterare",
"ElevenClock is downloading updates": "ElevenClock hämtar uppdateringar",
"ElevenClock has updated to version {0} successfully\nPlease see GitHub for the changelog": "ElevenClock har uppdaterats till version {0}\nSe GitHub för ändringar",
"Customize the clock on Windows 11": "Anpassa klockan på Windows 11",
"Disable the new instance checker method": "Inaktivera den nya instance checker metoden",
"Import settings from a local file": "Importera inställningar från en lokal fil",
"Export settings to a local file": "Exportera inställningar från en lokal fil",
"Export": "Exportera",
"Import": "Importera",
}
lang_3_3_1 = lang_3_3_2 | {
"Invalid time format\nPlease follow the\nC 1989 Standards": "Ogiltigt tidsformat\nVänligen följ\n1989 C standarden",
"Nothing to preview": "Inget att förhandsvisa",
"Invalid time format\nPlease modify it\nin the settings": "Ogiltigt tidsformat\nVänligen modifiera\nformatet i inställningarna",
"Disable the tooltip shown when the clock is hovered": "Inaktivera tooltip som visas när klockan hålls över"
}
lang_3_3 = lang_3_3_1 | {
"Custom format rules:": "Regler för anpassad formatering",
"Any text can be placed here. To place items such as date and time, please use the 1989 C standard. More info on the following link": "Valfri text kan placeras här. För att placera saker som datum och tid, vänligen använd 1989 C standarden. Mer info finns på följande länk:",
"Python date and time formats": "Python datum och tidsformat",
"To disable the zero-padding effect, add a # in between the % and the code: non-zero-padded hours would be %#H, and zero-padded hours would be %H": "För att inaktivera zero-padding, lägg till ett # mellan % och koden: non-zero-padded timmar skrivs %#H, och zero-padded timmar skrivs %H", # Here please don't modify the %H and %#H values
"Click on Apply to apply and preview the format": "Klicka på Verkställ för att förhandsgranska formateringen",
"Apply": "Verkställ",
"If you don't understand what is happening, please uncheck the checkbox over the text area": "Om du inte förstår vad som händer, vänligen avmarkera kryssrutan över textområdet",
"Set a custom date and time format": "Ange ett anpassat format för datum och tid",
"(for advanced users only)": "(för avancerade användare)",
"Move this clock to the left": "Flytta den här klockan till vänster",
"Move this clock to the top": "Flytta den här klockan till toppen",
"Move this clock to the right": "Flytta den här klockan till höger",
"Move this clock to the bottom": "Flytta den här klockan till botten",
"Restore horizontal position": "Återställ horisontell position",
"Restore vertical position": "Återställ vertikal position",
}
lang_3_2_1 = lang_3_3 | {
"Open online help to troubleshoot problems": "Öppna online hjälp för att felsöka problem",
"Reset ElevenClock preferences to defaults": "Återställ ElevenClock till standardinställningar",
"Specify a minimum width for the clock": "Specificera en minimi-bredd för klockan",
"Search on the settings": "Sök i inställningar",
"No results were found": "Inga resultat hittades",
}
lang_3_2 = lang_3_2_1 | {
"Use system accent color as background color": "Använd systemets accentfärg som bakgrundsfärg",
"Check only the focused window on the fullscreen check": "Kolla endast fokuserade fönstret vid fullscreen check",
"Clock on monitor {0}": "Klocka på bildskärm {0}",
"Move to the left": "Flytta till vänster",
"Show this clock on the left": "Visa den här klockan till vänster",
"Show this clock on the right": "Visa den här klockan till höger",
"Restore clock position": "Återställ klockans position",
}
lang_3_1 = lang_3_2 | {
"W": "v", # The initial of the word week in your language: W for week, S for setmana, etc.
"Disable the notification badge": "Inaktivera notification badge",
"Override clock default height": "Override klockans standardhöjd",
"Adjust horizontal clock position": "Justera klockans position horisontellt",
"Adjust vertical clock position": "Justera klockans position vertikalt",
"Export log as a file": "Exportera log som en fil",
"Copy log to clipboard": "Kopiera log till urklipp",
"Announcements:": "Meddelanden",
"Fetching latest announcement, please wait...": "Hämtar senaste meddelande, vänligen vänta",
"Couldn't load the announcements. Please try again later": "Kunde inte ladda meddelanden, vänligen försök igen senare",
"ElevenClock's log": "ElevenClocks log",
"Pick a color": "Välj en färg"
}
lang_3 = lang_3_1 | {
"Hide the clock during 10 seconds when clicked": "Göm klockan i 10 sekunder vid klick",
"Enable low-cpu mode": "Aktivera low-cpu mode",
"You might lose functionalities, like the notification counter or the dynamic background": "Du kan gå miste om funktioner som meddelanderäknaren eller dynamisk bakgrund",
"Clock position and size:": "Klockans position och storlek",
"Clock size preferences, position offset, clock at the left, etc.": "Inställningar för klockstorlek, position offset, klocka till vänster, osv.",
"Reset monitor blacklisting status": "Återställ skärmens svartlist status",
"Reset": "Återställ",
"Third party licenses": "Tredjeparts licenser",
"View": "Visa",
"ElevenClock": "ElevenClock",
"Monitor tools": "Skärmverktyg",
"Blacklist this monitor": "Svartlista den här skärmen",
"Third Party Open-Source Software in Elevenclock {0} (And their licenses)": "Tredjeparts Open-Source mjukvara i ElevenClock {0} (och deras licenser)",
"ElevenClock is an Open-Source application made with the help of other libraries made by the community:": "ElevenClock är en Open-Source applikation utvecklad med hjälp av bibliotek skapad av communityn",
"Ok": "Ok",
"More Info": "Mer info",
"About Qt": "Om Qt",
"Success": "Lyckades",
"The monitors were unblacklisted successfully.": "Skärmarna togs bort från svartlistning",
"Now you should see the clock everywhere": "Nu borde du se klockan överallt",
"Ok": "Ok",
"Blacklist Monitor": "Svarlista skärm",
"Blacklisting a monitor will hide the clock on this monitor permanently.": "Att svartlista en skärm döljer klockan från den här skärmen permanent",
"This action can be reverted from the settings window, under <b>Clock position and size</b>": "Denna handling kan ångras från inställningarna, under <b>Klockans position och storlek</b>",
"Are you sure do you want to blacklist the monitor \"{0}\"?": "Är du säker på att du vill svartlista skärmen \"{0}\"?",
"Yes": "Ja",
"No": "Nej",
}
lang_2_9_2 = lang_3 | {
"Reload log": "Ladda om log",
"Do not show the clock on secondary monitors": "Visa inte klockan på sekundära skärmar",
"Disable clock taskbar background color (make clock transparent)": "Inaktivera klockans bakgrundsfärg i aktivitetsfältet (gör klockan transparent",
"Open the welcome wizard": "Öppna välkomstguiden",
" (ALPHA STAGE, MAY NOT WORK)": " (ALPHA STADIE, KANSKE INTE FUNGERAR)",
"Welcome to ElevenClock": "Välkommen till ElevenClock",
"Skip": "Hoppa Över",
"Start": "Start",
"Next": "Nästa",
"Finish": "Klar",
}
lang_2_9 = lang_2_9_2 | {
"Task Manager": "Aktivitetshanteraren",
"Change date and time": "Ändra datum och tid",
"Notification settings": "Notifikationsinställningar",
"Updates, icon tray, language": "Uppdateringar, ikon, språk",
"Hide extended options from the clock right-click menu (needs a restart to be applied)": "Göm utökade inställnigar från klockans högerklickmeny (behöver omstart för att tillämpas)",
"Fullscreen behaviour, clock position, 1st monitor clock, other miscellanious settings": "Fullskärmsbeteende, klockans position, huvudskärmsklocka, andra diverse inställningar",
'Add the "Show Desktop" button on the left corner of every clock': 'Lägg till "Visa skrivbord" knapp till vänstra hörnet av varje klocka',
'You might need to set a custom background color for this to work. More info <a href="{0}" style="color:DodgerBlue">HERE</a>': 'Du kan behöva sätta en anpassad bakgrundsfärg för att det ska fungera. Mer info <a href="{0}" style="color:DodgerBlue">HÄR</a> ',
"Clock's font, font size, font color and background, text alignment": "Klockans font, teckenstorlek, färg på font och bakgrund, textjustering",
"Date format, Time format, seconds,weekday, weeknumber, regional settings": "Datum och tidsformatering, sekunder, veckodag, veckonummer, regionala inställningar",
"Testing features and error-fixing tools": "Testar funktioner och felhanteringsverktyg",
"Language pack author(s), help translating ElevenClock": "Språkpakets författare, hjälp översätt ElevenClock",
"Info, report a bug, submit a feature request, donate, about": "Info, rapportera en bug, skicka in en funktionsbegäran, donera, om",
"Log, debugging information": "Log, debugging information",
}
lang_2_8 = lang_2_9 | {
"Force the clock to be at the top of the screen": "Tvinga klockan att vara på toppen av skärmen",
"Show the clock on the primary screen": "Visa klockan på primärskärmen",
"Use a custom font color": "Använd anpassad teckenfärg",
"Use a custom background color": "Använd anpassad bakgrundsfärg",
"Align the clock text to the center": "Justera placering av text till klockans centrum",
"Select custom color": "Välj anpassad färg",
"Hide the clock when a program occupies all screens": "Dölj klockan när ett program tar upp alla skärmar",
}
lang2_7_bis = lang_2_8 | {
"Use a custom font": "Använd en anpassad font",
"Use a custom font size": "Använd en anpassad font storlek",
"Enable hide when multi-monitor fullscreen apps are running": "Aktivera dölj när helskärmsappar med flera skärmar körs",
"<b>{0}</b> needs to be enabled to change this setting": "<b>{0}</b> måste vara aktiverat för att ändra denna inställning",
"<b>{0}</b> needs to be disabled to change this setting": "<b>{0}</b> måste inaktiveras för att ändra den här inställningen",
}
lang2_7 = lang2_7_bis | {
" (This feature has been disabled because it should work by default. If it is not, please report a bug)": " (Den här funktionen har inaktiverats eftersom den borde fungera som standard. Om den inte är det, rapportera ett fel)",
"ElevenClock's language": "ElevenClocks språk"
}
lang2_6 = lang2_7 | {
"About Qt6 (PySide6)": "Om Qt6 (PySide6)",
"About": "Om",
"Alternative non-SSL update server (This might help with SSL errors)": "Alternativ icke-SSL-uppdateringsserver (Detta kan hjälpa till med SSL-fel)",
"Fixes and other experimental features: (Use ONLY if something is not working)": "Fixar och andra experimentella funktioner: (Använd ENDAST om något inte fungerar)",
"Show week number on the clock": "Visa veckonummer på klockan"
}
lang2_5 = lang2_6 | {
"Hide the clock when RDP Client or Citrix Workspace are running": "Dölj klockan när RDP Client eller Citrix Workspace körs",
"Clock Appearance:": "Klockans utseende",
"Force the clock to have black text": "Tvinga klockan att ha svart text",
" - It is required that the Dark Text checkbox is disabled": " - Det krävs att kryssrutan Mörk text är inaktiverad",
"Debbugging information:": "Debugging information",
"Open ElevenClock's log": "Öppna ElevenClocks logg",
}
lang2_4 = lang2_5 | {
# Added text in version 2.4
"Show the clock on the primary screen (Useful if clock is set on the left)": "Visa klockan på den primära skärmen (Användbart om klockan är inställd till vänster)",
"Show weekday on the clock" :"Visa veckodag på klockan",
}
lang2_3 = lang2_4 | {
#Context menu
"ElevenClock Settings" :"ElevenClock Inställningar", # Also settings title
"Reload Clocks" :"Ladda om klockor",
"ElevenClock v{0}" :"ElevenClock v{0}",
"Restart ElevenClock" :"Starta om ElevenClock",
"Hide ElevenClock" :"Göm ElevenClock",
"Quit ElevenClock" :"Avsluta ElevenClock",
#General settings section
"General Settings:" :"Allmänna Inställningar:",
"Automatically check for updates" :"Sök automatiskt efter uppdateringar",
"Automatically install available updates" :"Installera tillgängliga uppdateringar automatiskt",
"Enable really silent updates" :"Aktivera riktigt tysta uppdateringar",
"Bypass update provider authenticity check (NOT RECOMMENDED, AT YOUR OWN RISK)" :"Kringgå uppdateringsleverantörens äkthetskontroll (REKOMMENDERAS INTE, PÅ DIN EGEN RISK)",
"Show ElevenClock on system tray" :"Visa ElevenClock i systemfältet",
"Alternative clock alignment (may not work)" :"Alternativ klockjustering (fungerar kanske inte)",
"Change startup behaviour" :"Ändra startbeteende",
"Change" :"Förändra",
"<b>Update to the latest version!</b>" :"<b>Uppdatera till den senaste versionen!</b>",
"Install update" :"Installera uppdatering",
#Clock settings
"Clock Settings:" :"Klockinställningar:",
"Hide the clock in fullscreen mode" :"Dölj klockan i helskärmsläge",
"Hide the clock when RDP client is active" :"Dölj klockan när RDP-klienten är aktiv",
"Force the clock to be at the bottom of the screen" :"Tvinga klockan att vara längst ner på skärmen",
"Show the clock when the taskbar is set to hide automatically" :"Visa klockan när aktivitetsfältet är inställt på att döljas automatiskt",
"Fix the hyphen/dash showing over the month" :"Åtgärda bindestrecket/strecket som visas under månaden",
"Force the clock to have white text" :"Tvinga klockan att ha vit text",
"Show the clock at the left of the screen" :"Visa klockan till vänster på skärmen",
#Date & time settings
"Date & Time Settings:" :"Inställningar för datum och tid:",
"Show seconds on the clock" :"Visa sekunder på klockan",
"Show date on the clock" :"Visa datum på klockan",
"Show time on the clock" :"Visa tid på klockan",
"Change date and time format (Regional settings)" :"Ändra datum och tidsformat (regionala inställningar)",
"Regional settings" :"Regionala inställningar",
#About the language pack
"About the language pack:" :"Om språkpaketet:",
"Translated to English by martinet101" :"Översatt till Svenska av Noffe och cjal95", # Here, make sute to give you some credits: Translated to LANGUAGE by USER/NAME/PSEUDONYM/etc.
"Translate ElevenClock to your language" :"Översätt ElevenClock till ditt språk",
"Get started" :"Kom igång",
#About ElevenClock
"About ElevenClock version {0}:" :"Om ElevenClock version {0}:",
"View ElevenClock's homepage" :"Visa ElevenClocks hemsida",
"Open" :"Öppna",
"Report an issue/request a feature" :"Rapportera ett problem/begär en funktion",
"Report" :"Rapportera",
"Support the dev: Give me a coffee☕" :"Stöd utvecklaren: Ge mig en kaffe☕",
"Open page" :"Öppna sida",
"Icons by Icons8" :"Ikoner av Icons8", # Here, the word "Icons8" should not be translated
"Webpage" :"Webbsida",
"Close settings" :"Stäng inställningar",
"Close" :"Stäng",
}
lang = lang2_3
| [
2,
3268,
46126,
11053,
198,
198,
2,
3602,
17660,
262,
2420,
290,
3551,
340,
1022,
262,
366,
198,
2,
7788,
2390,
16437,
25,
220,
220,
220,
220,
220,
2656,
220,
220,
220,
4613,
220,
366,
1212,
2420,
318,
287,
46932,
25,
1988,
1391,
... | 2.394674 | 7,510 |
# some small tests to see whether sets are there and work
from py.builtin import set, frozenset
| [
2,
617,
1402,
5254,
284,
766,
1771,
5621,
389,
612,
290,
670,
198,
198,
6738,
12972,
13,
18780,
259,
1330,
900,
11,
8400,
8247,
316,
198
] | 3.730769 | 26 |
import os
DATA_FOLDER = 'data'
TRAIN_CSV = os.path.join(DATA_FOLDER, 'train.csv')
VAL_CSV = os.path.join(DATA_FOLDER, 'val.csv')
MODEL_FOLDER = 'models'
SAVED_ESTIMATOR = os.path.join(MODEL_FOLDER, 'LGBMClassifier.pickle') | [
11748,
28686,
198,
198,
26947,
62,
37,
3535,
14418,
796,
705,
7890,
6,
198,
51,
3861,
1268,
62,
7902,
53,
796,
28686,
13,
6978,
13,
22179,
7,
26947,
62,
37,
3535,
14418,
11,
705,
27432,
13,
40664,
11537,
198,
23428,
62,
7902,
53,
... | 2.133333 | 105 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 8 19:23:20 2020
@author: alfredocu
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
classifiers = {
"KNN": KNeighborsClassifier(3),
"SVM": SVC(gamma = 2, C = 1),
"GP": GaussianProcessClassifier(1.0 * RBF(1.0)),
"DT": DecisionTreeClassifier(max_depth = 5),
"MLP": MLPClassifier(alpha = 0.1, max_iter = 1000),
"Bayes": GaussianNB()
}
x, y = make_classification(n_features = 2, n_redundant = 0, n_informative = 2, n_clusters_per_class = 1)
rng = np.random.RandomState(2)
x += 1 * rng.uniform(size = x.shape)
linearly_separable = (x, y)
datasets = [make_moons(noise = 0.1), make_circles(noise = 0.1, factor = 0.5), linearly_separable]
cm = plt.cm.RdBu
cm_bright = ListedColormap(["#FF0000", "#0000FF"])
###############################################################################
model_name = "Bayes" # Se agrega aqui el tipo de modelo a ejecutar.
figure = plt.figure(figsize = (9, 3))
h = .02 # Step
i = 1 # Counter
# Iterate over datasets
for ds_cnt, ds in enumerate(datasets):
x, y = ds
x = StandardScaler().fit_transform(x)
# Train and test
xtrain, xtest, ytrain, ytest = train_test_split(x, y)
# Min and Max for normalize data.
x_min, x_max = x[:, 0].min() - .5, x[:, 0].max() + .5
y_min, y_max = x[:, 1].min() - .5, x[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Classifications
model = classifiers[model_name]
ax = plt.subplot(1, 3, i)
# Training
model.fit(xtrain, ytrain)
score_train = model.score(xtrain, ytrain)
score_test = model.score(xtest, ytest)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
if hasattr(model, "decision_function"):
zz = model.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
zz = model.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
zz = zz.reshape(xx.shape)
ax.contourf(xx, yy, zz, cmap = cm, alpha = .8)
# Plot the training points
ax.scatter(xtrain[:, 0], xtrain[:, 1], c = ytrain, cmap = cm_bright, edgecolors = "k", alpha = 0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
ax.text(xx.max() - .3, yy.min() + .7, "%.2f" % score_train, size = 15, horizontalalignment = "right")
ax.text(xx.max() - .3, yy.min() + .3, "%.2f" % score_test, size = 15, horizontalalignment = "right")
i += 1
plt.tight_layout()
# plt.show()
# plt.savefig("Bayes.eps", format="eps") | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
3825,
5267,
807,
678,
25,
1954,
25,
1238,
12131,
198,
198,
31,
9800,
25,
435,
39193,
420,... | 2.345881 | 1,408 |
import unittest
import im_lib
import numpy
#class CountObjectsTest(unittest.TestCase):
#@classmethod
#def setUpClass(clc):
#print("\nRunning CountObjects class setUp...")
#@classmethod
#def tearDownClass(clc):
#print("\nRunning CountObjects class tearDown...")
#def setUp(self):
#print("\nRunning setUp...")
#def tearDown(self):
#print("\nRunning tearDown...")
#def test_co(self):
if __name__ == "__main__":
unittest.main()
| [
11748,
555,
715,
395,
198,
11748,
545,
62,
8019,
198,
11748,
299,
32152,
628,
628,
628,
198,
2,
4871,
2764,
10267,
82,
14402,
7,
403,
715,
395,
13,
14402,
20448,
2599,
628,
220,
220,
220,
1303,
31,
4871,
24396,
198,
220,
220,
220,
... | 2.324201 | 219 |
from docutils import nodes
from docutils.parsers.rst import directives
CODE = """\
<object width="%(width)i" height="%(height)i">
<param name="flashvars" value="offsite=true&lang=en-us&page_show_url=%2Fphotos%2F75341362%40N04%2Fsets%2F72157629059614751%2Fshow%2F&page_show_back_url=%2Fphotos%2F75341362%40N04%2Fsets%2F72157629059614751%2F&set_id=72157629059614751&jump_to="> </param>
<param name="movie" value="http://www.flickr.com/apps/slideshow/show.swf?v=%(flickid)s"> </param>
<param name="allowFullScreen" value="true"></param>
<embed type="application/x-shockwave-flash" src="http://www.flickr.com/apps/slideshow/show.swf?v=%(flickid)s" allowFullScreen="true" flashvars="offsite=true&lang=en-us&page_show_url=%2Fphotos%2F75341362%40N04%2Fsets%2F72157629059614751%2Fshow%2F&page_show_back_url=%2Fphotos%2F75341362%40N04%2Fsets%2F72157629059614751%2F&set_id=72157629059614751&jump_to=" width="%(width)i" height="%(height)i"></embed>
</object>
"""
PARAM = """\n <param name="%s" value="%s"></param>"""
def flickr(name, args, options, content, lineno,
contentOffset, blockText, state, stateMachine):
""" Restructured text extension for inserting flickr embedded slideshows """
if len(content) == 0:
return
string_vars = {
'flickid': content[0],
'width': 400,
'height': 300,
'extra': ''
}
extra_args = content[1:] # Because content[0] is ID
extra_args = [ea.strip().split("=") for ea in extra_args] # key=value
extra_args = [ea for ea in extra_args if len(ea) == 2] # drop bad lines
extra_args = dict(extra_args)
if 'width' in extra_args:
string_vars['width'] = extra_args.pop('width')
if 'height' in extra_args:
string_vars['height'] = extra_args.pop('height')
if extra_args:
params = [PARAM % (key, extra_args[key]) for key in extra_args]
string_vars['extra'] = "".join(params)
return [nodes.raw('', CODE % (string_vars), format='html')]
flickr.content = True
directives.register_directive('flickr', flickr)
from sphinx.util.compat import Directive
| [
6738,
2205,
26791,
1330,
13760,
198,
6738,
2205,
26791,
13,
79,
945,
364,
13,
81,
301,
1330,
34819,
198,
198,
34,
16820,
796,
37227,
59,
198,
198,
27,
15252,
9647,
2625,
4,
7,
10394,
8,
72,
1,
6001,
2625,
4,
7,
17015,
8,
72,
532... | 2.352018 | 892 |
'''
Convolutional AutoEncoder
This code serves to load a pretrained model and to train for more epochs with
selected data.
'''
import LRsymmetrizer as sym
import tensorflow as tf
import tensorflow.keras as keras
from keras.preprocessing import image
from keras.models import Model, model_from_json, load_model
import os, time
import numpy as np
import matplotlib.pyplot as plt
print(tf.version.VERSION)
print(tf.keras.__version__)
source_folder = os.getcwd()+'/tinygray'
# load and process the training/test data with load_img
start_load = time.time()
sub_source_folder = source_folder+'/train'
train_image = []
for i in os.listdir(sub_source_folder):
for ang in (0,10,20,30,40):
if i.endswith('_%d.png' % ang):
img = sym.feed_processor(i,ang,sub_source_folder)
# img.shape = (128,64) for vertically stacked imgs of (h,w) = (64,64)
img = img/255. #rescale to [0,1]
train_image.append(img)
train_image = np.array(train_image)
# train_image.shape = (N,128,64) for N imgs in sub_source_folder
train_image = np.expand_dims(train_image,axis=3)#keras format needs a dimension for the color channel
sub_source_folder = source_folder+'/test'
test_image = []
for i in os.listdir(sub_source_folder):
for ang in (0,10,20,30,40):
if i.endswith('_%d.png' % ang):
img = sym.feed_processor(i,ang,sub_source_folder)
# img.shape = (128,64) for vertically stacked imgs of (h,w) = (64,64)
img = img/255. #rescale to [0,1]
test_image.append(img)
test_image = np.array(test_image)
# train_image.shape = (N,128,64) for N imgs in sub_source_folder
test_image = np.expand_dims(test_image,axis=3)#keras format needs a dimension for the color channel
print('load_img takes time = ',time.time()-start_load)
loaded_model = load_model("HoloEncoder_C56789DDC98765.h5")
loaded_model.summary()
# model training
start_training = time.time()
loaded_model.fit(train_image, train_image,
epochs=50,
batch_size=8,
shuffle=True,
validation_data=(test_image, test_image))
print('Training takes time = ',time.time()-start_training)
loaded_model.save("HoloEncoder_C56789DDC98765.h5")
# model validation
decoded_imgs = loaded_model.predict(test_image[:4])
decoded_imgs = loaded_model.predict(decoded_imgs)
decoded_imgs = loaded_model.predict(decoded_imgs)
decoded_imgs = loaded_model.predict(decoded_imgs)
# plot comparison of test_image and its reconstruction
plt.figure(figsize=(64,32))
for i in range(4):
#original
ax = plt.subplot(2,4,i+1)
plt.imshow(test_image[i].reshape(128,64)) #reshape from flatten&grayscale
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
#reconstruction
ax = plt.subplot(2,4,i+1+4)
plt.imshow(decoded_imgs[i].reshape(128,64)) #reshape from flatten&grayscale
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.savefig('argvalidation_img.png')
| [
7061,
6,
198,
3103,
85,
2122,
282,
11160,
27195,
12342,
198,
1212,
2438,
9179,
284,
3440,
257,
2181,
13363,
2746,
290,
284,
4512,
329,
517,
36835,
82,
351,
198,
34213,
1366,
13,
198,
7061,
6,
198,
11748,
37491,
1837,
3020,
316,
380,
... | 2.403009 | 1,263 |
from .trial_scheduler import TrialScheduler
from .online_scheduler import OnlineScheduler, OnlineSuccessiveDoublingScheduler, ChaChaScheduler
| [
6738,
764,
45994,
62,
1416,
704,
18173,
1330,
21960,
50,
1740,
18173,
198,
6738,
764,
25119,
62,
1416,
704,
18173,
1330,
7467,
50,
1740,
18173,
11,
7467,
33244,
425,
40287,
11108,
50,
1740,
18173,
11,
20703,
1925,
64,
50,
1740,
18173,
... | 3.380952 | 42 |
#Chapter 9 notes
#1
#states = {} <- dictionary
#states = {"Virginia": "Richmond", "Maryland": "Annapolis", "New York": "Albany"}
#print states . . . {'Maryland': 'Annapolis', 'New York': 'Albany', 'Virginia': 'Richmond'}
#states['Oregon'] = 'Salem' <- adding something to the dictionary
#states.pop(Oregon) = Salem; States = line 4 original list
#2
#user_emails = {'15363': 'doughnut.lover@gmail.com'}
#user_emails['15363'] = "rpmva@udel.edu"
#user_emails
#result --> {'15363': 'rpmva@udel.edu'}
#3 Getting Information About a Dictionary
#isbns = {'1234': 'Easy Math', '1357': "Things are odd', '2468': Let\'s break even"}
#isbns['2468'] = "Let's Break Even"
#isbns.has_key('1111') = False; <- check if a key is in a dictionary
#isbns.has_key('1234') = True;
#isbns.keys() = ['1234', '2468', '1357'] <- get keys
#isbns.values() = ['Easy Math', "Let's Break Even", 'Things are odd'] <- get values
#4
#d = {'one': 1, 'two': 2}
#'one' in d -> True
#'four' in d -> False
#5, Comparing Dictionaries
#a = {1: 'one', 2: 'two', 3: 'three'}
#b = {1: 'one', 2: 'two', 3: 'three'}
# c = {2: 'two', 3: 'three', 4: 'four'}
# a == b -> True
# b == c -> False
| [
2,
14126,
860,
4710,
198,
2,
16,
198,
2,
27219,
796,
23884,
24293,
22155,
198,
2,
27219,
796,
19779,
41017,
1298,
366,
14868,
6327,
1600,
366,
24119,
1044,
1298,
366,
18858,
11174,
1600,
366,
3791,
1971,
1298,
366,
2348,
65,
1092,
206... | 2.505447 | 459 |
# Copyright Fortior Blockchain, LLLP 2021
# Imports
import numpy as np
import pandas as pd
from pandas_datareader import data
ALGO = data.DataReader("ALGO-USD",
start='2019-9-18',
end='2021-9-14',
data_source='yahoo')
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
from tensorflow.keras import layers
from sklearn.preprocessing import MinMaxScaler
# Data
ALGO = ALGO[~ALGO.index.duplicated()]
sns.set(style='darkgrid')
plt.figure(figsize=(12,8))
plt.title("ALGO Prices", fontsize=15)
sns.lineplot(x=ALGO.index, y='Adj Close', data=ALGO)
plt.show(block=True)
hist = []
target = []
length = 30
adj_close = ALGO['Adj Close']
# iterate
for i in range(len(adj_close) - length):
x = adj_close[i:i+length]
y = adj_close[i+length]
hist.append(x)
target.append(y)
hist = np.array(hist)
target = np.array(target)
target = target.reshape(-1,1)
# Shape
print(hist.shape)
print(target.shape)
# Data splut
X_train = hist[:300]
X_test = hist[300:]
y_train = target[:300]
y_test = target[300:]
sc = MinMaxScaler()
# Train set, fit_transform
X_train_scaled = sc.fit_transform(X_train)
y_train_scaled = sc.fit_transform(y_train)
# Test set, only transform
X_test_scaled = sc.fit_transform(X_test)
y_test_scaled = sc.fit_transform(y_test)
X_train_scaled = X_train_scaled.reshape((len(X_train_scaled), length, 1))
X_test_scaled = X_test_scaled.reshape((len(X_test_scaled), length, 1))
# Model
model = tf.keras.Sequential()
model.add(layers.LSTM(units=64, return_sequences=True, input_shape=(90,1), dropout=0.2))
model.add(layers.LSTM(units=64, return_sequences=True, input_shape=(90,1), dropout=0.2))
model.add(layers.LSTM(units=32, return_sequences=True, dropout=0.2))
model.add(layers.LSTM(units=32, return_sequences=True, dropout=0.2))
model.add(layers.LSTM(units=16, dropout=0.2))
model.add(layers.Dense(units=1))
model.summary()
model.compile(optimizer='adam', loss='mean_squared_error')
# Optimizer
history = model.fit(X_train_scaled, y_train_scaled,
epochs=120, batch_size=20)
loss = history.history['loss']
epoch_count = range(1, len(loss) + 1)
# Plot
plt.figure(figsize=(12,8))
plt.plot(epoch_count, loss, 'r--')
plt.legend(['Training Loss'])
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.show()
pred = model.predict(X_test_scaled)
pred_transformed = sc.inverse_transform(pred)
y_test_transformed = sc.inverse_transform(y_test_scaled)
plt.figure(figsize=(12,8))
plt.plot(y_test_transformed, color='blue', label='Real')
plt.plot(pred_transformed, color='red', label='Prediction')
plt.title('ALGO Price Prediction')
plt.legend()
plt.show()
| [
2,
15069,
6401,
1504,
29724,
11,
406,
3069,
47,
33448,
198,
198,
2,
1846,
3742,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
19798,
292,
62,
19608,
533,
5067,
1330,
1366,
198,
1847,
11230,
796,
... | 2.354497 | 1,134 |
import base64
import csv
import json
import os
from google.oauth2 import service_account
from googleapiclient.discovery import build
if __name__ == "__main__":
spreadsheet_id = os.environ["SPREADSHEET_ID"]
range_name = os.environ["RANGE_NAME"]
credential = os.environ["GDOCS_SERVICE_ACCOUNT"]
credential = decode_credential(credential)
scopes = ["https://www.googleapis.com/auth/spreadsheets"]
write_sheet(spreadsheet_id, range_name, credential, scopes)
| [
11748,
2779,
2414,
198,
11748,
269,
21370,
198,
11748,
33918,
198,
11748,
28686,
198,
198,
6738,
23645,
13,
12162,
1071,
17,
1330,
2139,
62,
23317,
198,
6738,
23645,
499,
291,
75,
1153,
13,
67,
40821,
1330,
1382,
628,
628,
628,
198,
3... | 2.820809 | 173 |
# Generated by Django 2.2.4 on 2019-08-25 02:20
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
19,
319,
13130,
12,
2919,
12,
1495,
7816,
25,
1238,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |
"""Types."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import apache_beam as beam
import numpy as np
from tensorflow_data_validation.types_compat import Dict, Text, Union
FeatureName = Union[bytes, Text]
# Feature type enum value.
FeatureNameStatisticsType = int
# Type of the input batch.
ExampleBatch = Dict[FeatureName, np.ndarray]
# For use in Beam type annotations, because Beam's support for Python types
# in Beam type annotations is not complete.
BeamFeatureName = beam.typehints.Union[bytes, Text]
# pylint: enable=invalid-name
| [
37811,
31431,
526,
15931,
198,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
2471,
4891,
62,
40045,
355,
15584,
1... | 3.438202 | 178 |
# Relative imports
from ._estimator import CanonicalRNNEstimator
__all__ = ["CanonicalRNNEstimator"]
| [
2,
45344,
17944,
198,
6738,
47540,
395,
320,
1352,
1330,
19507,
605,
49,
6144,
22362,
320,
1352,
198,
198,
834,
439,
834,
796,
14631,
6090,
261,
605,
49,
6144,
22362,
320,
1352,
8973,
198
] | 3 | 34 |
"""
Transitions for Nivre's parser
The parser state consists of the stack, the queue, and the partial graph
The partial graph is represented as a dictionary
"""
__author__ = "Pierre Nugues"
import conll
import dparser
def shift(stack, queue, graph):
"""
Shift the first word in the queue onto the stack
:param stack:
:param queue:
:param graph:
:return:
"""
stack = [queue[0]] + stack
queue = queue[1:]
return stack, queue, graph
def reduce(stack, queue, graph):
"""
Remove the first item from the stack
:param stack:
:param queue:
:param graph:
:return:
"""
return stack[1:], queue, graph
def right_arc(stack, queue, graph, deprel=False):
"""
Creates an arc from the top of the stack to the first in the queue
and shifts
The deprel argument is either read from the manually-annotated corpus
(deprel=False) or assigned by the parser. In this case, the deprel
argument has a value
:param stack:
:param queue:
:param graph:
:param deprel: either read from the manually-annotated corpus (value false)
or assigned by the parser
:return:
"""
graph['heads'][queue[0]['id']] = stack[0]['id']
if deprel:
graph['deprels'][queue[0]['id']] = deprel
else:
graph['deprels'][queue[0]['id']] = queue[0]['deprel']
return shift(stack, queue, graph)
def left_arc(stack, queue, graph, deprel=False):
"""
Creates an arc from the first in the queue to the top of the stack
and reduces it.
The deprel argument is either read from the manually-annotated corpus
(deprel=False) or assigned by the parser. In this case, the deprel
argument has a value
:param stack:
:param queue:
:param graph:
:param deprel: either read from the manually-annotated corpus (value false)
or assigned by the parser
:return:
"""
graph['heads'][stack[0]['id']] = queue[0]['id']
if deprel:
graph['deprels'][stack[0]['id']] = deprel
else:
graph['deprels'][stack[0]['id']] = stack[0]['deprel']
return reduce(stack, queue, graph)
def can_reduce(stack, graph):
"""
Checks that the top of the stack has a head
:param stack:
:param graph:
:return:
"""
if not stack:
return False
if stack[0]['id'] in graph['heads']:
return True
else:
return False
def can_leftarc(stack, graph):
"""
Checks that the top of the has no head
:param stack:
:param graph:
:return:
"""
if not stack:
return False
if stack[0]['id'] in graph['heads']:
return False
else:
return True
def can_rightarc(stack):
"""
Simply checks there is a stack
:param stack:
:return:
"""
if not stack:
return False
else:
return True
def empty_stack(stack, graph):
"""
Pops the items in the stack. If they have no head, they are assigned
a ROOT head
:param stack:
:param graph:
:return:
"""
for word in stack:
if word['id'] not in graph['heads']:
graph['heads'][word['id']] = '0'
graph['deprels'][word['id']] = 'ROOT'
stack = []
return stack, graph
def equal_graphs(sentence, graph):
"""
Checks that the graph corresponds to the gold standard annotation of a sentence
:param sentence:
:param graph:
:return:
"""
equal = True
for word in sentence:
if word['id'] in graph['heads'] and word['head'] == graph['heads'][word['id']]:
pass
else:
#print(word, flush=True)
equal = False
return equal
if __name__ == '__main__':
pass | [
37811,
198,
8291,
1756,
329,
399,
452,
260,
338,
30751,
198,
464,
30751,
1181,
10874,
286,
262,
8931,
11,
262,
16834,
11,
290,
262,
13027,
4823,
198,
464,
13027,
4823,
318,
7997,
355,
257,
22155,
198,
37811,
198,
198,
834,
9800,
834,
... | 2.523453 | 1,471 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.utils import print_debug
from misc.utils import PRINT
from models.utils import weights_init
from misc.normalization import AdainResBlk, ResBlk, MODResBlk, Conv2DMod
import numpy as np
# ----------------- #
# --- GENERATOR --- #
# ----------------- #
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
6738,
4981,
13,
26791,
1330,
3601,
62,
24442,
198,
6738,
12747,
13,
26791,
1330,
4810,
12394,
198,
6738,
4981,
13,
26791,
... | 3.387755 | 98 |
import pytest
from sme_financing.main import create_app, db
@pytest.fixture(scope="session")
@pytest.fixture(scope="session")
@pytest.fixture(scope="session")
@pytest.fixture(scope="class")
| [
11748,
12972,
9288,
198,
198,
6738,
895,
68,
62,
15643,
5077,
13,
12417,
1330,
2251,
62,
1324,
11,
20613,
628,
198,
31,
9078,
9288,
13,
69,
9602,
7,
29982,
2625,
29891,
4943,
628,
198,
31,
9078,
9288,
13,
69,
9602,
7,
29982,
2625,
... | 2.763889 | 72 |
"""
Merge k sorted linked lists and return it as one sorted list. Analyze and describe its complexity.
Example:
Input:
[
1->4->5,
1->3->4,
2->6
]
Output: 1->1->2->3->4->4->5->6
"""
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
from Queue import PriorityQueue | [
37811,
198,
13102,
469,
479,
23243,
6692,
8341,
290,
1441,
340,
355,
530,
23243,
1351,
13,
16213,
2736,
290,
6901,
663,
13357,
13,
198,
198,
16281,
25,
198,
198,
20560,
25,
198,
58,
198,
220,
352,
3784,
19,
3784,
20,
11,
198,
220,
... | 2.469388 | 147 |
# -*- coding: utf-8 -*-
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
628
] | 1.785714 | 14 |
from PyQt5.QtWidgets import QMainWindow, QApplication
from PyQt5 import QtGui
from UI.ui_updater import Ui_MainWindow
from database import Settings
| [
6738,
9485,
48,
83,
20,
13,
48,
83,
54,
312,
11407,
1330,
1195,
13383,
27703,
11,
1195,
23416,
198,
6738,
9485,
48,
83,
20,
1330,
33734,
8205,
72,
198,
6738,
12454,
13,
9019,
62,
929,
67,
729,
1330,
471,
72,
62,
13383,
27703,
198,... | 3.040816 | 49 |
class MouseEventArgs(EventArgs):
"""
Provides data for the System.Windows.Forms.Control.MouseUp,System.Windows.Forms.Control.MouseDown,and System.Windows.Forms.Control.MouseMove events.
MouseEventArgs(button: MouseButtons,clicks: int,x: int,y: int,delta: int)
"""
def Instance(self):
""" This function has been arbitrarily put into the stubs"""
return MouseEventArgs()
@staticmethod
def __new__(self,button,clicks,x,y,delta):
""" __new__(cls: type,button: MouseButtons,clicks: int,x: int,y: int,delta: int) """
pass
Button=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets which mouse button was pressed.
Get: Button(self: MouseEventArgs) -> MouseButtons
"""
Clicks=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the number of times the mouse button was pressed and released.
Get: Clicks(self: MouseEventArgs) -> int
"""
Delta=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a signed count of the number of detents the mouse wheel has rotated,multiplied by the WHEEL_DELTA constant. A detent is one notch of the mouse wheel.
Get: Delta(self: MouseEventArgs) -> int
"""
Location=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the location of the mouse during the generating mouse event.
Get: Location(self: MouseEventArgs) -> Point
"""
X=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the x-coordinate of the mouse during the generating mouse event.
Get: X(self: MouseEventArgs) -> int
"""
Y=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the y-coordinate of the mouse during the generating mouse event.
Get: Y(self: MouseEventArgs) -> int
"""
| [
4871,
21839,
9237,
42035,
7,
9237,
42035,
2599,
201,
198,
37227,
201,
198,
47081,
1366,
329,
262,
4482,
13,
11209,
13,
8479,
82,
13,
15988,
13,
39643,
4933,
11,
11964,
13,
11209,
13,
8479,
82,
13,
15988,
13,
39643,
8048,
11,
392,
44... | 2.957006 | 628 |
# MIT License
# Copyright (c) 2018 Hotox
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the 'Software'), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Code author: Hotox
# Repo: https://github.com/michaeldegroot/cats-blender-plugin
# Edits by:
import bpy
import math
from . import common as Common
from . import armature_bones as Bones
from .register import register_wrap
from .translations import t
ignore_shapes = []
ignore_meshes = []
@register_wrap
@register_wrap
@register_wrap
@register_wrap
@register_wrap
@register_wrap
@register_wrap
@register_wrap
@register_wrap
| [
2,
17168,
13789,
198,
198,
2,
15069,
357,
66,
8,
2864,
6964,
1140,
198,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
198,
2,
286,
428,
3788,
290,
3917,
10314,
3696,
357,
1169,
70... | 3.580046 | 431 |
import random,json | [
11748,
4738,
11,
17752
] | 4.5 | 4 |
if __name__ == '__main__':
import requests
import json
url = ('http://newsapi.org/v2/top-headlines?'
'country=us&'
'apiKey=9b8a6a83887b4da4ace3d23b91e57e89')
response = requests.get(url)
text = response.text
my_json = json.loads(text)
for i in range(0,11):
speak(my_json['article'][i]['title']) | [
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
628,
198,
220,
220,
220,
220,
220,
220,
1330,
7007,
198,
220,
220,
220,
220,
220,
220,
1330,
33918,
628,
220,
220,
220,
220,
220,
220,
19016,
796,
19203,
4023,
1378,
108... | 1.88835 | 206 |
from djongo import models
from django.contrib.auth.models import User
| [
6738,
42625,
25162,
1330,
4981,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
220,
220,
220,
220
] | 3.217391 | 23 |
# -*- coding:utf-8 -*-
from __future__ import with_statement
import os
import argparse
from fabric.api import *
from fabric.contrib.console import confirm
# add fabric function here
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# add argument to parser
# parser.add_argument()
args = parser.parse_args()
# call fabric function base on arguments
if True:
print 'Wrong input format, try -h'
| [
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
198,
6738,
11593,
37443,
834,
1330,
351,
62,
26090,
198,
198,
11748,
28686,
198,
11748,
1822,
29572,
198,
198,
6738,
9664,
13,
15042,
1330,
1635,
198,
6738,
9664,
13,
3642,
... | 2.946667 | 150 |
import logging
from datetime import datetime, timedelta
from typing import Any, List, Optional
from fastapi import APIRouter, Depends, HTTPException, Security, status
from fastapi.security import OAuth2PasswordRequestForm
from jose import JWTError, jwt
from passlib.context import CryptContext
from pydantic import BaseModel, ValidationError
from api.v1.deps import get_current_user, get_current_active_user, get_current_license_owner
from core.config import (ACCESS_TOKEN_EXPIRE_MINUTES, ALGORITHM, DOCTYPE_USER,
SECRET_KEY)
from core.security import (create_access_token, get_password_hash,
verify_password)
from crud.user import get_by_email, authenticate_user
from db.mongo import get_collection
from models.base import Msg
from models.token import Token, TokenData
from models.user import User, UserInDB
from utils import emailutils
# oauth2_scheme = OAuth2PasswordBearer(tokenUrl="/v1/token")
router = APIRouter()
@router.post("/access-token", response_model=Token)
@router.get("/users/me", response_model=User)
@router.post("/test")
# def recover_password(email: str, db: Session = Depends(deps.get_db)) -> Any:
@router.post("/password-recovery/{email}", response_model=Msg)
async def recover_password(email: str) -> Any:
"""
Password Recovery
"""
user = await get_by_email(email=email)
logging.info(user)
if not user:
raise HTTPException(
status_code=404,
detail="The user with this username does not exist in the system.",
)
password_reset_token = emailutils.generate_password_reset_token(email=email)
emailutils.send_reset_password_email(
email_to=user["email"], email=email, token=password_reset_token
)
return {"message": "Password recovery email sent"}
| [
11748,
18931,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
6738,
19720,
1330,
4377,
11,
7343,
11,
32233,
198,
198,
6738,
3049,
15042,
1330,
3486,
4663,
39605,
11,
2129,
2412,
11,
14626,
16922,
11,
4765,
11,
3722,
198,... | 2.801538 | 650 |
t=int(input())
while(t>0):
t=t-1
n=int(input())
a=input().split()
for i in range(n):
a[i]=int(a[i])
b=[0]
c=0
d=0
for i in range(-1,-n,-1):
if(a[i]==a[i-1]):
c=c+0
d=d+1
else:
c=c+1+d
d=0
b.append(c)
for i in range(-1,-n-1,-1) :
print(b[i],end=" ")
print()
| [
83,
28,
600,
7,
15414,
28955,
198,
4514,
7,
83,
29,
15,
2599,
198,
220,
220,
220,
256,
28,
83,
12,
16,
198,
220,
220,
220,
299,
28,
600,
7,
15414,
28955,
198,
220,
220,
220,
257,
28,
15414,
22446,
35312,
3419,
198,
220,
220,
2... | 1.413793 | 290 |
from tictactoe.game.play import Game
| [
6738,
256,
713,
529,
2577,
13,
6057,
13,
1759,
1330,
3776,
198
] | 3.083333 | 12 |
"""
Currently just a stupid bridge between twisted.logger and user applications
"""
# from logging import getLogger as stdlib_getLogger, Logger
from twisted import logger
def getLogger(namespace: str = None) -> logger.Logger:
"""
Just an adapted to mimic python's logging getLogger to twisted's Logger()
:param namespace:
:return:
"""
return logger.Logger(namespace) if namespace else logger.Logger()
| [
37811,
198,
220,
220,
220,
16888,
655,
257,
8531,
7696,
1022,
19074,
13,
6404,
1362,
290,
2836,
5479,
198,
198,
37811,
198,
2,
422,
18931,
1330,
651,
11187,
1362,
355,
14367,
8019,
62,
1136,
11187,
1362,
11,
5972,
1362,
198,
6738,
190... | 3.205882 | 136 |
"""This module contains monitor genrules."""
load("//lib/bazel:py_rules.bzl", "py_binary")
| [
37811,
1212,
8265,
4909,
5671,
2429,
38785,
526,
15931,
198,
198,
2220,
7203,
1003,
8019,
14,
65,
41319,
25,
9078,
62,
38785,
13,
65,
48274,
1600,
366,
9078,
62,
39491,
4943,
198
] | 2.875 | 32 |
import models
import re
from . import base
from . import tasks_fetch_currency_exchange
from abc import abstractproperty
from typing import TypeVar, List, Optional | [
11748,
4981,
198,
11748,
302,
198,
198,
6738,
764,
1330,
2779,
198,
6738,
764,
1330,
8861,
62,
69,
7569,
62,
34415,
62,
1069,
3803,
198,
6738,
450,
66,
1330,
12531,
26745,
198,
6738,
19720,
1330,
5994,
19852,
11,
7343,
11,
32233
] | 3.97561 | 41 |
#!/usr/bin/env python
from PyQt4 import QtGui
from parse_qca import parse_qca_file
from auxil import qca_to_coef, hash_problem
import sys
from pprint import pprint
from random import shuffle
import numpy as np
#
#class QCACell(QtGui.QWidget):
# ''' '''
#
# def __init__(self, parent=None):
# super(QCACell, self).__init__(parent)
#
#class Canvas(QtGui.QWidget):
# ''' '''
#
# def __init__(self, parent=None):
# super(Canvas, self).__init__(parent)
#
#
# def paintEvent(self, e):
#
#class MainWindow(QtGui.QMainWindow):
# ''' '''
#
# def __init__(self):
# ''' '''
# super(MainWindow, self).__init__()
# self.initUI()
#
# def initUI(self):
#
# self.scrollarea = QtGui.QScrollArea()
# self.setCentralWidget(self.scrollarea)
#
# self.canvas = Canvas(self)
def mix_seriation(h, J):
'''Apply random invariants to h and J'''
# shuffle
inds = range(len(h))
shuffle(inds)
K = np.random.rand()
hs = 1-2*(np.random.rand()<.5)
h_ = h[inds]*K*hs
J_ = J[inds, :][:, inds]*K
return h_, J_, K, hs, inds
def main(fname):
''' '''
try:
cells, spacing, zones, J, fb = parse_qca_file(fname, one_zone=True)
except:
print('Failed to load qca file')
return
h, J = qca_to_coef(cells, spacing, J, adj='full')
h /= np.max(np.abs(J))
J /= np.max(np.abs(J))
for _ in range(100):
h_, J_, K, hp, inds = mix_seriation(h, J)
hval, K_, hp_, inds_ = hash_problem(h_, J_)
if False:
print('K: {0:.4f}\t hp: {1}\ninds: {2}'.format(K, hp, inds))
print('K: {0:.4f}\t hp: {1}\ninds: {2}'.format(K_, hp_, inds_))
print('hash val: {0}'.format(hval))
# app = QtGui.QApplication(sys.argv)
#
# w = MainWindow()
# w.show()
#
# sys.exit(app.exec_())
if __name__ == '__main__':
try:
fname = sys.argv[1]
except:
print('No QCAD file given...')
sys.exit()
main(fname) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
201,
198,
201,
198,
6738,
9485,
48,
83,
19,
1330,
33734,
8205,
72,
201,
198,
6738,
21136,
62,
80,
6888,
1330,
21136,
62,
80,
6888,
62,
7753,
201,
198,
6738,
27506,
346,
1330,
10662,
6888,... | 1.773555 | 1,263 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
np.random.seed(1)
# First the noiseless case
X = np.array([3.])
# Observations
y = np.array([0.])
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.linspace(1, 5, 1000)
# Instantiate a Gaussian Process model
kernel = RBF(10, (1e-5, 1e2))
gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=9)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X[:, np.newaxis], y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, sigma = gp.predict(x[:, np.newaxis], return_std=True)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
plt.figure(1)
plt.plot(X, y, 'r.', markersize=10, label='Observations')
plt.plot(x, y_pred, 'b-', label='Prediction')
plt.fill_between(x, y_pred - sigma, y_pred + sigma,
alpha=0.2, color='k')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.legend(loc='upper left')
plt.savefig('demo/skx.png')
# First the noiseless case
X = np.array([2., 4., 5.])
# Observations
y = np.array([0., 0., 0.])
kernel = RBF(1, (1e-5, 1e2))
gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=9)
gp.fit(X[:, np.newaxis], y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, sigma = gp.predict(x[:, np.newaxis], return_std=True)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
plt.figure(2)
plt.plot(X, y, 'r.', markersize=10, label='Observations')
plt.plot(x, y_pred, 'b-', label='Prediction')
plt.fill_between(x, y_pred - sigma, y_pred + sigma,
alpha=0.2, color='k')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.legend(loc='upper left')
plt.savefig('demo/sGkUx.png') | [
11748,
299,
32152,
355,
45941,
198,
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
83,
198,
198,
6738,
1341,
35720,
13,
4908,
31562,
62,
14681,
1330,
12822,
31562,
18709,
8081,
44292,
198,
6738,
1341,
35720,
13,
4908,
31562,
62,
... | 2.464789 | 781 |
from django.shortcuts import render
def home(request):
"""
Home page for Tethys Datasets
"""
context = {}
return render(request, 'tethys_datasets/home.html', context)
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
628,
198,
4299,
1363,
7,
25927,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
5995,
2443,
329,
309,
2788,
893,
16092,
292,
1039,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
... | 2.714286 | 70 |
"""
Unit and regression test for the convert module of the molsysmt package.
"""
# Import package, test suite, and other packages as needed
import molsysmt as msm
import numpy as np
import os
# Whole systems (selection='all' and structure_indices='all')
# Selection
## Multiple outputs
| [
37811,
198,
26453,
290,
20683,
1332,
329,
262,
10385,
8265,
286,
262,
285,
10220,
893,
16762,
5301,
13,
198,
37811,
198,
198,
2,
17267,
5301,
11,
1332,
18389,
11,
290,
584,
10392,
355,
2622,
198,
11748,
285,
10220,
893,
16762,
355,
28... | 3.62963 | 81 |
import numpy as np
from .. import get_spaced_index_list
from ..features import BinaryForegroundMaskComputer
class ForegroundSizeScorer(object):
"""Scores the size of the foreground as a percentage of the average mask size over the entire frame area."""
def score_foreground_size(self, fg_masks):
"""Scores the size of the foreground as a percentage of the average mask size over the entire frame area.
:param fg_masks: The foreground masks associated with each frame (list of HxW bool NumPy arrays with False for
excluded pixels)
:return: float
"""
fg_percentages = [fg_mask.sum() / fg_mask.size for fg_mask in fg_masks]
return np.mean(fg_percentages)
class AutomaticForegroundSizeScorer(object):
"""Detects all foreground objects and computes their total mask size averaged over the entire frame area."""
def __init__(self, frame_spacing=None, num_sampled_frames=None):
"""Constructor
:param frame_spacing: The spacing between two consecutive sampled frames (int)
:param num_sampled_frames: The total number of frames to sample throughout the video (int)
"""
self.frame_spacing = frame_spacing
self.num_sampled_frames = num_sampled_frames
self.foreground_size_scorer = ForegroundSizeScorer()
self.binary_foreground_mask_computer = BinaryForegroundMaskComputer()
def score_foreground_size(self, frames, **kwargs):
"""Detects all foreground objects and computes their total mask size averaged over the entire frame area.
:param frames: The video frames over which the compute FG masks (list of HxWxC uint8 NumPy arrays)
:param kwargs: Additional keyword arguments to use for debugging (dict)
:return: float
"""
frame_indexes = get_spaced_index_list(len(frames), total=self.num_sampled_frames, spacing=self.frame_spacing)
masks = [self.binary_foreground_mask_computer.compute_binary_foreground_mask(frames[i], **kwargs)
for i in frame_indexes]
return self.foreground_size_scorer.score_foreground_size(masks)
| [
11748,
299,
32152,
355,
45941,
198,
198,
6738,
11485,
1330,
651,
62,
2777,
2286,
62,
9630,
62,
4868,
198,
6738,
11485,
40890,
1330,
45755,
16351,
2833,
45195,
34556,
628,
198,
4871,
4558,
2833,
10699,
3351,
11934,
7,
15252,
2599,
198,
2... | 2.843215 | 759 |
#!/usr/bin/env python3
# bkmk.py
# Author: Lorenzo Van Muñoz
# Last updated Dec 31, 2020
'''
A script to generate pdf bookmarks in cpdf,gs,pdftk syntaxes
This script uses regular expressions to identify
relevant bookmark information and to typeset the syntaxes.
It also supports conversion between the different supported formats.
Anyone is welcome to use it or improve upon it. Hopefully it makes
the bookmark creation process easier for anyone needing to turn
tables of contents into bookmarks, especially for large documents
such as textbooks.
Though it works as is, if it were to change in any way I would maybe
create a syntax class with reading/writing methods as an alternative
to the current dictionary bookmark system.
In addition, the only other 'difficult' part about this script is to
properly handle the detection of bookmark index/level and changing it
between the different formats. For example, cpdf and pdftk directly
reference the current level of the entry, however gs/pdfmarks uses a
hierarchical structure where each entry may have 'children'/subentries.
Converting between these to has been implemented one way with while loops
and the other way using recursion. Hopefully these are the only generic
formats and any new ones are just minor variations of these.
Have fun and work fast!
'''
import os
import re
import argparse
import pdftotext
from pdf_tchotchke.utils import filenames
# Global variable define available re flags
RE_FLAGS = {
"A" : re.ASCII,
"I" : re.IGNORECASE,
"L" : re.LOCALE,
"M" : re.MULTILINE,
"S" : re.DOTALL,
"X" : re.VERBOSE,
"U" : re.UNICODE
}
# Global variable syntax data structure for supported syntaxes
BKMK_SYNTAX = {
# Each syntax format has two properties: a print statement to
# print data to that format and a sense statement which is a
# regular expression to detect whether a line has that format
# The data to print corresponds to (x,y,z) = (index,title,page)
"cpdf" : {
"print" : (lambda x,y,z: f"{z} \"{x}\" {y}\n"),
"sense" : r"(?P<index>\d+) \"(?P<title>.+)\" (?P<page>\d+).*"
# View information is given by "[<page number></view command>]"
},
"gs" : {
# the minus sign before the count leaves the menu unexpanded
"print" : (lambda x,y,z: f"[ /Count -{z} /Page {y} /Title ({x}) /OUT pdfmark\n"),
"sense" : r"\[ /Count [-]*(?P<index>\d+) /Page (?P<page>\d+) /Title \((?P<title>.+)\) /OUT pdfmark.*"
# In addition, the /View [</view command>] option and its variations can be added
},
"pdftk" : {
"print" : (lambda x,y,z: f"BookmarkBegin\nBookmarkTitle: {x}\nBookmarkLevel: {z}\nBookmarkPageNumber: {y}\n"),
"sense" : r"BookmarkBegin.*\nBookmarkTitle: (?P<title>.+).*\nBookmarkLevel: (?P<index>\d+).*\nBookmarkPageNumber: (?P<page>\d+).*"
}
}
def whichSyntax(data):
'''
Tests whether the given entry is a bookmark
Arguments:
List : hopefully lines from a bookmark file
Returns:
String or Error : "cpdf" or "gs" syntax, None if not any syntax
'''
for e in list(BKMK_SYNTAX):
if bool(re.search(BKMK_SYNTAX[e]["sense"],data)):
return e
raise UserWarning("The file is does not match any supported syntax")
def convertSyntax(data,output_syntax=None,offset=0):
'''
Converts one bookmark file syntax into another file.
Should detect the input syntax automatically and write
to the specified syntax.
This isn't a necessary feature since if the bookmark file
is already there, then just use the corresponding program
to add the bookmarks.
But maybe just do this for completeness.
This can also renumber the pages by the given offset
'''
input_syntax = whichSyntax(data)
if output_syntax == None:
output_syntax = input_syntax
titles, pages, indices = extractBkmkFile(
data,BKMK_SYNTAX[input_syntax]["sense"])
return writeBkmkFile(output_syntax,
titles,
[int(e) + offset for e in pages],
indices,
index_input_syntax=input_syntax)
def createTocFromText(data, output_syntax=None,
pattern="(?P<title>.+)\n(?P<page>\d+)",
re_flags=re.U, edit=''):
'''
This function takes lines from a bookmarks in a raw text file and outputs them to a specified bookmark syntax.
It also needs to ask interactively for the page offset to calculate the page numbering right.
Arguments:
String : has the content of f.read() from an input file generated from the text of a pdf TOC
String : either "cpdf" or "gs", representing the output syntax
String : a regular expression string containing (?P<page>\d+) and (?P<title>.+) groups to parse the page numbers and entry text from the input file
re.FLAG : a regular expression flag defaulting to re.UNICODE
String : a regexp to apply to all titles. e.g. to remove all leading numbers: r'^[\d\.]+\.'
Return:
String : the finalized bookmark entries
'''
if output_syntax == None:
raise UserWarning('No output syntax has been specified. Aborting!')
# check that given re has only the required fields
re_pattern = re.compile(rf"{pattern}",re_flags)
assert set(['title','page']) == set(re_pattern.groupindex.keys())
# initial data for the first entry with page number > 0
matches = re_pattern.finditer(data)
for m in matches:
first_entry = m.group("title")
first_page = m.group("page")
if int(first_page) > 0:
break
else:
continue
try:
# Ask for the page offset
offset_str = input(f"Enter the page in the pdf for the following TOC entry:\nText: {first_entry}\nPage: {first_page}\n> ")
except NameError:
raise UserWarning('No match to the pattern was found in the bookmark data')
offset = int(offset_str) - int(first_page)
# OPTIONAL delete regexp from the titles
edits = {
False : (lambda x : x),
True : (lambda x : re.sub(edit,'',x))
}
titles,pages = extractBkmkFile(data,re_pattern)
return writeBkmkFile(output_syntax,
[edits[bool(edit)](e) for e in titles],
[int(e) + offset for e in pages],
getCPDFIndexFromTitle([e for e in titles]),
index_input_syntax="cpdf")
def getCPDFIndexFromTitle(title_list):
'''
Determine the cpdf index of an entry (this is simplistic and the logic should be refined depending on the content)
Arguments:
String : ideally the title of a table of contents entry
Returns:
Integer : 0 if the line starts without an integer or an integer without trailing decimal
1 if the line starts with a decimal like X.X where X are integers
2 if the line starts with a double decimal like X.X.X
3 the pattern goes on
'''
# Note that this only outputs the cpdf convention! This is fixed by repairIndex()
# keywords which should be at top index level
keywords = ['Chapter', 'chapter', 'Capítulo', 'capítulo',
'Appendix', 'appendix', 'Apéndice', 'apéndice']
# start indexing
indices = [1 for e in title_list]
for i,title in enumerate(title_list):
# This enforces no empty lines as well as getting index
while bool(re.match("^\w+" + indices[i] * "\.[0-9]+",title)):
indices[i] += 1
# subtract 1 because of construction of while loop
indices[i] -= 1
# For things like exercises, which recur as subsections in the TOC
# but would still be 0 in the previous system, promote them to index 1
# if the first word in that title repeats at least 5 times in the TOC
# where 5 is arbitrary but also a small number of chapters for a
# textbook
if indices[i] == 0:
m = re.match(r'\D+',title)
if bool(m):
if m.group(0) not in keywords:
if (len([e for e in title_list
if m.group(0) in e])
> 4):
indices[i] += 1
return indices
def extractBkmkFile(data,pattern):
'''
This matches a regexp to a bkmk file, returning all the instances of each match group in its own list
Returns a tuple with the lists
'''
# Must use this order! index must be last other wise the case from createTocFromFile() which has no index will fail
preferred_order = {
"title" : 1,
"page" : 2,
"index" : 3
}
pattern = re.compile(pattern)
groups = dict(pattern.groupindex)
# this is the case where we are creating a new bkmk which doesn't yet have indices
if len(groups.keys()) == 2:
del preferred_order['index']
# in the preferred order, list all matches in each group as its own list (possibly a permutation bsed on the ordering of the matching group)
return [ [ e[groups[i]-1].strip() for e in re.findall(pattern,data) ] for i in list(preferred_order.keys()) ]
def writeBkmkFile(output_syntax,titles, pages, indices,index_input_syntax=""):
'''
I was doing this over 5 times in the code so decided to centralize it
This takes in lists with the titles, pages, indices, and exports a string in the requested format
'''
bkmks = ""
for i,_ in enumerate(indices):
bkmks += BKMK_SYNTAX[output_syntax]["print"](
titles[i],pages[i],indices[i])
if output_syntax == index_input_syntax or not bool(index_input_syntax):
return bkmks
else: # the index input syntax is not the same as the output syntax
return repairIndex(bkmks,index_input_syntax) # careful, recursion
def repairIndex(bkmks, index_input_syntax):
'''
This function preserves the syntax of a bkmk file but repairs the indices to match that syntax.
This function is necessary because each of formats has its own convention.
For instance the index in cpdf is starts from 0 and refers to how many levels deep into the TOC that entry is.
The pdftk index is the same logic as cpdf but 1-indexed (add 1 to the cpdf index).
In gs, the index given by /Count N means that that entry has N child entries in the next sublevel.
Arguments:
String : The bookmark file
String : (Optional) The index input syntax (this can be detected regardless)
Returns:
String : The finalized bookmark file
'''
output_syntax = whichSyntax(bkmks)
if output_syntax == index_input_syntax:
return bkmks
else:
titles, pages, indices = extractBkmkFile(bkmks,BKMK_SYNTAX[output_syntax]["sense"])
indices = [int(e) for e in indices]
# convert!
if output_syntax == "gs": #
# convert cpdf or pdftk index to gs index (works because this is a comparison method)
for i,e in enumerate(indices):
indices[i] = 0
try:
# finds the number of subsequent indices 1 larger than the current one before the next index which has the same value as the current one
counter = 0
while indices[i + 1 + counter] != e:
if indices[i + 1 + counter] == e + 1:
indices[i] += 1
counter += 1
except IndexError:
pass
else: # outputting to cpdf or pdftk
if index_input_syntax == "gs":
# convert gs to cpdf
# in this loop, we go from end to beginning and get the cpdf index at each step
# each run through this loops determines how many of the preceeding entries are parents of indices[i]
def recursiveDeleteTerminalBranches(roots):
'''
This takes in a list and removes terminal branches until there are none
'''
tree = roots[:]
for i,e in list(enumerate(tree))[::-1]:
if bool(e):
try:
# if every index in that range is zero, it is a terminal branch
# note that if tree[i] is in the range(e) (i.e. len(tree[i+1:i+1+e]) < len(range(e)))
# then there is match, so we won't delete it, as desired
if tree[i+1:i+1+e] == [0 for x in range(e)]:
# replace e with a zero but remove e entries
del tree[i:i+e]
# prune the tree
tree = recursiveDeleteTerminalBranches(tree)
else:
continue
except IndexError:
continue
else:
continue
#print(tree)
return tree
results = [0 for e in indices]
fast_search = 0
for i,_ in enumerate(indices):
results[i] = len([x for x in recursiveDeleteTerminalBranches(indices[fast_search:i]) if x > 0])
# if the entry has no parent, ignore all the preceeding entries
if results[i] == 0:
fast_search = i
indices = results
if output_syntax == "pdftk":
# convert cpdf to pdftk by adding 1
indices = [ e + 1 for e in indices ]
elif index_input_syntax == "pdftk": # output_syntax == "cpdf"
# convert pdftk to cpdf by subtracting 1
indices = [ e - 1 for e in indices ]
else: # converting cpdf to pdftk by adding 1
indices = [ e + 1 for e in indices ]
return writeBkmkFile(output_syntax, titles, pages, indices)
def importPDFTOC(args):
'''
This function uses pdftotext to read in the table of contents of a pdf
and then does some repetitive tasks that help prepare that text output
for bkmk create. It mostly just deletes blank lines, tries merging entries
split across multiple lines. Adds an @ symbol right before the page number
so that it can be read in easily using the default pattern for bkmk create.
However the bkmk file is not passed to bkmk create because the person making
it should still review the file as there are bound to be errors or things
they would like to adjust. In addition, this function renumbers
This function takes in a file object and returns a string whose contents are
the TOC to review
'''
if args.pdftotext:
f_page = int(input('Provide the pdf page number of the start of the TOC> '))
l_page = int(input('Provide the pdf page number of the end of the TOC> '))
pdf = pdftotext.PDF(args.input)
toc = ''.join([pdf[i] for i in range(f_page-1,l_page)])
else:
toc = args.input.read()
# begin routine manipulations
# remove leading spaces
toc = re.sub(r'\n[ \t]+', r'\n', toc)
# remove instances of keywords or leading/trailing space
contents_page_patterns = [r'[cC]ontents', r'pages*', r'Índice',
r'\n[ \txvi]+', r'\A[ \n\t]+',]
for p in contents_page_patterns:
toc = re.sub(p, r'', toc)
# remove indentations and multiple spaces
toc = re.sub(r'[ \t][ \t]+', r' ', toc)
# remove blank lines and trailing space
toc = re.sub(r'[ \t]*\n[ \t]*\n*', r'\n', toc)
# if the beginning of toc has roman numerals
# replace those with 0 (these will be skipped
# by the create function when it comes to the
# correct numbering)
toc = re.sub(r' [xvi]+\n',r' 0\n',toc)
# add an @ before each page number at EOL
toc = re.sub(r' (\d+)\n',r' @\1\n',toc)
# merge split lines (e.g. those which don't
# end with a number or numeral but have at
# least two words)
toc = re.sub(r'(\D+) (\D+[^xvi0-9])\n(.+) (\d+)\n', r'\1 \2 \3 \4\n', toc)
# May need to escape quotations? " -> \"
args.output.write(toc)
if args.yolo:
# make the current output the input to create
new_path = os.path.dirname(args.output.name)
new_name = os.path.basename(args.output.name)
args.output.close()
args.input = open(os.path.join(new_path, new_name), 'r')
args.output = open(filenames.fileOut(os.path.join(
new_path, 'bkmk_' + new_name)), 'w')
create(args)
return
def create(args):
'''
Calls the right functions to make things create
'''
args.output.write(
createTocFromText(
args.input.read(),
output_syntax=args.syntax,
pattern=args.pattern,
re_flags=RE_FLAGS[args.re_flags],
edit=args.edit
)
)
return
def convert(args):
'''
Calls the right functions to make things convert
'''
args.output.write(
convertSyntax(
args.input.read(),
output_syntax=args.syntax,
offset=args.number
)
)
return
def cli():
'''
Run the bkmk.py script.
This handles its command-line arguments and executes the requested functions.
'''
# Define command-line arguments
parser = argparse.ArgumentParser(
prog='bkmk',
description='''a script to produce pdf bookmarks''')
subparsers = parser.add_subparsers(help='action!')
# Subparsers for each command
parser_convert = subparsers.add_parser(
"convert",
help="change a bookmark file syntax or renumber the pages")
parser_convert.set_defaults(func=convert)
# convert arguments
parser_convert.add_argument(
"-n", "--number", type=int, default=0,
help="apply an offset to all page numbers")
parser_create = subparsers.add_parser(
"create",
help="create bookmarks from a raw TOC file")
parser_create.set_defaults(func=create)
# create arguments
parser_create.add_argument(
"-p", "--pattern", default="(?P<title>.+)@(?P<page>\d+)",
help="regexp to read the input file containing"
"(?P<page>\d+) and (?P<title>.+) groups")
parser_create.add_argument(
"-r", "--re-flags", choices=list(RE_FLAGS), default="U",
help="optionally add a regexp flag to specify --pattern")
parser_create.add_argument(
"-e", "--edit",
help="apply a regexp to the title, e.g. to removing leading"
"numbers: r'^[\d\.]+\.'", default="")
# import arguments
parser_import = subparsers.add_parser(
"import",
help="read in a pdf to get a rough TOC that will need inspection")
parser_import.set_defaults(func=importPDFTOC)
parser_import.add_argument(
"-pdf", dest="pdftotext", action="store_true",
help="instead of reading in a raw TOC, read it from pdf with pdftotext")
parser_import.add_argument(
"-y", "--yolo", action="store_true",
help="pass the imported bkmk to create without revision")
parser_import.add_argument(
"-p", "--pattern", default="(?P<title>.+)@(?P<page>\d+)",
help="regexp to read the input file containing"
"(?P<page>\d+) and (?P<title>.+) groups")
parser_import.add_argument(
"-r", "--re-flags", choices=list(RE_FLAGS), default="U",
help="optionally add a regexp flag to specify --pattern")
parser_import.add_argument(
"-e", "--edit",
help="apply a regexp to the title, e.g. to removing leading"
"numbers: r'^[\d\.]+\.'", default="")
# Main arguments
parser.add_argument(
"syntax", choices=list(BKMK_SYNTAX),
help="choose bookmark output format")
parser.add_argument(
"input",
help="input file name")
parser.add_argument(
"-o", dest="output",
help="output file name")
args = parser.parse_args()
args.input, args.output = filenames.fileIO(args.input, args.output,
writeext='.txt')
print("bkmk.py - a script to manipulate pdf bookmarks\n")
if args.func == importPDFTOC:
if args.pdftotext:
readmode = 'rb'
else:
readmode = 'r'
with open(args.input, readmode) as args.input:
with open(args.output, 'w') as args.output:
args.func(args)
# Close script
print("\nBookmarks finished!")
return
# run script if called from command line
if __name__ == "__main__":
cli()
raise SystemExit()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
275,
13276,
74,
13,
9078,
198,
2,
6434,
25,
41721,
6656,
8252,
12654,
8590,
198,
2,
4586,
6153,
4280,
3261,
11,
12131,
198,
198,
7061,
6,
198,
32,
4226,
284,
7716,
3712... | 2.314805 | 9,193 |
from django.conf.urls import url
from .views import OrganizationDetailView, OrganizationListView
urlpatterns = [
url(r'^(?P<slug>[-\w]+)/$', OrganizationDetailView.as_view(), name='organization_detail'),
url(r'^$', OrganizationListView.as_view(), name='organization_list'),
]
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
198,
198,
6738,
764,
33571,
1330,
12275,
11242,
603,
7680,
11,
12275,
8053,
7680,
628,
198,
6371,
33279,
82,
796,
685,
628,
220,
220,
220,
19016,
7,
81,
6,
61,
7,
30,
47,
2... | 2.9 | 100 |
# -*- coding: utf-8 -*-
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
"""
Base class template for transformers.
class name: BaseTransformer
Covers all types of transformers.
Type and behaviour of transformer is determined by the following tags:
"scitype:transform-input" tag with values "Primitives" or "Series"
this determines expected type of input of transform
if "Primitives", expected inputs X are pd.DataFrame
if "Series", expected inputs X are Series or Panel
Note: placeholder tag for upwards compatibility
currently only "Series" is supported
"scitype:transform-output" tag with values "Primitives", or "Series"
this determines type of output of transform
if "Primitives", output is pd.DataFrame with as many rows as X has instances
i-th instance of X is transformed into i-th row of output
if "Series", output is a Series or Panel, with as many instances as X
i-th instance of X is transformed into i-th instance of output
Series are treated as one-instance-Panels
if Series is input, output is a 1-row pd.DataFrame or a Series
"scitype:instancewise" tag which is boolean
if True, fit/transform is statistically independent by instance
Scitype defining methods:
fitting - fit(self, X, y=None)
transform - transform(self, X, y=None)
fit&transform - fit_transform(self, X, y=None)
updating - update(self, X, y=None)
Inspection methods:
hyper-parameter inspection - get_params()
fitted parameter inspection - get_fitted_params()
State:
fitted model/strategy - by convention, any attributes ending in "_"
fitted state flag - is_fitted (property)
fitted state inspection - check_is_fitted()
"""
__author__ = ["mloning, fkiraly"]
__all__ = [
"BaseTransformer",
"_SeriesToPrimitivesTransformer",
"_SeriesToSeriesTransformer",
"_PanelToTabularTransformer",
"_PanelToPanelTransformer",
]
import warnings
from typing import Union
import numpy as np
import pandas as pd
from sklearn.base import clone
from sktime.base import BaseEstimator
from sktime.datatypes import check_is_mtype, convert_to, mtype, mtype_to_scitype
from sktime.datatypes._series_as_panel import (
convert_Panel_to_Series,
convert_Series_to_Panel,
)
# single/multiple primitives
Primitive = Union[np.integer, int, float, str]
Primitives = np.ndarray
# tabular/cross-sectional data
Tabular = Union[pd.DataFrame, np.ndarray] # 2d arrays
# univariate/multivariate series
UnivariateSeries = Union[pd.Series, np.ndarray]
MultivariateSeries = Union[pd.DataFrame, np.ndarray]
Series = Union[UnivariateSeries, MultivariateSeries]
# panel/longitudinal/series-as-features data
Panel = Union[pd.DataFrame, np.ndarray] # 3d or nested array
def _coerce_to_list(obj):
"""Return [obj] if obj is not a list, otherwise obj."""
if not isinstance(obj, list):
return [obj]
else:
return obj
class BaseTransformer(BaseEstimator):
"""Transformer base class."""
# default tag values - these typically make the "safest" assumption
_tags = {
"scitype:transform-input": "Series",
# what is the scitype of X: Series, or Panel
"scitype:transform-output": "Series",
# what scitype is returned: Primitives, Series, Panel
"scitype:transform-labels": "None",
# what is the scitype of y: None (not needed), Primitives, Series, Panel
"scitype:instancewise": True, # is this an instance-wise transform?
"capability:inverse_transform": False, # can the transformer inverse transform?
"univariate-only": False, # can the transformer handle multivariate X?
"handles-missing-data": False, # can estimator handle missing data?
"X_inner_mtype": "pd.DataFrame", # which mtypes do _fit/_predict support for X?
# this can be a Panel mtype even if transform-input is Series, vectorized
"y_inner_mtype": "None", # which mtypes do _fit/_predict support for y?
"X-y-must-have-same-index": False, # can estimator handle different X/y index?
"requires_y": False, # does y need to be passed in fit?
"enforce_index_type": None, # index type that needs to be enforced in X/y
"fit-in-transform": True, # is fit empty and can be skipped? Yes = True
"transform-returns-same-time-index": False,
# does transform return have the same time index as input X
"skip-inverse-transform": False, # is inverse-transform skipped when called?
}
# allowed mtypes for transformers - Series and Panel
ALLOWED_INPUT_MTYPES = [
"pd.Series",
"pd.DataFrame",
"np.ndarray",
"nested_univ",
"numpy3D",
# "numpyflat",
"pd-multiindex",
# "pd-wide",
# "pd-long",
"df-list",
]
def fit(self, X, y=None, Z=None):
"""Fit transformer to X, optionally to y.
State change:
Changes state to "fitted".
Writes to self:
Sets is_fitted flag to True.
Sets fitted model attributes ending in "_".
Parameters
----------
X : Series or Panel, any supported mtype
Data to fit transform to, of python type as follows:
Series: pd.Series, pd.DataFrame, or np.ndarray (1D or 2D)
Panel: pd.DataFrame with 2-level MultiIndex, list of pd.DataFrame,
nested pd.DataFrame, or pd.DataFrame in long/wide format
subject to sktime mtype format specifications, for further details see
examples/AA_datatypes_and_datasets.ipynb
y : Series or Panel, default=None
Additional data, e.g., labels for transformation
Z : possible alias for X; should not be passed when X is passed
alias Z is deprecated since version 0.10.0 and will be removed in 0.11.0
Returns
-------
self : a fitted instance of the estimator
"""
X = _handle_alias(X, Z)
self._is_fitted = False
# skip everything if fit-in-transform is True
if self.get_tag("fit-in-transform"):
self._is_fitted = True
return self
# input checks and minor coercions on X, y
###########################################
if self.get_tag("requires_y") and y is None:
raise ValueError(f"{self.__class__.__name__} requires `y` in `fit`.")
valid, msg, X_metadata = check_is_mtype(
X, mtype=self.ALLOWED_INPUT_MTYPES, return_metadata=True, var_name="X"
)
if not valid:
raise ValueError(msg)
# checking X
enforce_univariate = self.get_tag("univariate-only")
if enforce_univariate and not X_metadata["is_univariate"]:
raise ValueError("X must be univariate but is not")
# retrieve mtypes/scitypes of all objects
#########################################
X_input_scitype = X_metadata["scitype"]
X_inner_mtype = _coerce_to_list(self.get_tag("X_inner_mtype"))
X_inner_scitypes = mtype_to_scitype(X_inner_mtype, return_unique=True)
# treating Series vs Panel conversion for X
###########################################
# there are three cases to treat:
# 1. if the internal _fit supports X's scitype, move on to mtype conversion
# 2. internal only has Panel but X is Series: consider X as one-instance Panel
# 3. internal only has Series but X is Panel: auto-vectorization over instances
# currently, this is enabled by conversion to df-list mtype
# auto-vectorization is not supported if y is passed
# individual estimators that vectorize over y must implement individually
# 1. nothing to do - simply don't enter any of the ifs below
# 2. internal only has Panel but X is Series: consider X as one-instance Panel
if X_input_scitype == "Series" and "Series" not in X_inner_scitypes:
X = convert_Series_to_Panel(X)
# 3. internal only has Series but X is Panel: loop over instances
elif X_input_scitype == "Panel" and "Panel" not in X_inner_scitypes:
if y is not None and self.get_tag("y_inner_mtype") != "None":
raise ValueError(
f"{type(self).__name__} does not support Panel X if y is not None, "
f"since {type(self).__name__} supports only Series. "
"Auto-vectorization to extend Series X to Panel X can only be "
'carried out if y is None, or "y_inner_mtype" tag is "None". '
"Consider extending _fit and _transform to handle the following "
"input types natively: Panel X and non-None y."
)
X = convert_to(
X,
to_type="df-list",
as_scitype="Panel",
store=self._converter_store_X,
store_behaviour="reset",
)
# this fits one transformer per instance
self.transformers_ = [clone(self).fit(Xi) for Xi in X]
# recurse and leave function - recursion does input checks/conversion
# also set is_fitted flag to True since we leave function here
self._is_fitted = True
return self
X_inner, y_inner = self._convert_X_y(X, y)
# todo: uncomment this once Z is completely gone
# self._fit(X=X_inner, y=y_inner)
# less robust workaround until then
self._fit(X_inner, y_inner)
self._is_fitted = True
return self
def transform(self, X, y=None, Z=None):
    """Transform X and return a transformed version.

    State required:
        Requires state to be "fitted".

    Accesses in self:
        Fitted model attributes ending in "_".
        self._is_fitted

    Parameters
    ----------
    X : Series or Panel, any supported mtype
        Data to be transformed, of python type as follows:
            Series: pd.Series, pd.DataFrame, or np.ndarray (1D or 2D)
            Panel: pd.DataFrame with 2-level MultiIndex, list of pd.DataFrame,
                nested pd.DataFrame, or pd.DataFrame in long/wide format
        subject to sktime mtype format specifications, for further details see
            examples/AA_datatypes_and_datasets.ipynb
    y : Series or Panel, default=None
        Additional data, e.g., labels for transformation
    Z : possible alias for X; should not be passed when X is passed
        alias Z is deprecated since version 0.10.0 and will be removed in 0.11.0

    Returns
    -------
    transformed version of X
    type depends on type of X and scitype:transform-output tag:
        |          | `transform`  |                        |
        |   `X`    |  `-output`   |     type of return     |
        |----------|--------------|------------------------|
        | `Series` | `Primitives` | `pd.DataFrame` (1-row) |
        | `Panel`  | `Primitives` | `pd.DataFrame`         |
        | `Series` | `Series`     | `Series`               |
        | `Panel`  | `Series`     | `Panel`                |
        | `Series` | `Panel`      | `Panel`                |
    instances in return correspond to instances in `X`
    combinations not in the table are currently not supported

    Explicitly, with examples:
        if `X` is `Series` (e.g., `pd.DataFrame`) and `transform-output` is `Series`
            then the return is a single `Series` of the same mtype
            Example: detrending a single series
        if `X` is `Panel` (e.g., `pd-multiindex`) and `transform-output` is `Series`
            then the return is `Panel` with same number of instances as `X`
                (the transformer is applied to each input Series instance)
            Example: all series in the panel are detrended individually
        if `X` is `Series` or `Panel` and `transform-output` is `Primitives`
            then the return is `pd.DataFrame` with as many rows as instances in `X`
            Example: i-th row of the return has mean and variance of the i-th series
        if `X` is `Series` and `transform-output` is `Panel`
            then the return is a `Panel` object of type `pd-multiindex`
            Example: i-th instance of the output is the i-th window running over `X`
    """
    # resolve the deprecated Z alias; raises if both X and Z were passed
    X = _handle_alias(X, Z)
    # check whether is fitted
    self.check_is_fitted()
    # input checks and minor coercions on X, y
    ###########################################
    valid, msg, X_metadata = check_is_mtype(
        X, mtype=self.ALLOWED_INPUT_MTYPES, return_metadata=True, var_name="X"
    )
    if not valid:
        raise ValueError(msg)
    # checking X
    enforce_univariate = self.get_tag("univariate-only")
    if enforce_univariate and not X_metadata["is_univariate"]:
        raise ValueError("X must be univariate but is not")
    # retrieve mtypes/scitypes of all objects
    #########################################
    X_input_mtype = X_metadata["mtype"]
    X_input_scitype = X_metadata["scitype"]
    X_inner_mtype = _coerce_to_list(self.get_tag("X_inner_mtype"))
    X_inner_scitypes = mtype_to_scitype(X_inner_mtype, return_unique=True)
    # treating Series vs Panel conversion for X
    ###########################################
    # there are three cases to treat:
    # 1. if the internal _fit supports X's scitype, move on to mtype conversion
    # 2. internal only has Panel but X is Series: consider X as one-instance Panel
    # 3. internal only has Series but X is Panel: loop over instances
    #   currently this is enabled by conversion to df-list mtype
    #   and this does not support y (unclear what should happen here)
    # 1. nothing to do - simply don't enter any of the ifs below
    #   the "ifs" for case 2 and 3 below are skipped under the condition
    #       X_input_scitype in X_inner_scitypes
    #   case 2 has an "else" which remembers that it wasn't entered
    # 2. internal only has Panel but X is Series: consider X as one-instance Panel
    if (
        X_input_scitype == "Series"
        and "Series" not in X_inner_scitypes
        and "Panel" in X_inner_scitypes
    ):
        # convert the Series X to a one-element Panel
        X = convert_Series_to_Panel(X)
        # remember that we converted the Series to a one-element Panel
        X_was_Series = True
    else:
        # remember that we didn't convert a Series to a one-element Panel
        X_was_Series = False
    # 3. internal only has Series but X is Panel: loop over instances
    if (
        X_input_scitype == "Panel"
        and "Panel" not in X_inner_scitypes
        and "Series" in X_inner_scitypes
    ):
        Xt = self._vectorized_transform(X, X_input_mtype, y=y)
        return Xt
    # convert X/y to supported inner type, if necessary
    ###################################################
    X_inner, y_inner = self._convert_X_y(X, y)
    # carry out the transformation
    ###################################################
    # todo: uncomment this once Z is completely gone
    # Xt = self._transform(X=X_inner, y=y_inner)
    # less robust workaround until then
    Xt = self._transform(X_inner, y_inner)
    # convert transformed X back to input mtype
    ###########################################
    Xt = self._convert_output(Xt, X_input_mtype, X_was_Series)
    return Xt
def fit_transform(self, X, y=None, Z=None):
    """Fit to data, then transform it.

    Equivalent to calling ``fit(X, y)`` followed by ``transform(X, y)``;
    fits the transformer to X and y and returns a transformed version of X.

    State change:
        Changes state to "fitted".

    Writes to self:
        Sets is_fitted flag to True.
        Sets fitted model attributes ending in "_".

    Parameters
    ----------
    X : Series or Panel, any supported mtype
        Data to fit transform to and to transform, of python type as follows:
            Series: pd.Series, pd.DataFrame, or np.ndarray (1D or 2D)
            Panel: pd.DataFrame with 2-level MultiIndex, list of pd.DataFrame,
                nested pd.DataFrame, or pd.DataFrame in long/wide format
        subject to sktime mtype format specifications, for further details see
            examples/AA_datatypes_and_datasets.ipynb
    y : Series or Panel, default=None
        Additional data, e.g., labels for transformation
    Z : possible alias for X; should not be passed when X is passed
        alias Z is deprecated since version 0.10.0 and will be removed in 0.11.0

    Returns
    -------
    transformed version of X; the return type follows the same rules as
    ``transform``: it depends on the scitype of X and on the
    scitype:transform-output tag, and instances in the return correspond
    to instances in `X`
    """
    # resolve the deprecated Z alias for X
    X = _handle_alias(X, Z)
    # non-optimized default: fit, then transform the same data;
    # subclasses may override with a more efficient joint implementation
    fitted_self = self.fit(X, y)
    return fitted_self.transform(X, y)
def inverse_transform(self, X, y=None, Z=None):
    """Inverse transform X and return an inverse transformed version.

    Currently it is assumed that only transformers with tags
        "scitype:transform-input"="Series", "scitype:transform-output"="Series",
    have an inverse_transform.

    State required:
        Requires state to be "fitted".

    Accesses in self:
        Fitted model attributes ending in "_".
        self._is_fitted

    Parameters
    ----------
    X : Series or Panel, any supported mtype
        Data to be inverse transformed, of python type as follows:
            Series: pd.Series, pd.DataFrame, or np.ndarray (1D or 2D)
            Panel: pd.DataFrame with 2-level MultiIndex, list of pd.DataFrame,
                nested pd.DataFrame, or pd.DataFrame in long/wide format
        subject to sktime mtype format specifications, for further details see
            examples/AA_datatypes_and_datasets.ipynb
    y : Series or Panel, default=None
        Additional data, e.g., labels for transformation
    Z : possible alias for X; should not be passed when X is passed
        alias Z is deprecated since version 0.10.0 and will be removed in 0.11.0

    Returns
    -------
    inverse transformed version of X
        of the same type as X, and conforming to mtype format specifications
    """
    # inverse_transform is optional; the capability tag gates it
    if not self.get_tag("capability:inverse_transform"):
        raise NotImplementedError(
            f"{type(self)} does not implement inverse_transform"
        )
    # resolve the deprecated Z alias; raises if both X and Z were passed
    X = _handle_alias(X, Z)
    # check whether is fitted
    self.check_is_fitted()
    # input checks and minor coercions on X, y
    ###########################################
    valid, msg, X_metadata = check_is_mtype(
        X, mtype=self.ALLOWED_INPUT_MTYPES, return_metadata=True, var_name="X"
    )
    if not valid:
        raise ValueError(msg)
    # checking X
    enforce_univariate = self.get_tag("univariate-only")
    if enforce_univariate and not X_metadata["is_univariate"]:
        raise ValueError("X must be univariate but is not")
    # retrieve mtypes/scitypes of all objects
    #########################################
    X_input_mtype = X_metadata["mtype"]
    X_input_scitype = X_metadata["scitype"]
    X_inner_mtype = _coerce_to_list(self.get_tag("X_inner_mtype"))
    X_inner_scitypes = mtype_to_scitype(X_inner_mtype, return_unique=True)
    # treating Series vs Panel conversion for X
    ###########################################
    # there are three cases to treat:
    # 1. if the internal _fit supports X's scitype, move on to mtype conversion
    # 2. internal only has Panel but X is Series: consider X as one-instance Panel
    # 3. internal only has Series but X is Panel: loop over instances
    #   currently this is enabled by conversion to df-list mtype
    #   and this does not support y (unclear what should happen here)
    # 1. nothing to do - simply don't enter any of the ifs below
    #   the "ifs" for case 2 and 3 below are skipped under the condition
    #       X_input_scitype in X_inner_scitypes
    #   case 2 has an "else" which remembers that it wasn't entered
    # 2. internal only has Panel but X is Series: consider X as one-instance Panel
    if (
        X_input_scitype == "Series"
        and "Series" not in X_inner_scitypes
        and "Panel" in X_inner_scitypes
    ):
        # convert the Series X to a one-element Panel
        X = convert_Series_to_Panel(X)
        # remember that we converted the Series to a one-element Panel
        X_was_Series = True
    else:
        # remember that we didn't convert a Series to a one-element Panel
        X_was_Series = False
    # 3. internal only has Series but X is Panel: loop over instances
    if (
        X_input_scitype == "Panel"
        and "Panel" not in X_inner_scitypes
        and "Series" in X_inner_scitypes
    ):
        Xt = self._vectorized_transform(X, X_input_mtype, y=y, inverse=True)
        return Xt
    # convert X/y to supported inner type, if necessary
    ###################################################
    X_inner, y_inner = self._convert_X_y(X, y)
    # carry out the transformation
    ###################################################
    # todo: uncomment this once Z is completely gone
    # Xt = self._transform(X=X_inner, y=y_inner)
    # less robust workaround until then
    Xt = self._inverse_transform(X_inner, y_inner)
    # convert transformed X back to input mtype
    ###########################################
    Xt = self._convert_output(Xt, X_input_mtype, X_was_Series, inverse=True)
    return Xt
def update(self, X, y=None, Z=None, update_params=True):
    """Update transformer with X, optionally y.

    State required:
        Requires state to be "fitted".

    Accesses in self:
        Fitted model attributes ending in "_".
        self._is_fitted

    Writes to self:
        May update fitted model attributes ending in "_".

    Parameters
    ----------
    X : Series or Panel, any supported mtype
        Data to fit transform to, of python type as follows:
            Series: pd.Series, pd.DataFrame, or np.ndarray (1D or 2D)
            Panel: pd.DataFrame with 2-level MultiIndex, list of pd.DataFrame,
                nested pd.DataFrame, or pd.DataFrame in long/wide format
        subject to sktime mtype format specifications, for further details see
            examples/AA_datatypes_and_datasets.ipynb
    y : Series or Panel, default=None
        Additional data, e.g., labels for transformation
    Z : possible alias for X; should not be passed when X is passed
        alias Z is deprecated since version 0.10.0 and will be removed in 0.11.0
    update_params : bool, default=True
        whether the model is updated. Yes if true, if false, simply skips call.
        argument exists for compatibility with forecasting module.

    Returns
    -------
    self : a fitted instance of the estimator
    """
    # resolve the deprecated Z alias; raises if both X and Z were passed
    X = _handle_alias(X, Z)
    # skip everything if update_params is False
    if not update_params:
        return self
    # skip everything if fit-in-transform is True
    if self.get_tag("fit-in-transform"):
        return self
    # input checks and minor coercions on X, y
    ###########################################
    valid, msg, X_metadata = check_is_mtype(
        X, mtype=self.ALLOWED_INPUT_MTYPES, return_metadata=True, var_name="X"
    )
    if not valid:
        raise ValueError(msg)
    # checking X
    enforce_univariate = self.get_tag("univariate-only")
    if enforce_univariate and not X_metadata["is_univariate"]:
        raise ValueError("X must be univariate but is not")
    # retrieve mtypes/scitypes of all objects
    #########################################
    X_input_scitype = X_metadata["scitype"]
    X_inner_mtype = _coerce_to_list(self.get_tag("X_inner_mtype"))
    X_inner_scitypes = mtype_to_scitype(X_inner_mtype, return_unique=True)
    # treating Series vs Panel conversion for X
    ###########################################
    # there are three cases to treat:
    # 1. if the internal _fit supports X's scitype, move on to mtype conversion
    # 2. internal only has Panel but X is Series: consider X as one-instance Panel
    # 3. internal only has Series but X is Panel: auto-vectorization over instances
    #   currently, this is enabled by conversion to df-list mtype
    #   auto-vectorization is not supported if y is passed
    #     individual estimators that vectorize over y must implement individually
    # 1. nothing to do - simply don't enter any of the ifs below
    # 2. internal only has Panel but X is Series: consider X as one-instance Panel
    if X_input_scitype == "Series" and "Series" not in X_inner_scitypes:
        X = convert_Series_to_Panel(X)
    # 3. internal only has Series but X is Panel: loop over instances
    elif X_input_scitype == "Panel" and "Panel" not in X_inner_scitypes:
        if y is not None and self.get_tag("y_inner_mtype") != "None":
            raise ValueError(
                f"{type(self).__name__} does not support Panel X if y is not None, "
                f"since {type(self).__name__} supports only Series. "
                "Auto-vectorization to extend Series X to Panel X can only be "
                'carried out if y is None, or "y_inner_mtype" tag is "None". '
                "Consider extending _fit and _transform to handle the following "
                "input types natively: Panel X and non-None y."
            )
        X = convert_to(
            X,
            to_type="df-list",
            as_scitype="Panel",
            store=self._converter_store_X,
            store_behaviour="reset",
        )
        # this fits one transformer per instance
        # NOTE(review): this re-fits fresh clones instead of updating the
        #   existing self.transformers_ — confirm this is the intended
        #   update semantics for the vectorized case
        self.transformers_ = [clone(self).fit(Xi) for Xi in X]
        # recurse and leave function - recursion does input checks/conversion
        # also set is_fitted flag to True since we leave function here
        self._is_fitted = True
        return self
    X_inner, y_inner = self._convert_X_y(X, y)
    # todo: uncomment this once Z is completely gone
    # self._update(X=X_inner, y=y_inner)
    # less robust workaround until then
    self._update(X_inner, y_inner)
    return self
def _vectorized_transform(self, X, X_input_mtype=None, y=None, inverse=False):
    """Vectorized application of transform or inverse, and convert back.

    Loops a per-instance ("Series"-inner) transformer over the instances
    of the Panel X, then converts the collected results to the expected
    output format.

    Parameters
    ----------
    X : Panel, any supported mtype
        Panel whose instances are transformed one by one
    X_input_mtype : str, default=None
        mtype of X; inferred from X if None
    y : default=None
        must be None unless the "y_inner_mtype" tag is "None";
        vectorization over y is not supported
    inverse : bool, default=False
        if True, applies inverse_transform per instance instead of transform

    Returns
    -------
    Xt : Panel of the input mtype if the output scitype is "Series";
        pd.DataFrame with one row per instance if it is "Primitives"

    Raises
    ------
    ValueError
        if y is passed but auto-vectorization does not support it
    RuntimeError
        if the number of instances differs between fit and transform
    """
    if X_input_mtype is None:
        X_input_mtype = mtype(X, as_scitype=["Series", "Panel"])
    if y is not None and self.get_tag("y_inner_mtype") != "None":
        raise ValueError(
            f"{type(self).__name__} does not support Panel X if y is not None, "
            f"since {type(self).__name__} supports only Series. "
            "Auto-vectorization to extend Series X to Panel X can only be "
            'carried out if y is None, or "y_inner_mtype" tag is "None". '
            "Consider extending _fit and _transform to handle the following "
            "input types natively: Panel X and non-None y."
        )
    # df-list representation gives one DataFrame per instance to loop over
    X = convert_to(
        X,
        to_type="df-list",
        as_scitype="Panel",
        store=self._converter_store_X,
        store_behaviour="reset",
    )
    # depending on whether fitting happens, apply fitted or unfitted instances
    if not self.get_tag("fit-in-transform"):
        # these are the transformers-per-instance, fitted in fit
        transformers = self.transformers_
        if len(transformers) != len(X):
            raise RuntimeError(
                "found different number of instances in transform than in fit. "
                f"number of instances seen in fit: {len(transformers)}; "
                f"number of instances seen in transform: {len(X)}"
            )
        if inverse:
            Xt = [transformers[i].inverse_transform(X[i]) for i in range(len(X))]
        else:
            Xt = [transformers[i].transform(X[i]) for i in range(len(X))]
        # now we have a list of transformed instances
    else:
        # if no fitting happens, just apply transform multiple times
        # NOTE(review): this calls the public transform/inverse_transform,
        #   which re-runs input checks per instance — confirm intended
        if inverse:
            Xt = [self.inverse_transform(X[i]) for i in range(len(X))]
        else:
            Xt = [self.transform(X[i]) for i in range(len(X))]
    # convert to expected output format
    ###################################
    if inverse:
        # the output of inverse transform has the transform-input scitype
        output_scitype = self.get_tag("scitype:transform-input")
    else:
        output_scitype = self.get_tag("scitype:transform-output")
    # if the output is Series, Xt is a Panel and we convert back
    if output_scitype == "Series":
        Xt = convert_to(
            Xt,
            to_type=X_input_mtype,
            as_scitype="Panel",
            store=self._converter_store_X,
            store_behaviour="freeze",
        )
    # if the output is Primitives, we have a list of one-row dataframes
    # we concatenate those and overwrite the index with that of X
    elif output_scitype == "Primitives":
        Xt = pd.concat(Xt)
        Xt = Xt.reset_index(drop=True)
    return Xt
def _convert_X_y(self, X, y):
    """Convert X, y to the inner mtypes expected by _fit/_transform.

    Parameters
    ----------
    X : Series or Panel, any supported mtype
    y : Series, Panel, or None

    Returns
    -------
    X_inner : X converted to (one of) the mtype(s) in the "X_inner_mtype" tag
    y_inner : y converted analogously via the "y_inner_mtype" tag, or None
        if y is None or the tag is ["None"]

    Raises
    ------
    RuntimeError
        if X's scitype is not among the inner scitypes (should not happen
        after the scitype handling in the public methods)
    """
    X_inner_mtype = _coerce_to_list(self.get_tag("X_inner_mtype"))
    X_inner_scitypes = mtype_to_scitype(X_inner_mtype, return_unique=True)
    y_inner_mtype = _coerce_to_list(self.get_tag("y_inner_mtype"))
    X_mtype = mtype(X, as_scitype=["Series", "Panel"])
    X_scitype = mtype_to_scitype(X_mtype)
    # for debugging, exception if the conversion fails (this should never happen)
    if X_scitype not in X_inner_scitypes:
        raise RuntimeError("conversion of X to X_inner unsuccessful, unexpected")
    # convert X/y to supported inner type, if necessary
    ###################################################
    # subset to the mtypes that are of the same scitype as X/y
    X_inner_mtype = [
        mt for mt in X_inner_mtype if mtype_to_scitype(mt) == X_scitype
    ]
    # convert X and y to a supported internal type
    # if X/y type is already supported, no conversion takes place
    X_inner = convert_to(
        X,
        to_type=X_inner_mtype,
        as_scitype=X_scitype,
        store=self._converter_store_X,
        store_behaviour="reset",
    )
    if y_inner_mtype != ["None"] and y is not None:
        if X_scitype == "Series":
            # y_possible_scitypes = ["Series"]
            y_possible_scitypes = "Series"
        elif X_scitype == "Panel":
            # todo: change this back to Panel/Table once
            #   polymorphic convert_to is merged
            # y_possible_scitypes = ["Panel", "Table"]
            # y_possible_scitypes = ["Series", "Panel"]
            y_possible_scitypes = "Table"
        y_mtype = mtype(y, as_scitype=y_possible_scitypes)
        y_scitype = mtype_to_scitype(y_mtype)
        y_inner_mtype = [
            mt for mt in y_inner_mtype if mtype_to_scitype(mt) == y_scitype
        ]
        y_inner = convert_to(
            y,
            to_type=y_inner_mtype,
            as_scitype=y_scitype,
        )
    else:
        y_inner = None
    return X_inner, y_inner
def _convert_output(self, X, X_input_mtype=None, X_was_Series=False, inverse=False):
    """Convert the output of (inverse) transform back to the expected format.

    Parameters
    ----------
    X : output of _transform or _inverse_transform
    X_input_mtype : str, default=None
        mtype of the original input X
    X_was_Series : bool, default=False
        whether the input Series was coerced to a one-instance Panel
    inverse : bool, default=False
        whether X is the output of inverse_transform

    Returns
    -------
    Xt : X converted back to the input mtype if the output scitype is
        "Series"; a pd.DataFrame with one row per instance if it is
        "Primitives"; unchanged if it is "Panel"
    """
    Xt = X
    X_input_scitype = mtype_to_scitype(X_input_mtype)
    if inverse:
        # the output of inverse transform is equal to input of transform
        output_scitype = self.get_tag("scitype:transform-input")
    else:
        output_scitype = self.get_tag("scitype:transform-output")
    # if we converted Series to "one-instance-Panel", revert that
    if X_was_Series and output_scitype == "Series":
        Xt = convert_to(
            Xt, to_type=["pd-multiindex", "numpy3D", "df-list"], as_scitype="Panel"
        )
        Xt = convert_Panel_to_Series(Xt)
    if output_scitype == "Series":
        # output mtype is input mtype
        X_output_mtype = X_input_mtype
        # exception to this: if the transformer outputs multivariate series,
        #   we cannot convert back to pd.Series, do pd.DataFrame instead then
        #   this happens only for Series, not Panel
        if X_input_scitype == "Series":
            _, _, metadata = check_is_mtype(
                Xt,
                ["pd.DataFrame", "pd.Series", "np.ndarray"],
                return_metadata=True,
            )
            if not metadata["is_univariate"] and X_input_mtype == "pd.Series":
                X_output_mtype = "pd.DataFrame"
        Xt = convert_to(
            Xt,
            to_type=X_output_mtype,
            as_scitype=X_input_scitype,
            store=self._converter_store_X,
            store_behaviour="freeze",
        )
    elif output_scitype == "Primitives":
        # we "abuse" the Series converter to ensure df output
        # & reset index to have integers for instances
        if isinstance(Xt, (pd.DataFrame, pd.Series)):
            Xt = Xt.reset_index(drop=True)
        Xt = convert_to(
            Xt,
            to_type="pd.DataFrame",
            as_scitype="Series",
            # no converter store since this is not a "1:1 back-conversion"
        )
    # else output_scitype is "Panel" and no need for conversion
    return Xt
def _fit(self, X, y=None):
"""Fit transformer to X and y.
private _fit containing the core logic, called from fit
Parameters
----------
X : Series or Panel of mtype X_inner_mtype
if X_inner_mtype is list, _fit must support all types in it
Data to fit transform to
y : Series or Panel of mtype y_inner_mtype, default=None
Additional data, e.g., labels for tarnsformation
Returns
-------
self: a fitted instance of the estimator
See extension_templates/transformer.py for implementation details.
"""
# default fit is "no fitting happens"
return self
def _transform(self, X, y=None):
"""Transform X and return a transformed version.
private _transform containing the core logic, called from transform
Parameters
----------
X : Series or Panel of mtype X_inner_mtype
if X_inner_mtype is list, _transform must support all types in it
Data to be transformed
y : Series or Panel, default=None
Additional data, e.g., labels for transformation
Returns
-------
transformed version of X
type depends on type of X and scitype:transform-output tag:
| | `transform` | |
| `X` | `-output` | type of return |
|----------|--------------|------------------------|
| `Series` | `Primitives` | `pd.DataFrame` (1-row) |
| `Panel` | `Primitives` | `pd.DataFrame` |
| `Series` | `Series` | `Series` |
| `Panel` | `Series` | `Panel` |
| `Series` | `Panel` | `Panel` |
instances in return correspond to instances in `X`
combinations not in the table are currently not supported
See extension_templates/transformer.py for implementation details.
"""
raise NotImplementedError("abstract method")
def _inverse_transform(self, X, y=None):
"""Inverse transform X and return an inverse transformed version.
private _inverse_transform containing core logic, called from inverse_transform
Parameters
----------
X : Series or Panel of mtype X_inner_mtype
if X_inner_mtype is list, _inverse_transform must support all types in it
Data to be transformed
y : Series or Panel, default=None
Additional data, e.g., labels for transformation
Returns
-------
inverse transformed version of X
of the same type as X, and conforming to mtype format specifications
See extension_templates/transformer.py for implementation details.
"""
raise NotImplementedError("abstract method")
def _update(self, X, y=None):
"""Update transformer with X and y.
private _update containing the core logic, called from update
Parameters
----------
X : Series or Panel of mtype X_inner_mtype
if X_inner_mtype is list, _update must support all types in it
Data to update transformer with
y : Series or Panel of mtype y_inner_mtype, default=None
Additional data, e.g., labels for tarnsformation
Returns
-------
self: a fitted instance of the estimator
See extension_templates/transformer.py for implementation details.
"""
# standard behaviour: no update takes place, new data is ignored
return self
def _handle_alias(X, Z):
"""Handle Z as an alias for X, return X/Z.
Parameters
----------
X: any object
Z: any object
Returns
-------
X if Z is None, Z if X is None
Raises
------
ValueError both X and Z are not None
"""
if Z is None:
return X
elif X is None:
msg = (
"argument Z will in transformers is deprecated since version 0.10.0 "
"and will be removed in version 0.11.0"
)
warnings.warn(msg, category=DeprecationWarning)
return Z
else:
raise ValueError("X and Z are aliases, at most one of them should be passed")
class _SeriesToPrimitivesTransformer(BaseTransformer):
    """Transformer base class for series to primitive(s) transforms."""

    # class is temporary for downwards compatibility
    # default tag values for "Series-to-Primitives"
    _tags = {
        "scitype:transform-input": "Series",
        # what is the scitype of X: Series, or Panel
        "scitype:transform-output": "Primitives",
        # what scitype is returned: Primitives, Series, Panel
        "scitype:instancewise": True,  # is this an instance-wise transform?
        "X_inner_mtype": "pd.Series",  # which mtypes do _fit/_predict support for X?
        "y_inner_mtype": "pd.Series",  # which mtypes do _fit/_predict support for y?
    }
class _SeriesToSeriesTransformer(BaseTransformer):
    """Transformer base class for series to series transforms."""

    # class is temporary for downwards compatibility
    # default tag values for "Series-to-Series"
    _tags = {
        "scitype:transform-input": "Series",
        # what is the scitype of X: Series, or Panel
        "scitype:transform-output": "Series",
        # what scitype is returned: Primitives, Series, Panel
        "scitype:instancewise": True,  # is this an instance-wise transform?
        "X_inner_mtype": "pd.Series",  # which mtypes do _fit/_predict support for X?
        "y_inner_mtype": "pd.Series",  # which mtypes do _fit/_predict support for y?
    }
class _PanelToTabularTransformer(BaseTransformer):
    """Transformer base class for panel to tabular transforms."""

    # class is temporary for downwards compatibility
    # default tag values for "Panel-to-Tabular"
    _tags = {
        "scitype:transform-input": "Series",
        # what is the scitype of X: Series, or Panel
        "scitype:transform-output": "Primitives",
        # what scitype is returned: Primitives, Series, Panel
        "scitype:instancewise": False,  # is this an instance-wise transform?
        "X_inner_mtype": "nested_univ",  # which mtypes do _fit/_predict support for X?
        "y_inner_mtype": "pd.Series",  # which mtypes do _fit/_predict support for y?
    }
class _PanelToPanelTransformer(BaseTransformer):
    """Transformer base class for panel to panel transforms."""

    # class is temporary for downwards compatibility
    # default tag values for "Panel-to-Panel"
    _tags = {
        "scitype:transform-input": "Series",
        # what is the scitype of X: Series, or Panel
        "scitype:transform-output": "Series",
        # what scitype is returned: Primitives, Series, Panel
        "scitype:instancewise": False,  # is this an instance-wise transform?
        "X_inner_mtype": "nested_univ",  # which mtypes do _fit/_predict support for X?
        "y_inner_mtype": "pd.Series",  # which mtypes do _fit/_predict support for y?
    }
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
6634,
25,
1341,
2435,
6505,
11,
347,
10305,
12,
18,
12,
2601,
682,
13789,
357,
3826,
38559,
24290,
2393,
8,
198,
37811,
198,
14881,
1398,
11055,
329,
6121,
364,
13,... | 2.338188 | 18,815 |
"""
The h5 module is organized to mirror the group structure of the resulting h5 file.
""" | [
37811,
198,
464,
289,
20,
8265,
318,
8389,
588,
262,
7186,
289,
20,
2393,
338,
2628,
13,
198,
37811
] | 3.736842 | 19 |
import sys
import json
import argparse
from os import path

# make the repository root importable so that the `reducers` package resolves
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))

from reducers.topk import TopkReducer

# command-line entry point for the Top-K NBA reducer
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Top K reducer NBA',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        '--config',
        help='The Net Topology configuration file'
    )
    parser.add_argument(
        '--rid',
        type=int,
        default=1,
        help="The Reducer id"
    )
    parser.add_argument(
        '--workers',
        type=int,
        default=1,
        help='The number of summary workers'
    )
    parser.add_argument(
        '--k',
        type=int,
        default=10,
        help='The k of the Top K'
    )
    args = parser.parse_args()
    # NOTE(review): `main` is neither defined nor imported in this file, so
    #   this call raises NameError at runtime — confirm where `main` should
    #   come from (presumably it should construct and run TopkReducer, which
    #   is imported but unused here, as is `json`).
    main(args.rid, args.workers, args.k, args.config)
| [
11748,
25064,
198,
11748,
33918,
198,
11748,
1822,
29572,
198,
6738,
28686,
1330,
3108,
198,
198,
17597,
13,
6978,
13,
33295,
7,
6978,
13,
15908,
3672,
7,
6978,
13,
15908,
3672,
7,
6978,
13,
397,
2777,
776,
7,
834,
7753,
834,
35514,
... | 2.035417 | 480 |
import contextlib
from typing import Dict, Iterable, Optional, Tuple, List, Any, Type, Hashable, \
NamedTuple, Generator, Callable
from unittest import mock
from unittest.mock import MagicMock, Mock
import structlog
import pykube
import pytest
from _pytest.fixtures import FixtureRequest
from k8s_snapshots import kube, errors
from k8s_snapshots.context import Context
# module-level structured logger for this test module
_logger = structlog.get_logger(__name__)

# marker key embedded in the fake kube config; lets tests verify that this
# fixture config — and not a real cluster config — is in use
KUBE_SAFETY_CHECK_CONFIG_KEY = 'test-fixture-safety-check'

# minimal, fully fake pykube configuration pointing at a nonexistent test
# cluster; never contacts a real API server
KUBE_CONFIG = pykube.KubeConfig({
    'apiVersion': 'v1',
    'kind': 'Config',
    'clusters': [
        {
            'name': 'test-fixture-cluster',
            'certificate-authority-data': 'From fixture fx_kube_config',
            'server': 'http://test-fixture-server',
        },
    ],
    'contexts': [
        {
            'name': 'test-fixture-context',
            'context': {
                'cluster': 'test-fixture-cluster',
                'user': 'test-fixture-user',
            },
        },
    ],
    'current-context': 'test-fixture-context',
    KUBE_SAFETY_CHECK_CONFIG_KEY: 'I am present',
})

# zone label in the form set by Kubernetes cloud providers on nodes/volumes
LABEL_ZONE_VALUE = 'test-zone'
LABEL_ZONE_KEY = 'failure-domain.beta.kubernetes.io/zone'
LABEL_ZONE = {LABEL_ZONE_KEY: LABEL_ZONE_VALUE}

# annotation value with two snapshot deltas in ISO-8601 duration form
DELTAS_ANNOTATION = 'PT1M PT2M'

# sentinel distinguishing "argument omitted" from an explicit None
DEFAULT = object()
@pytest.fixture(scope='session', autouse=True)
@pytest.fixture(scope='session', autouse=True)
@pytest.fixture
def fx_kube_config(request: FixtureRequest) -> pykube.KubeConfig:
    """
    Minimal fake pykube.HTTPClient config fixture.

    Returns the module-level KUBE_CONFIG, which points at a nonexistent
    test cluster and carries the safety-check marker key.
    """
    return KUBE_CONFIG
@contextlib.contextmanager
def mock_kube(resources: Iterable[kube.Resource]):
    """
    Context manager mocking the resources available through the
    `k8s_snapshots.kube.Kubernetes` abstraction.

    While the context is active, exactly the given resources are served.

    Parameters
    ----------
    resources
        The kube.Resource objects to expose through the mock.
    """
    patcher = MockKubernetes.patch(resources)
    with patcher:
        yield
def make_resource(
    resource_type: Type[kube.Resource],
    name,
    namespace=DEFAULT,
    labels=DEFAULT,
    annotations=DEFAULT,
    spec=DEFAULT,
) -> kube.Resource:
    """
    Create a fake Kubernetes resource of the given pykube type.

    Parameters
    ----------
    resource_type
        pykube object class to instantiate, e.g. PersistentVolume.
    name
        Resource name; also used to build the fake selfLink.
    namespace
        Namespace recorded in metadata; defaults to 'default'.
    labels
        If given, stored as metadata labels; key omitted entirely by default.
    annotations
        Metadata annotations; defaults to an empty dict.
    spec
        Resource spec; defaults to an empty dict.

    Returns
    -------
    An instance of resource_type backed by a mocked pykube.HTTPClient,
    so it never contacts a real API server.
    """
    # resolve all DEFAULT sentinels up front
    if namespace is DEFAULT:
        namespace = 'default'
    if annotations is DEFAULT:
        annotations = {}
    if spec is DEFAULT:
        spec = {}
    api = MagicMock(
        spec=pykube.HTTPClient,
        config=Mock()
    )
    obj = {
        'metadata': {
            'name': name,
            'annotations': annotations,
            'selfLink': f'test/{namespace}/{resource_type.endpoint}/{name}',
            # namespace is always set: after sentinel resolution above, the
            # original `if namespace is not DEFAULT` guard was always true,
            # so the conditional assignment was dead code
            'namespace': namespace,
        },
        'spec': spec,
    }
    if labels is not DEFAULT:
        obj['metadata']['labels'] = labels
    return resource_type(api, obj)
def make_volume_and_claim(
    ctx,
    volume_name='test-pv',
    claim_name='test-pvc',
    volume_annotations=DEFAULT,
    claim_annotations=DEFAULT,
    claim_namespace=DEFAULT,
    volume_zone_label=DEFAULT,
) -> Tuple[
    pykube.objects.PersistentVolume,
    pykube.objects.PersistentVolumeClaim
]:
    """
    Create a linked fake PersistentVolume / PersistentVolumeClaim pair.

    The PV's claimRef points at the claim, and the claim's spec points back
    at the volume, mimicking a bound GCE persistent disk.

    Parameters
    ----------
    ctx
        unused in this function — presumably kept for call-site
        compatibility; TODO confirm
    volume_name, claim_name
        names of the created PV and PVC
    volume_annotations, claim_annotations
        metadata annotations, forwarded to make_resource
    claim_namespace
        namespace of the claim; also recorded in the PV's claimRef
    volume_zone_label
        labels for the PV; defaults to the test zone label

    Returns
    -------
    (pv, pvc) tuple of fake pykube objects
    """
    if volume_zone_label is DEFAULT:
        volume_zone_label = {LABEL_ZONE_KEY: LABEL_ZONE_VALUE}
    pv = make_resource(
        pykube.objects.PersistentVolume,
        volume_name,
        annotations=volume_annotations,
        labels=volume_zone_label,
        spec={
            'claimRef': {
                'name': claim_name,
                # NOTE(review): if claim_namespace is left at DEFAULT, the
                #   sentinel object itself ends up here instead of a
                #   namespace string — confirm callers always pass it
                'namespace': claim_namespace,
            },
            'gcePersistentDisk': {
                'pdName': 'test-pd'
            }
        }
    )
    pvc = make_resource(
        pykube.objects.PersistentVolumeClaim,
        claim_name,
        annotations=claim_annotations,
        namespace=claim_namespace,
        spec={
            'volumeName': volume_name,
        }
    )
    return pv, pvc
@pytest.fixture
@pytest.fixture
| [
11748,
4732,
8019,
198,
6738,
19720,
1330,
360,
713,
11,
40806,
540,
11,
32233,
11,
309,
29291,
11,
7343,
11,
4377,
11,
5994,
11,
21059,
540,
11,
3467,
198,
220,
220,
220,
34441,
51,
29291,
11,
35986,
11,
4889,
540,
198,
6738,
555,
... | 2.155219 | 1,849 |
import os
import sys
import subprocess
import traceback
class ConfigParams(object):
    """Create different configuration spaces for the Jetson TX1.

    Provides helpers to enable/disable CPU cores and set CPU/GPU clock
    frequencies through sysfs and helper shell scripts.
    """
def set_big_core_status(self, cpu_name, status):
    """Enable or disable a CPU core via its sysfs ``online`` flag.

    Parameters
    ----------
    cpu_name : str
        Core to change, e.g. "cpu1". "cpu0" is rejected (it cannot be
        taken offline on this platform).
    status : str
        Desired state as written to sysfs: "1" (online) or "0" (offline).

    Returns
    -------
    bool
        True if the core is (now) in the requested state, False otherwise.
    """
    if cpu_name == "cpu0":
        # bug fix: this branch previously only printed and fell through,
        # returning None despite the documented boolean return
        print("invalid cpu_name argument")
        return False
    filename = "{0}{1}{2}".format("/sys/devices/system/cpu/", cpu_name, "/online")
    cur_status = subprocess.getstatusoutput("cat {0}".format(filename))[1]
    if cur_status != status:
        res = subprocess.call(
            ["sudo", "sh", "./utils/change_core_status.sh", str(cpu_name), str(status)]
        )
        if res != 0:
            print("[CPU STATUS ERROR]: {0}".format("subprocess command failed"))
            return False
        # re-read sysfs to confirm the change actually took effect
        new_status = subprocess.getstatusoutput("cat {0}".format(filename))[1]
        if new_status != status:
            print(
                "[CPU STATUS ERROR]: " + cpu_name + "\n"
                "expected: " + str(status) + "\n"
                "actual: " + str(new_status)
            )
            return False
    return True
def set_big_core_freq(self, cpu_name, frequency):
"""This function is used to set core frequency of one or more cores
@input:
frequency: clockspeed at what the cpu will be set
cpu_name: cpu number which will be set
@returns:
@returns:
boolean: status of operation
"""
frequency = int(frequency)
if frequency is not None:
filename="{0}{1}{2}".format("/sys/devices/system/cpu/",
cpu_name,
"/cpufreq/scaling_cur_freq")
cur_freq=subprocess.getstatusoutput("cat {0}".format(filename))[1]
res=subprocess.call(["sudo","sh","./utils/change_core_frequency.sh",str(self.cur_sys),str(frequency),str(cur_freq)])
if res!=0:
err=traceback.print_exc()
print("[CPU FREQUENCY ERROR]: {0}".format(err))
return False
new_freq=subprocess.getstatusoutput("cat {0}".format(filename))[1]
if str(new_freq)!=str(frequency):
print ("[CPU FREQUENCY ERROR]: "+cpu_name+ "\n"
"expected: " + str(frequency) + "\n"
"actual: "+ str(new_freq))
return False
return True
def set_gpu_freq(self, frequency):
"""This function is used to change gpu clockspeeds
@input:
frequency: the clockspeed at which the gpu will be set
@returns:
boolean: status of operation
"""
frequency = int(frequency)
if frequency is not None:
filename=self.cfg["systems"][self.cur_sys]["gpu"]["frequency"]["current"]
try:
if frequency is not None:
cur_freq=subprocess.getstatusoutput("cat {0}".format(filename))[1]
res=subprocess.call(["sudo","sh","./utils/change_gpu_frequency.sh",str(self.cur_sys),str(frequency),str(cur_freq)])
if res!=0:
err=traceback.print_exc()
print("[GPU FREQUENCY ERROR]: {0}".format(err))
return False
# check if the operation is successful
new_freq=subprocess.getstatusoutput("cat {0}".format(filename))[1]
if new_freq!=frequency:
print ("[GPU FREQUENCY ERROR]: \n"
"expected: " + str(frequency) + "\n"
"actual: "+ str(new_freq))
return False
return True
except AttributeError as e:
print("[GPU FREQUENCY ERROR: {0}]".format(e))
def set_emc_freq(self, frequency):
"""This function is used to change emmc clockspeeds
@input:
frequency: the clockspeed at which the emmc will be set
@returns:
boolean: status of operation
"""
#print ("emc frequency")
frequency = int(frequency)
if frequency is not None:
filename=self.cfg["systems"][self.cur_sys]["emc"]["frequency"]["current"]
try:
if frequency is not None:
cur_freq=subprocess.getstatusoutput("cat {0}".format(filename))[1]
res=subprocess.call(["sudo","sh","./utils/change_emc_frequency.sh",str(self.cur_sys),str(frequency)])
if res!=0:
err=traceback.print_exc()
print("[EMC FREQUENCY ERROR]: {0}".format(err))
return False
# check if the operation is successful
new_freq=subprocess.getstatusoutput("cat {0}".format(filename))[1]
if new_freq!=frequency:
print ("[EMC FREQUENCY ERROR]: \n"
"expected: " + str(frequency) + "\n"
"actual: "+ str(new_freq))
return False
return True
except AttributeError as e:
print("[EMC FREQUENCY ERROR: {0}]".format(e))
def set_scheduler_policy(self, val):
""""This function is used to set scheduler policy"""
if val==0: os.system ("echo cfq > /sys/block/mmcblk0/queue/scheduler")
elif val==1: os.system ("echo noop > /sys/block/mmcblk0/queue/scheduler")
else: print("[ERROR]: Invalid policy value")
def set_cache_pressure(self, val):
"""This function is used to set cache pressure"""
os.system ("sysctl vm.vfs_cache_pressure={0}".format(val))
def set_swappiness(self, val):
"""This function is used to set swappiness value"""
os.system ("sysctl vm.swappiness={0}".format(val))
def set_dirty_bg_ratio(self, val):
"""This function is used to set dirty bg value"""
os.system ("sysctl vm.dirty_background_ratio={0}".format(val))
def set_dirty_ratio(self, val):
"""This function is used to set dirty ratio value"""
os.system ("sysctl vm.dirty_ratio={0}".format(val))
def set_drop_caches(self, val):
"""This function is used to set drop caches value"""
os.system ("sysctl vm.drop_caches={0}".format(val))
def set_sched_child_runs_first(self, val):
"""This function is used to set kernel.sched child runs first value"""
os.system ("sysctl kernel.sched_child_runs_first={0}".format(val))
def set_sched_rt_runtime_us(self, val):
"""This function is used to set sched rt runtime us value"""
os.system ("sysctl kernel.sched_rt_runtime_us={0}".format(val))
def set_nr_hugepages(self, val):
"""This function is used to set nr hugepages value"""
os.system ("sysctl vm.nr_hugepages={0}".format(val))
def set_overcommit_ratio(self, val):
"""This function is used to set overcommit ratio value"""
os.system ("sysctl vm.overcommit_ratio={0}".format(val))
def set_overcommit_memory(self, val):
"""This function is used to set overcommit memory value"""
os.system ("sysctl vm.overcommit_memory={0}".format(val))
def set_overcommit_hugepages(self, val):
"""This function is used to set overcommit hugepages value"""
os.system ("sysctl vm.overcommit_hugepages={0}".format(val))
def set_max_pids(self, val):
"""This function is used to set max pids value"""
os.system ("sysctl user.max_pid_namespaces={0}".format(val))
def set_sched_nr_migrate(self, val):
"""This function is used to set sched nr migrate value"""
os.system ("sysctl kernel.sched_nr_migrate={0}".format(val))
def set_sched_time_avg_ms(self, val):
"""This function is used to set sched nr migrate value"""
os.system ("sysctl kernel.sched_time_avg_ms={0}".format(val))
def set_cpu_time_max_percent(self, val):
"""This function is used to set cpu time max percent value"""
os.system ("sysctl kernel.cpu_time_max_percent={0}".format(val))
| [
11748,
28686,
220,
198,
11748,
25064,
198,
11748,
850,
14681,
198,
11748,
12854,
1891,
198,
198,
4871,
17056,
10044,
4105,
7,
15252,
2599,
198,
220,
220,
220,
37227,
1212,
1398,
318,
973,
284,
2251,
1180,
1013,
72,
3924,
2272,
329,
2079... | 1.999554 | 4,483 |
from django.urls import path
from .views import index, InsertSampleDataView, \
GenreListView, GenreDetailView, GenreUpdateView, GenreDeleteView, GenreCreateView, \
AuthorListView, AuthorDetailView, AuthorUpdateView, AuthorDeleteView, AuthorCreateView, \
ArtworkListView, ArtworkDetailView, ArtworkUpdateView, ArtworkDeleteView, \
ArtworkWizard
from .preview import GenreFormPreview
from .forms import GenreForm
urlpatterns = [
path('', index, name='index'),
path('insert-sample-data', InsertSampleDataView.as_view(), name='insert-sample-data'),
path('genres', GenreListView.as_view(), name='genres'),
path('genres/create', GenreCreateView.as_view(), name='genre-create'),
path('genres/<int:pk>/view', GenreDetailView.as_view(), name='genre-detail-view'),
path('genres/<int:pk>/update', GenreUpdateView.as_view(), name='genre-update'),
path('genres/<int:pk>/delete', GenreDeleteView.as_view(), name='genre-delete'),
path('authors', AuthorListView.as_view(), name='authors'),
path('authors/create', AuthorCreateView.as_view(), name='author-create'),
path('authors/<int:pk>/view', AuthorDetailView.as_view(), name='author-detail-view'),
path('authors/<int:pk>/update', AuthorUpdateView.as_view(), name='author-update'),
path('authors/<int:pk>/delete', AuthorDeleteView.as_view(), name='author-delete'),
path('artworks', ArtworkListView.as_view(), name='artworks'),
path('artworks/<int:pk>/view', ArtworkDetailView.as_view(), name='artwork-detail-view'),
path('artworks/<int:pk>/delete', ArtworkDeleteView.as_view(), name='artwork-delete'),
# formtools FormPreview
path('genres/formtools-preview', GenreFormPreview(GenreForm), name='genre-formtools-preview'),
# formtools Artwork wizard
path('artworks/create', ArtworkWizard.as_view(ArtworkWizard.FORMS), name='artwork-create-wizard'),
path('artworks/<int:pk>/update', ArtworkWizard.as_view(ArtworkWizard.FORMS), name='artwork-update-wizard'),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
6738,
764,
33571,
1330,
6376,
11,
35835,
36674,
6601,
7680,
11,
3467,
198,
220,
220,
220,
5215,
260,
8053,
7680,
11,
5215,
260,
11242,
603,
7680,
11,
5215,
260,
10260,
7680,
11,
5215,... | 2.829303 | 703 |
#\\ --- backslash
print("I am here for the backslash\\")
| [
2,
6852,
11420,
736,
6649,
1077,
201,
198,
4798,
7203,
40,
716,
994,
329,
262,
736,
6649,
1077,
6852,
4943,
201,
198
] | 2.681818 | 22 |
from django.contrib import admin
from applications.office_panel.models import Patient
admin.site.register(Patient)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
6738,
5479,
13,
31810,
62,
35330,
13,
27530,
1330,
35550,
198,
198,
28482,
13,
15654,
13,
30238,
7,
12130,
1153,
8,
198
] | 3.65625 | 32 |
from catalog.tasks import scan_catalog, update_catalog
from chibi_gob_mx import catalog
import datetime
import unittest
from unittest.mock import patch, Mock
from catalog.factories import (
Catalog as Catalog_factory,
Catalog_with_id as Catalog_with_id_factory
)
from catalog.models import Catalog_pulse, Catalog
| [
6738,
18388,
13,
83,
6791,
1330,
9367,
62,
9246,
11794,
11,
4296,
62,
9246,
11794,
198,
6738,
442,
27567,
62,
44270,
62,
36802,
1330,
18388,
628,
198,
198,
11748,
4818,
8079,
198,
11748,
555,
715,
395,
198,
6738,
555,
715,
395,
13,
... | 3.215686 | 102 |
# Edinburgh bus tracker - simple example output to a shell
# (c) Mark Pentler 2017
from edinbustrack import *
import os
from time import sleep
# setup our variables first. stop id and url are definable in case they change them
stop_id = "36232626" # grabbed from the mybustracker website
stop_name = get_stop_name(stop_id)
while True:
services = get_bus_times(stop_id) # update our service list
os.system("clear")
print "Next departures from " + stop_name + " - CTRL-C to exit"
print "---------------------"
print "Service\t\tMins"
print "---------------------"
for id, service, mins in services: # iterate through the list
print service + "\t\t" + mins
sleep(30) # wait before updating again
| [
2,
23475,
1323,
30013,
532,
2829,
1672,
5072,
284,
257,
7582,
198,
2,
357,
66,
8,
2940,
9696,
1754,
2177,
198,
198,
6738,
1225,
259,
65,
436,
39638,
1330,
1635,
198,
11748,
28686,
198,
6738,
640,
1330,
3993,
198,
198,
2,
9058,
674,
... | 3.269406 | 219 |
""" Unittests for nodes.feature_generation"""
| [
37811,
791,
715,
3558,
329,
13760,
13,
30053,
62,
20158,
37811,
198
] | 3.833333 | 12 |
import numpy as np
import pandas as pd
from sklearn.gaussian_process.kernels import RBF, ExpSineSquared
from darts.models import GaussianProcessFilter
from darts.models.filtering.moving_average import MovingAverage
from darts.models.filtering.kalman_filter import KalmanFilter
from darts import TimeSeries
from darts.utils import timeseries_generation as tg
from darts.tests.base_test_class import DartsBaseTestClass
if __name__ == "__main__":
KalmanFilterTestCase().test_kalman()
MovingAverageTestCase().test_moving_average_univariate()
MovingAverageTestCase().test_moving_average_multivariate()
GaussianProcessFilterTestCase().test_gaussian_process()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
1341,
35720,
13,
4908,
31562,
62,
14681,
13,
74,
44930,
1330,
17986,
37,
11,
5518,
50,
500,
22266,
1144,
198,
198,
6738,
47807,
13,
27530,
1330,
12822,
31... | 3.414141 | 198 |
import torch.nn as nn
from .base_model import Up_Conv_Block
class Decoder(nn.Module):
"""
Args:
N_p (int): The sum of the poses
N_z (int): The dimensions of the noise
"""
| [
11748,
28034,
13,
20471,
355,
299,
77,
198,
6738,
764,
8692,
62,
19849,
1330,
3205,
62,
3103,
85,
62,
12235,
628,
198,
4871,
34580,
7,
20471,
13,
26796,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
943,
14542,
25,
198,
220,
... | 2.364706 | 85 |
"""
Copyright (c) 2021, NVIDIA CORPORATION.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import List
import tensorflow as tf
class CriteoTsvReader:
"""
Input reader for pre-processed Criteo data.
Raw Criteo data is assumed to be preprocessed in the following way:
1. Missing values are replaced with zeros.
2. Negative values are replaced with zeros.
3. Integer features are transformed by log(x+1) and are hence tf.float32.
4. Categorical data is bucketized and are hence tf.int32
"""
if __name__ == "__main__":
dataset = CriteoTsvReader(file_pattern=r"./train/*",
num_dense_features=13,
vocab_sizes=[39884407, 39043, 17289, 7420, 20263,
3, 7120, 1543, 63, 38532952, 2953546,
403346, 10, 2208, 11938, 155, 4, 976,
14, 39979772, 25641295, 39664985, 585935,
12972, 108, 36],
batch_size=16384,
sharding=False)()
for step, (features, labels) in enumerate(dataset):
# print(features)
print(labels)
| [
37811,
198,
15069,
357,
66,
8,
33448,
11,
15127,
23929,
44680,
6234,
13,
198,
220,
198,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
... | 2.26087 | 782 |
# -*- coding: utf-8 -*-
"""
Detect genes using blast
Revision history:
----------------
2020-02-13: Amromics created
"""
from __future__ import division, print_function, absolute_import
import subprocess
import os, shutil, glob
import re
import sys
import argparse
def blast(sample,db,output, identity=90, threads=1, mincov=0,dbtype='nucl'):
"""
Call blastn with params
:param query_file (in fasta), db (blast indexed db), number of threads and identity
:return: list BLASTFields objects
"""
#check db is indexed
#dbfile=os.path.join(db_folder, 'sequences')
# run blastn
cmd ='blastn -query {query} -task blastn -dust no -perc_identity {identity} -db {db} -outfmt \'6 qseqid qstart qend qlen sseqid sstart send slen sstrand evalue length pident gaps gapopen stitle\' -num_threads {threads} -evalue 1E-20 -culling_limit 1 > temp.tab'.format(
query=sample,
identity=identity,
db=db,
threads=threads
)
if dbtype=='prot':
cmd ='blastp -query {query} -task blastp -db {db} -outfmt \'6 qseqid qstart qend qlen sseqid sstart send slen sstrand evalue length pident gaps gapopen stitle\' -num_threads {threads} -evalue 1E-20 > temp.tab'.format(
query=sample,
db=db,
threads=threads
)
print(cmd)
os.system(cmd)
#parse result
f=open('temp.tab')
line = f.readline()
result=[]
while line:
#result.append(line)
t=line.strip().split('\t')
blast_fields={'qseqid':t[0], 'qstart':t[1], 'qend':t[2], 'qlen':t[3],\
'sseqid':t[4], 'sstart':t[5], 'send':t[6], 'slen':t[7], 'sstrand':t[8],\
'evalue':t[9], 'length':t[10], 'pident':t[11], 'gaps':t[12], 'gapopen':t[13],\
'stitle':t[14]}
result.append(blast_fields)
line = f.readline()
f.close()
if os.path.exists('temp.tab'):
os.remove('temp.tab')
ret=[]
for s in result:
pccov = 100 * (int(s['length'])-int(s['gaps'])) / int(s['slen'])
if pccov<=mincov:
continue
ret.append(s)
return ret
def setupdb():
"""
make blast database from fasta file in db folder,
:param : fasta file (with folder'folder is the name of db and filename is 'sequences')
:return:
"""
#get name of db from file path:
for root, dirs, files in os.walk('db'):
for _file in files:
if _file.endswith(('sequences')):
name=os.path.basename(str(root))
#print (name)
seqfile=str(root)+'/'+_file
dbtype=sequence_type(seqfile)
cmd="makeblastdb -in {path} -title {name} -dbtype {type} -logfile /dev/null".format(
path=seqfile,
name=name,
type=dbtype
)
print (cmd)
os.system(cmd)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
220,
220,
220,
35874,
10812,
1262,
11975,
628,
198,
18009,
1166,
2106,
25,
198,
1783,
198,
42334,
12,
2999,
12,
1485,
25,
1703,
398,
873,
2727,
198,
198,
3... | 2.099928 | 1,391 |
#############################################################################
# Copyright (c) 2018 Eli Polonsky. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
#
#############################################################################
import pytest
from twine.commands import upload
from pyci.shell import secrets
from pyci.api import exceptions
from pyci.api.publish.pypi import PyPI
from pyci.tests import utils as test_utils
| [
29113,
29113,
7804,
4242,
2,
198,
2,
15069,
357,
66,
8,
2864,
25204,
2165,
684,
2584,
13,
1439,
2489,
10395,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
40... | 4.024896 | 241 |
from hamcrest import *
try:
except TypeError:
print 'Object class defined at ' + getattr(object, '__file__', 'NOWHERE')
raise
| [
6738,
8891,
66,
2118,
1330,
1635,
198,
198,
28311,
25,
198,
16341,
5994,
12331,
25,
198,
220,
220,
220,
3601,
705,
10267,
1398,
5447,
379,
705,
1343,
651,
35226,
7,
15252,
11,
705,
834,
7753,
834,
3256,
705,
45669,
39,
9338,
11537,
... | 2.8125 | 48 |
# -*- coding: utf-8 -*-
'''
File name: code\lowestcost_search\sol_328.py
Author: Vaidic Joshi
Date created: Oct 20, 2018
Python Version: 3.x
'''
# Solution to Project Euler Problem #328 :: Lowest-cost Search
#
# For more information see:
# https://projecteuler.net/problem=328
# Problem Statement
'''
We are trying to find a hidden number selected from the set of integers {1, 2, ..., n} by asking questions.
Each number (question) we ask, has a cost equal to the number asked and we get one of three possible answers: "Your guess is lower than the hidden number", or
"Yes, that's it!", or
"Your guess is higher than the hidden number".
Given the value of n, an optimal strategy minimizes the total cost (i.e. the sum of all the questions asked) for the worst possible case. E.g.
If n=3, the best we can do is obviously to ask the number "2". The answer will immediately lead us to find the hidden number (at a total cost = 2).
If n=8, we might decide to use a "binary search" type of strategy: Our first question would be "4" and if the hidden number is higher than 4 we will need one or two additional questions.
Let our second question be "6". If the hidden number is still higher than 6, we will need a third question in order to discriminate between 7 and 8.
Thus, our third question will be "7" and the total cost for this worst-case scenario will be 4+6+7=17.
We can improve considerably the worst-case cost for n=8, by asking "5" as our first question.
If we are told that the hidden number is higher than 5, our second question will be "7", then we'll know for certain what the hidden number is (for a total cost of 5+7=12).
If we are told that the hidden number is lower than 5, our second question will be "3" and if the hidden number is lower than 3 our third question will be "1", giving a total cost of 5+3+1=9.
Since 12>9, the worst-case cost for this strategy is 12. That's better than what we achieved previously with the "binary search" strategy; it is also better than or equal to any other strategy.
So, in fact, we have just described an optimal strategy for n=8.
Let C(n) be the worst-case cost achieved by an optimal strategy for n, as described above.
Thus C(1) = 0, C(2) = 1, C(3) = 2 and C(8) = 12.
Similarly, C(100) = 400 and C(n) = 17575.
Find C(n).
'''
# Solution
# Solution Approach
'''
'''
| [
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
7061,
6,
198,
220,
220,
220,
9220,
1438,
25,
2438,
59,
9319,
395,
15805,
62,
12947,
59,
34453,
62,
34256,
13,
9078,
198,
220,
220,
220,
6434,
25,
569,
1698,
291... | 3.44152 | 684 |
import chainer
import chainer.functions as F
import chainer.links as L
import numpy as np
| [
11748,
6333,
263,
198,
11748,
6333,
263,
13,
12543,
2733,
355,
376,
198,
11748,
6333,
263,
13,
28751,
355,
406,
198,
11748,
299,
32152,
355,
45941,
198
] | 3.333333 | 27 |
import pytest
from foreshadow.cachemanager import CacheManager
from foreshadow.utils import get_transformer
from foreshadow.utils.testing import get_file_path
@pytest.fixture()
def smart_child():
"""Get a defined SmartTransformer subclass, TestSmartTransformer.
Note:
Always returns StandardScaler.
"""
from foreshadow.smart import SmartTransformer
from foreshadow.concrete import StandardScaler
yield TestSmartTransformer
def test_smarttransformer_instantiate():
"""Instantiating a SmartTransformer should fail"""
from foreshadow.smart import SmartTransformer
# Note: cannot use fixture since this is not a subclass of SmartTransformer
with pytest.raises(TypeError) as e:
SmartTransformer()
assert "Can't instantiate abstract class" in str(e.value)
def test_smarttransformer_notsubclassed():
"""SmartTransformer (get_transformer TypeError) not being implemented."""
from foreshadow.smart.smart import SmartTransformer
# Note: cannot use fixture since the metaclass implementation sets flags on
# class definition time.
with pytest.raises(TypeError) as e:
TestSmartTransformer()
assert "Can't instantiate abstract class" in str(e.value)
def test_smarttransformer_invalidtransformer(smart_child, mocker):
"""Test SmartTransformer initialization """
import pandas as pd
boston_path = get_file_path("data", "boston_housing.csv")
df = pd.read_csv(boston_path)
smart = smart_child()
smart.pick_transformer = mocker.Mock()
smart.pick_transformer.return_value = InvalidClass()
with pytest.raises(ValueError) as e:
smart.fit(df[["crim"]])
assert (
"is neither a scikit-learn Pipeline, FeatureUnion, a "
"wrapped foreshadow transformer, nor None."
) in str(e.value)
def test_smarttransformer_function(smart_child):
"""Test overall SmartTransformer functionality
Args:
smart_child: A subclass of SmartTransformer.
"""
import numpy as np
import pandas as pd
from foreshadow.concrete import StandardScaler
boston_path = get_file_path("data", "boston_housing.csv")
df = pd.read_csv(boston_path)
smart = smart_child(cache_manager=CacheManager())
smart_data = smart.fit_transform(df[["crim"]])
std = StandardScaler()
std_data = std.fit_transform(df[["crim"]])
assert smart_data.equals(std_data)
smart.fit(df[["crim"]])
smart_data = smart.transform(df[["crim"]])
std.fit(df[["crim"]])
std_data = std.transform(df[["crim"]])
# TODO, remove when SmartTransformer is no longer wrapped
# Column names will be different, thus np.allclose() is used
assert np.allclose(smart_data, std_data)
def test_smarttransformer_fitself(smart_child, mocker):
"""Test that fit returns self.
This is important so that .fit().transform()
Args:
smart_child: A subclass of SmartTransformer
"""
import pandas as pd
smart = smart_child(override="Imputer", name="test")
assert smart.fit(pd.DataFrame([1, 2, 3])) == smart
def test_smarttransformer_function_override(smart_child):
"""Test SmartTransformer override through parameter specification.
Args:
smart_child: A subclass of SmartTransformer.
"""
import numpy as np
import pandas as pd
from foreshadow.concrete import SimpleImputer
boston_path = get_file_path("data", "boston_housing.csv")
df = pd.read_csv(boston_path)
smart = smart_child(
transformer="SimpleImputer",
name="impute",
cache_manager=CacheManager(),
)
smart_data = smart.fit_transform(df[["crim"]])
assert isinstance(smart.transformer, SimpleImputer)
# assert smart.transformer.name == "impute"
# not relevant anymore.
std = SimpleImputer()
std_data = std.fit_transform(df[["crim"]])
assert smart_data.equals(std_data)
smart.fit(df[["crim"]])
smart_data = smart.transform(df[["crim"]])
std.fit(df[["crim"]])
std_data = std.transform(df[["crim"]])
assert std_data.columns[0] == "crim"
# TODO, remove when SmartTransformer is no longer wrapped
# Column names will be different, thus np.allclose() is used
assert np.allclose(smart_data, std_data)
def test_smarttransformer_function_override_invalid(smart_child):
"""Test invalid SmartTransformer override transformer class.
Args:
smart_child: A subclass of SmartTransformer.
"""
from foreshadow.exceptions import TransformerNotFound
with pytest.raises(TransformerNotFound) as e:
smart_child(transformer="BAD", cache_manager=CacheManager())
assert "Could not find transformer BAD in" in str(e.value)
def test_smarttransformer_set_params_override(smart_child):
"""Test invalid SmartTransformer override transformer class.
Args:
smart_child: A subclass of SmartTransformer.
"""
from foreshadow.concrete import StandardScaler
smart = smart_child(transformer="SimpleImputer")
smart.set_params(**{"transformer": "StandardScaler"})
assert isinstance(smart.transformer, StandardScaler)
def test_smarttransformer_set_params_empty(smart_child):
"""Test SmartTransformer empty set_params does not fail.
Args:
smart_child: A subclass of SmartTransformer.
"""
smart = smart_child()
smart.set_params()
assert smart.transformer is None
def test_smarttransformer_set_params_default(smart_child):
"""Test SmartTransformer pass-through set_params on selected transformer.
Args:
smart_child: A subclass of SmartTransformer.
"""
smart = smart_child()
smart.fit([1, 2, 3])
before = smart.__dict__
params = smart.get_params()
smart = smart_child().set_params(**params)
assert smart.__dict__ == before
def test_smarttransformer_get_params(smart_child):
"""Test SmartTransformer override with init kwargs.
Args:
smart_child: A subclass of SmartTransformer.
"""
import numpy as np
cm = CacheManager()
smart = smart_child(
transformer="SimpleImputer",
missing_values=np.nan,
strategy="mean",
cache_manager=cm,
)
smart.fit([1, 2, 3])
params = smart.get_params()
print(params)
assert np.isnan(params["transformer__missing_values"])
del params["transformer__missing_values"]
assert params == {
"transformer": smart.transformer,
"name": None,
"keep_columns": False,
"y_var": False,
"force_reresolve": False,
"should_resolve": False,
"cache_manager": cm,
"check_wrapped": True,
"transformer__copy": True,
# "transformer__missing_values": np.nan,
"transformer__strategy": "mean",
"transformer__verbose": 0,
# "transformer__axis": 0,
"transformer__add_indicator": False,
"transformer__fill_value": None,
}
def test_smarttransformer_empty_inverse(smart_child):
"""Test SmartTransformer inverse_transform.
Args:
smart_child: A subclass of SmartTransformer.
"""
smart = smart_child(cache_manager=CacheManager())
smart.fit([1, 2, 10])
smart.inverse_transform([])
def test_smarttransformer_should_resolve(smart_child, mocker):
"""Test SmartTransformer should_resolve functionality.
First test if the initial behavior works, only resolves the transformer
once and does not update chosen transformer on new data.
Next, test if enabling should resolve allows the transformer choice to be
updated but only once.
Lastly, test if force_reresolve allows the transformer choice to be updated
on each fit.
Args:
smart_child: A subclass of SmartTransformer.
"""
import pandas as pd
from foreshadow.concrete import StandardScaler, MinMaxScaler
smart = smart_child(cache_manager=CacheManager())
smart.pick_transformer = pick_transformer
data1 = pd.DataFrame([0])
data2 = pd.DataFrame([1])
smart.fit(data1)
assert isinstance(smart.transformer, StandardScaler)
smart.fit(data2)
assert isinstance(smart.transformer, StandardScaler)
smart.should_resolve = True
smart.fit(data2)
assert isinstance(smart.transformer, MinMaxScaler)
smart.fit(data1)
assert isinstance(smart.transformer, MinMaxScaler)
smart.force_reresolve = True
smart.fit(data1)
assert isinstance(smart.transformer, StandardScaler)
smart.fit(data2)
assert isinstance(smart.transformer, MinMaxScaler)
@pytest.mark.parametrize(
"transformer,input_csv",
[
("StandardScaler", get_file_path("data", "boston_housing.csv")),
("OneHotEncoder", get_file_path("data", "boston_housing.csv")),
("TfidfTransformer", get_file_path("data", "boston_housing.csv")),
],
)
def test_make_pandas_transformer_fit(transformer, input_csv):
"""Test pandas_wrap has initial transformer fit functionality.
Args:
transformer: wrapped transformer class name
input_csv: dataset to test on
"""
import pandas as pd
transformer = get_transformer(transformer)()
df = pd.read_csv(input_csv)
assert transformer.fit(df) == transformer
@pytest.mark.parametrize(
"transformer,expected_path",
[
("StandardScaler", "sklearn.preprocessing"),
("OneHotEncoder", "category_encoders"),
("TfidfTransformer", "sklearn.feature_extraction.text"),
],
)
def test_make_pandas_transformer_meta(transformer, expected_path):
"""Test that the wrapped transformer has proper metadata.
Args:
transformer: wrapped transformer class name
expected_path: path to the initial transformer
Returns:
"""
expected_class = get_transformer(transformer, source_lib=expected_path)
transformer = get_transformer(transformer)()
assert isinstance(transformer, expected_class) # should remain a subclass
assert type(transformer).__name__ == expected_class.__name__
assert transformer.__doc__ == expected_class.__doc__
@pytest.mark.parametrize(
"transformer,kwargs,sk_path,input_csv",
[
(
"StandardScaler",
{},
"sklearn.preprocessing",
get_file_path("data", "boston_housing.csv"),
),
(
"OneHotEncoder",
{},
"category_encoders",
get_file_path("data", "boston_housing.csv"),
),
(
"TfidfTransformer",
{},
"sklearn.feature_extraction.text",
get_file_path("data", "boston_housing.csv"),
),
],
)
def test_make_pandas_transformer_transform(
transformer, kwargs, sk_path, input_csv
):
"""Test wrapped transformer has the initial transform functionality.
Args:
transformer: wrapped transformer class name
kwargs: key word arguments for transformer initialization
sk_path: path to the module containing the wrapped sklearn
transformer
input_csv: dataset to test on
"""
import pandas as pd
import numpy as np
from scipy.sparse import issparse
sk_transformer = get_transformer(transformer, source_lib=sk_path)(**kwargs)
transformer = get_transformer(transformer)(**kwargs)
df = pd.read_csv(input_csv)
crim_df = df[["crim"]]
transformer.fit(crim_df)
sk_transformer.fit(crim_df)
sk_out = sk_transformer.transform(crim_df)
if issparse(sk_out):
sk_out = sk_out.toarray()
assert np.array_equal(transformer.transform(crim_df).values, sk_out)
@pytest.mark.parametrize(
"transformer,sk_path,input_csv",
[
(
"StandardScaler",
"sklearn.preprocessing",
get_file_path("data", "boston_housing.csv"),
),
(
"TfidfTransformer",
"sklearn.feature_extraction.text",
get_file_path("data", "boston_housing.csv"),
),
],
)
def test_make_pandas_transformer_fit_transform(
transformer, sk_path, input_csv
):
"""Test wrapped transformer has initial fit_transform functionality.
Args:
transformer: wrapped transformer
sk_path: path to the module containing the wrapped sklearn
transformer
input_csv: dataset to test on
"""
import pandas as pd
import numpy as np
from scipy.sparse import issparse
sk_transformer = get_transformer(transformer, source_lib=sk_path)()
transformer = get_transformer(transformer)()
df = pd.read_csv(input_csv)
crim_df = df[["crim"]]
sk_out = sk_transformer.fit_transform(crim_df)
if issparse(sk_out):
sk_out = sk_out.toarray()
assert np.array_equal(transformer.fit_transform(crim_df).values, sk_out)
@pytest.mark.parametrize(
"transformer,sk_path",
[
("StandardScaler", "sklearn.preprocessing"),
("TfidfTransformer", "sklearn.feature_extraction.text"),
],
)
def test_make_pandas_transformer_init(transformer, sk_path):
"""Test pandas_wrap has initial transformer init functionality.
Should be able to accept any parameters from the sklearn transformer and
initialize on the wrapped instance. They should also posses the is_wrapped
method.
Args:
transformer: wrapped transformer
sk_path: path to the module containing the wrapped sklearn
transformer
"""
sk_transformer = get_transformer(transformer, source_lib=sk_path)()
params = sk_transformer.get_params()
transformer = get_transformer(transformer)(**params)
| [
11748,
12972,
9288,
198,
198,
6738,
1674,
19106,
13,
23870,
37153,
1330,
34088,
13511,
198,
6738,
1674,
19106,
13,
26791,
1330,
651,
62,
7645,
16354,
198,
6738,
1674,
19106,
13,
26791,
13,
33407,
1330,
651,
62,
7753,
62,
6978,
628,
628,... | 2.645479 | 5,176 |
import h2o
h2o.init()
datasets = "https://raw.githubusercontent.com/DarrenCook/h2o/bk/datasets/"
data = h2o.import_file(datasets + "iris_wheader.csv")
y = "class"
x = data.names
x.remove(y)
train, valid, test = data.split_frame([0.75,0.15])
from h2o.estimators.random_forest import H2ORandomForestEstimator
m = H2ORandomForestEstimator(
ntrees=100,
stopping_metric="misclassification",
stopping_rounds=3,
stopping_tolerance=0.02, #2%
max_runtime_secs=60,
model_id="RF:stop_test"
)
m.train(x, y, train, validation_frame=valid)
| [
11748,
289,
17,
78,
198,
71,
17,
78,
13,
15003,
3419,
198,
198,
19608,
292,
1039,
796,
366,
5450,
1378,
1831,
13,
12567,
43667,
13,
785,
14,
32708,
918,
28937,
14,
71,
17,
78,
14,
65,
74,
14,
19608,
292,
1039,
30487,
198,
7890,
... | 2.39207 | 227 |
from __future__ import division
def strongly_connected_components(
successors_by_node,
omit_single_node_components=True,
low_infinite=2**30):
"""
successors_by_node = {
"node1": ["successor1", "successor2"],
"node2": ["successor1", "successor3"]
}
http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
http://www.logarithmic.net/pfh-files/blog/01208083168/sort.py
Original implementation (by Paul Harrison), modified to accommodate
successors that do not appear as a key in successors_by_node.
"""
result = []
stack = []
low = {}
for node in successors_by_node:
visit(node)
return result
| [
6738,
11593,
37443,
834,
1330,
7297,
198,
198,
4299,
7634,
62,
15236,
62,
5589,
3906,
7,
198,
220,
220,
220,
220,
220,
41491,
62,
1525,
62,
17440,
11,
198,
220,
220,
220,
220,
220,
42848,
62,
29762,
62,
17440,
62,
5589,
3906,
28,
... | 2.807692 | 234 |
# Misc functions to operate on fasta files in the frame of the A. castellanii
# genome analysis project
# cmdoret, 20190502
from Bio import SeqIO
from BCBio import GFF
import time
import re
import os
def safe_request(fun):
"""
Wraps function requesting data to allow safe errors and retry.
Parameters
----------
fun : python function
The python function that queries a server
Returns
-------
wrapped_f : python function
A wrapped version of the input function which will call itself recursively
every 5 seconds if the server is overloaded and will return None if the
query record does not exist
"""
return wrapped_f
def retrieve_refseq_ids(in_ids, db, out_fa):
"""
Given a refseq db in fasta format and a list of incomplete query refseq ID,
extract the queried genomes into a new fasta file by matching the IDs.
Parameters
----------
in_ids : str
Path to a file containing one refseq ID per line.
db : str
Path to the refseq database in fasta format.
out_fa : str
Path to the output fasta file containing query genomes.
"""
query_ids = open(in_ids).read().splitlines()
found = []
with open(out_fa, "w") as genomes:
for query_rec in SeqIO.parse(db, "fasta"):
if re.search("|".join(query_ids), query_rec.id):
query_rec.id = re.search(r"[^\.]*", query_rec.id).group()
found.append(query_rec.id)
SeqIO.write(query_rec, genomes, "fasta")
print(
"%d genomes found among the %d queries." % (len(found), len(query_ids))
)
@safe_request
def fetch_fasta(seq_id, db="nucleotide", email="someone@email.com"):
"""
Downloads a genome corresponding to input sequence ID.
Parameters
----------
seq_id : str
A refseq sequence accession ID (e.g. NC_19130).
db : str
A valid Entrez database name. Some possible values are: cdd, gap, dbvar,
epigenomics, nucest, gene, genome, gds, geoprofiles, nucgss, homologene,
mesh, nuccore, protein, snp, sra, taxonomy, unigene
email : str
User email address to download from refseq.
Returns
-------
seq_record : Bio.Seq
Seq object containing the query Fasta record.
"""
Entrez.email = email
with Entrez.efetch(
db=db, rettype="fasta", retmode="text", id=seq_id
) as handle:
seq_record = SeqIO.read(handle, "fasta")
return seq_record
@safe_request
def retrieve_id_annot(id, out_gff, mode="w", email="someone@email.com"):
"""
Queries genbank record for an input ID and retrieves the genome annotations
in GFF format. Amino acid sequences are included in the GFF.
Parameters
----------
id : str
Sequence accession ID to query via Entrez.
out_gff : str
Path to the output GFF file.
mode : str
Mode in which to open the output GFF file. Should be 'w' or 'a'.
email : str
Personal email to provide when querying Entrez.
"""
handle = Entrez.efetch(
id=id,
db="nucleotide",
email=email,
rettype="gbwithparts",
retmode="full",
)
record = SeqIO.parse(handle, "genbank")
with open(out_gff, mode) as gff_handle:
GFF.write(record, gff_handle, include_fasta=False)
def gff_seq_extract(gff, fa):
"""
Extracts sequence from the attributes of CDS in a GFF into a fasta file.
The fasta headers are in the format >chrom_id|prot_id
Parameters
----------
gff_in : str
Path to the input GFF file containing "translation" and "protein_id" attributes.
fa_out : str
Path to the fasta file where the protein sequences should be written.
"""
with open(gff, "r") as gff_in, open(fa, "w") as fa_out:
for line in gff_in:
seq_ok, id_ok = False, False
fields = line.split("\t")
if fields[2] == "CDS" and not fields[0].startswith("#>"):
desc = fields[-1].split(";")
for attr in desc:
if re.search("protein_id=", attr):
prot_id = attr.split("=")[1]
id_ok = True
elif re.search("translation=", attr):
seq = attr.split("=")[1]
seq_ok = True
if seq_ok and id_ok:
header = ">" + fields[0] + "|" + prot_id
fa_out.writelines([header + "\n", seq])
| [
2,
29882,
5499,
284,
8076,
319,
3049,
64,
3696,
287,
262,
5739,
286,
262,
317,
13,
3350,
695,
3216,
72,
198,
2,
19270,
3781,
1628,
198,
2,
23991,
9997,
11,
580,
3829,
35126,
198,
6738,
16024,
1330,
1001,
80,
9399,
198,
6738,
11843,
... | 2.343445 | 1,945 |
import unittest
from expression_builder.expression_builder import ExpressionBuilder
# noinspection PyPep8Naming
| [
11748,
555,
715,
395,
198,
198,
6738,
5408,
62,
38272,
13,
38011,
62,
38272,
1330,
41986,
32875,
628,
198,
220,
220,
220,
1303,
645,
1040,
14978,
9485,
47,
538,
23,
45,
3723,
198
] | 3.606061 | 33 |
# Static typing
from typing import List
def getTrackName(apiResponse) -> List[str]:
'''
This function returns the string name of a single track from the reponse provided by the spotify API.
https://developer.spotify.com/console/get-playlist-tracks/
'''
name = apiResponse['track']['name']
artists = []
# Iterate through each artist
for artist in apiResponse['track']['artists']:
artists.append(artist['name'])
artistName = ', '.join(artists)
return f"{artistName} - {name}"
| [
2,
36125,
19720,
198,
6738,
19720,
1330,
7343,
198,
198,
4299,
651,
24802,
5376,
7,
15042,
31077,
8,
4613,
7343,
58,
2536,
5974,
198,
220,
220,
220,
705,
7061,
198,
220,
220,
220,
770,
2163,
5860,
262,
4731,
1438,
286,
257,
2060,
26... | 2.87027 | 185 |
RD_VERSION = 0x00
WRITE_INFO = 0x01
READ_INFO = 0x02
SEND_RAW = 0x03
| [
198,
35257,
62,
43717,
796,
657,
87,
405,
198,
18564,
12709,
62,
10778,
796,
657,
87,
486,
198,
15675,
62,
10778,
220,
796,
657,
87,
2999,
198,
50,
10619,
62,
20530,
796,
657,
87,
3070,
628
] | 2 | 36 |
from django.contrib.auth.backends import BaseBackend
from django.contrib.auth.models import User
from integrations.discord.models import DiscordCommunity, DiscordUser
from urllib import parse
import urllib.request
import json
import logging
logger = logging.getLogger(__name__)
| [
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
1891,
2412,
1330,
7308,
7282,
437,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
6738,
4132,
9143,
13,
15410,
585,
13,
27530,
1330,
39462,
20012,
11,
... | 3.531646 | 79 |
from epiinfo import CSUtilities
from epiinfo import randata
| [
6738,
2462,
72,
10951,
1330,
9429,
18274,
2410,
198,
6738,
2462,
72,
10951,
1330,
43720,
1045,
198
] | 3.529412 | 17 |
# Q = int(input())
while (True):
try:
x,n = input().split()
x = float(x)
n = int(n)
print( '{:.4f}'.format(search(x,n)) )
except:
break
| [
198,
2,
1195,
796,
493,
7,
15414,
28955,
198,
4514,
357,
17821,
2599,
198,
197,
28311,
25,
198,
197,
197,
87,
11,
77,
796,
5128,
22446,
35312,
3419,
198,
197,
197,
87,
796,
12178,
7,
87,
8,
198,
197,
197,
77,
796,
493,
7,
77,
... | 1.935065 | 77 |
# !usr/bin/env python
# -*- coding:utf-8 -*-
if __name__ == "__main__":
print("hello")
| [
198,
2,
5145,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
220,
532,
9,
12,
628,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
3601,
7203,
31373,
4943,
... | 2.155556 | 45 |
if __name__ == '__main__':
with open("input.txt", 'r') as f:
data = f.readlines()
dancers="abcdefghijklmnop"
print(dancers)
instructions = data[0].strip().split(',')
for i in range(0, 1000000000):
for instruction in instructions:
command=instruction[0]
pars = instruction[1:]
if command=="s":
length = int(pars)
tail = dancers[-length:]
dancers=tail+dancers[0:-length]
elif command=="p":
a1, a2 = pars.split('/')
i1 = dancers.find(a1)
i2 = dancers.find(a2)
dancers = [char for char in dancers]
dancers[i1]=a2
dancers[i2]=a1
dancers = "".join(dancers)
elif command=="x":
a1, a2 = [int(x) for x in pars.split('/')]
dancers = [char for char in dancers]
dancers[a1], dancers[a2] = dancers[a2], dancers[a1]
dancers = "".join(dancers)
if dancers=="abcdefghijklmnop":
print("found it")
print(i)
print(dancers)
| [
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
351,
1280,
7203,
15414,
13,
14116,
1600,
705,
81,
11537,
355,
277,
25,
198,
220,
220,
220,
220,
220,
220,
220,
1366,
796,
277,
13,
961,
6615,
3419,
628,
... | 1.821317 | 638 |
# SyntaxError: annotated name 'var' can't be global
var = 0
foo() | [
2,
26375,
897,
12331,
25,
24708,
515,
1438,
705,
7785,
6,
460,
470,
307,
3298,
198,
198,
7785,
796,
657,
198,
21943,
3419
] | 2.869565 | 23 |
import ast
import pkg_resources
import unittest
from rgkit.settings import settings
from rgkit import rg
map_data = ast.literal_eval(
open(pkg_resources.resource_filename('rgkit', 'maps/default.py')).read())
settings.init_map(map_data)
| [
11748,
6468,
198,
11748,
279,
10025,
62,
37540,
198,
11748,
555,
715,
395,
198,
198,
6738,
48670,
15813,
13,
33692,
1330,
6460,
198,
6738,
48670,
15813,
1330,
48670,
198,
198,
8899,
62,
7890,
796,
6468,
13,
18250,
1691,
62,
18206,
7,
... | 3.0375 | 80 |
from sklearn.linear_model._base import LinearClassifierMixin, SparseCoefMixin
from sklearn.base import BaseEstimator
from sklearn.utils import check_X_y
from sklearn.utils.multiclass import unique_labels
from abc import abstractmethod
import numpy as np
| [
6738,
1341,
35720,
13,
29127,
62,
19849,
13557,
8692,
1330,
44800,
9487,
7483,
35608,
259,
11,
1338,
17208,
7222,
891,
35608,
259,
198,
6738,
1341,
35720,
13,
8692,
1330,
7308,
22362,
320,
1352,
198,
6738,
1341,
35720,
13,
26791,
1330,
... | 3.413333 | 75 |
import sys, argparse
parser = argparse.ArgumentParser(
description='''Download and extract zip archive(s) from
https://www.sec.gov/dera/data/financial-statement-data-sets.html.''')
parser.add_argument(
'-path',
type=str,
help='''Directory path that zip archives will be saved to. May be absolute
or relative. Defaults to (creates) an ./sec_zip subdirectory within the
current working directory when no argument is specified.''')
parser.add_argument(
'-destination', '-dest',
type=str,
help='''Directory path to a folder that extracted data files will be saved
to. May be absolute or relative. Defaults to the current working directory
if no argument is provided.''')
parser.add_argument(
'-qtr',
type=str,
help='''Single financial quarter (ie: 2017q1) to be downloaded from sec.gov.
When used, qtr overrides -startqtr and -endqtr arguments if also used. Use
-qtr latest to download the latest available quarter.''')
parser.add_argument(
'-startqtr', '-start',
type=str,
default='2009q1',
help='''The starting financial quarter when downloading a range of zip
archives from sec.gov. Defaults to 2009q1, the earliest financial period
available.''')
parser.add_argument(
'-endqtr', '-end',
type=str,
help='''The ending financial quarter when downloading a range of zip
archives from sec.gov. Defaults to the latest available quarter when no
argument is specified.''')
parser.add_argument(
'-singledir', '-s',
action='store_true',
help='''By default zip archives are extracted to subdirectories that mirror
each individual archive. Use the -singledir switch to extract all data files
directly to -destination, appending filenames with the applicable financial
period.''')
parser.add_argument(
'-rmzip', '-r',
action='store_true',
help='''By default zip archives will persist after extraction. Use the
-rmzip switch to remove each archive after extraction, as well as any
parent directory specified by -path (if empty) after extraction.''')
parser.add_argument(
'-download', '-d',
action='store_true',
help='''Use the -download switch to download the specified zip archive(s)
without subsequent file extraction.''')
parser.add_argument(
'-extract', '-e',
action='store_true',
help='''Use the -extract switch to extract the specified zip archive(s) (no
download).''')
| [
11748,
25064,
11,
1822,
29572,
198,
198,
48610,
796,
1822,
29572,
13,
28100,
1713,
46677,
7,
198,
220,
220,
220,
6764,
28,
7061,
6,
10002,
290,
7925,
19974,
15424,
7,
82,
8,
422,
220,
198,
220,
220,
220,
3740,
1378,
2503,
13,
2363,
... | 3.035496 | 817 |
from itertools import groupby
from unstdlib import now, get_many
from sqlalchemy import orm
from briefmetrics import api, model, tasks
from briefmetrics.web.environment import Response, httpexceptions
from briefmetrics.lib.controller import Controller, Context
from briefmetrics.lib.exceptions import APIControllerError, APIError
from .api import expose_api, handle_api
@expose_api('report.create')
@expose_api('report.combine')
@expose_api('report.delete')
@expose_api('subscription.create')
@expose_api('subscription.delete')
@expose_api('funnel.create')
@expose_api('funnel.clear')
# TODO: Move this somewhere else?
| [
6738,
340,
861,
10141,
1330,
1448,
1525,
198,
6738,
15014,
67,
8019,
1330,
783,
11,
651,
62,
21834,
198,
6738,
44161,
282,
26599,
1330,
393,
76,
198,
198,
6738,
4506,
4164,
10466,
1330,
40391,
11,
2746,
11,
8861,
198,
6738,
4506,
4164... | 3.20603 | 199 |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from flask import session
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.model_selection import train_test_split, cross_val_score, KFold
from sklearn.tree import DecisionTreeClassifier, plot_tree
from sklearn.metrics import (
accuracy_score,
classification_report,
confusion_matrix,
make_scorer,
)
from skylearn.preprocessing import generic_preprocessing as gprep
classification_Reports = []
confusion_Matrix = []
accuracies = []
target_Names = []
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
384,
397,
1211,
355,
3013,
82,
198,
6738,
42903,
1330,
6246,
198,
6738,
1341,
35720,
13,
38... | 3.272222 | 180 |
# vim: set fileencoding=utf-8 :
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import testsuite
# Bootstrap the testsuite
testsuite.setup()
import os
import re
import StringIO
import urllib
from conary.lib import util
import testbase
from catalogService.restClient import ResponseError
from catalogService.rest import baseDriver
from catalogService.rest.drivers import vcloud as dvcloud
from catalogService.rest.models import clouds
from catalogService.rest.models import credentials
from catalogService.rest.models import descriptor
from catalogService.rest.models import images
from catalogService.rest.models import instances
from catalogService_test import mockedData
_xmlNewCloud = """
<descriptorData>
<alias>vcloud2</alias>
<description>description for vcloud2</description>
<serverName>vcloud2.eng.rpath.com</serverName>
<organization>rPath</organization>
<port>2443</port>
</descriptorData>"""
_xmlNewCreds = """
<descriptorData>
<username>abc</username>
<password>12345678</password>
</descriptorData>
"""
if __name__ == "__main__":
testsuite.main()
| [
2,
43907,
25,
900,
2393,
12685,
7656,
28,
40477,
12,
23,
1058,
198,
2,
198,
2,
15069,
357,
66,
8,
35516,
5136,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2... | 3.44186 | 473 |
#!/usr/bin/env python
"""
given a point relative to the baseframe, calculate the necessary pan+tilt angle to point camera at it
given a pan+tilt angle, calculate the point relative to the base frame
"""
import unittest
#https://github.com/ros/geometry/blob/hydro-devel/tf/src/tf/transformations.py
from tf import transformations as tf
import numpy as np
from ros_homebot_python import constants as c
from ros_homebot_python import utils
if __name__ == '__main__':
unittest.main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
35569,
257,
966,
3585,
284,
262,
2779,
14535,
11,
15284,
262,
3306,
3425,
10,
83,
2326,
9848,
284,
966,
4676,
379,
340,
198,
35569,
257,
3425,
10,
83,
2326,
9848,
11,
152... | 3.189542 | 153 |
import logging
from os import getenv
__all__ = ['log', 'check_for_tokens']
logging.basicConfig(level=logging.INFO,
format='%(levelname)s: %(asctime)s -'
' %(funcName)s - %(message)s')
log = logging.getLogger('webkin')
| [
11748,
18931,
198,
6738,
28686,
1330,
651,
24330,
198,
198,
834,
439,
834,
796,
37250,
6404,
3256,
705,
9122,
62,
1640,
62,
83,
482,
641,
20520,
198,
198,
6404,
2667,
13,
35487,
16934,
7,
5715,
28,
6404,
2667,
13,
10778,
11,
198,
22... | 2.062992 | 127 |
""" This module contains configuration utilities. """
import json
import os
FJSON = 'dautils.json'
def file_exists(tocheck):
""" Checks whether a file exists.
:param tocheck: The path of the file.
:returns: True if the file exists.
"""
return os.path.isfile(tocheck)
def read_rc():
""" Reads a configuration file in the JSON format.
:returns: A dictionary representing the contents \
of the configuration file.
"""
old = None
if file_exists(FJSON):
with open(FJSON) as json_file:
old = json.load(json_file)
return old
def update_rc(key, updates):
""" Updates the JSON configuration file.
:param key: The key of the record to update.
:param updates: Values with which to update the relevant record.
"""
config = {key: updates}
old = read_rc()
if old:
old.update(config)
config = old
with open(FJSON, 'w') as json_file:
json.dump(config, json_file, indent=4, sort_keys=True)
| [
37811,
770,
8265,
4909,
8398,
20081,
13,
37227,
198,
11748,
33918,
198,
11748,
28686,
628,
198,
37,
40386,
796,
705,
67,
2306,
4487,
13,
17752,
6,
628,
198,
4299,
2393,
62,
1069,
1023,
7,
83,
30848,
694,
2599,
198,
220,
220,
220,
37... | 2.676316 | 380 |
from django.urls import path
from .views import BlogDetailView, BlogListView
urlpatterns = [
path('post/<int:pk>/', BlogDetailView.as_view(), name='post_detail'),
path('', BlogListView.as_view(), name='home'),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
198,
6738,
764,
33571,
1330,
14001,
11242,
603,
7680,
11,
14001,
8053,
7680,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
10786,
7353,
14,
27,
600,
25,
79,
74,
29,... | 2.707317 | 82 |
from time import sleep
import rospy
from geometry_msgs.msg import Quaternion
from geometry_msgs.msg import Vector3
from mavros_msgs.msg import Thrust
from mavros_msgs.msg import AttitudeTarget
node = rospy.init_node("thrust_test")
rate = rospy.Rate(20) # Hz
pub = rospy.Publisher('/mavros/setpoint_raw/attitude', AttitudeTarget, queue_size=1)
print("looping!")
while True:
msg = AttitudeTarget()
msg.orientation = Quaternion(x=0.0, y=0.0, z=0.0, w=1.0)
msg.body_rate = Vector3(x=0.0, y=0.0, z=0.0)
msg.thrust = -0.001
pub.publish(msg)
rate.sleep()
print("Sending thrust [%f]!" % msg.thrust)
| [
6738,
640,
1330,
3993,
198,
198,
11748,
686,
2777,
88,
198,
6738,
22939,
62,
907,
14542,
13,
19662,
1330,
2264,
9205,
295,
198,
6738,
22939,
62,
907,
14542,
13,
19662,
1330,
20650,
18,
198,
6738,
285,
615,
4951,
62,
907,
14542,
13,
... | 2.349624 | 266 |
"""
Created on Mar 8, 2020
@author: hwase
"""
import settings as S
import yagmail
if __name__ == '__main__':
contents = ['This is the body, and here is just text http://somedomain/image.png',
'You can find an audio file attached.', '/local/path/song.mp3']
# yagmail.register(sender_email, 'vwxaotmoawdfwxzx') # trader@2020
yagmail.SMTP(S.MAIL_SENDER, S.MAIL_PASSWORD).send('roysten.tan@gmail.com', 'test', contents)
| [
37811,
201,
198,
41972,
319,
1526,
807,
11,
12131,
201,
198,
201,
198,
31,
9800,
25,
289,
86,
589,
201,
198,
37811,
201,
198,
11748,
6460,
355,
311,
201,
198,
11748,
331,
363,
4529,
201,
198,
201,
198,
201,
198,
361,
11593,
3672,
... | 2.345178 | 197 |
from game import Game, TAVERN, TavernTile
import math
| [
6738,
983,
1330,
3776,
11,
21664,
5959,
45,
11,
32693,
35103,
198,
11748,
10688,
198
] | 3.6 | 15 |
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
if __name__ == '__main__':
item = LianjiaItem()
item['联系方式'] = 'fasgseg'
for k, v in item.items():
print(k, v) | [
2,
2896,
500,
994,
262,
4981,
329,
534,
15881,
276,
3709,
198,
2,
198,
2,
4091,
10314,
287,
25,
198,
2,
3740,
1378,
31628,
13,
1416,
2416,
88,
13,
2398,
14,
268,
14,
42861,
14,
4852,
873,
14,
23814,
13,
6494,
198,
198,
11748,
15... | 2.262295 | 122 |
"""Minimal Oware server
This server doesn't actually implement the Oware game. It simply
allows a single client to connect, and speaks the game protocol well
enough to allow that client to run.
"""
import pickle
import socket
from ..model.board import Board
from ..util import ByteFIFO
if __name__ == '__main__':
main()
| [
37811,
9452,
4402,
440,
1574,
4382,
198,
198,
1212,
4382,
1595,
470,
1682,
3494,
262,
440,
1574,
983,
13,
632,
2391,
198,
47205,
257,
2060,
5456,
284,
2018,
11,
290,
9209,
262,
983,
8435,
880,
198,
48229,
284,
1249,
326,
5456,
284,
... | 3.604396 | 91 |
# Import standard python modules
import threading
import time
import os
import sys
# Import paho MQTT client.
import paho.mqtt.client as mqtt
# Define callback functions which will be called when certain events happen.
# Define Functions for Threading
if __name__ == "__main__":
if(len(sys.argv)!=2):
sys.stderr.write('Usage: "{0}" $hostAddress\n'.format(sys.argv[0]))
os._exit(1)
# Setup MQTT Client Instance
client = mqtt.Client()
# Setup Callbacks
client.on_connect = on_connect
client.on_disconnect=on_disconnect
# Setup Control Vars
client.connectedFlag=False
client.messageSend="0"
# Connect to the Broker server.
print("Conectando al broker")
client.connect(host=sys.argv[1], port=1883, keepalive=60)
client.loop_start()
while not client.connectedFlag:
print("Esperando conexión")
time.sleep(1)
# Setup Threading, to publish message every 10 seconds
hilo0=threading.Thread(target=send_message, args=(client,))
hilo0.start()
# Mod publish value
while client.messageSend!="x": # char 'x' to exit
client.messageSend=input("Ingrese nuevo valor para el tanque\n")
client.loop_stop()
client.disconnect()
| [
2,
17267,
3210,
21015,
13103,
198,
11748,
4704,
278,
198,
11748,
640,
198,
11748,
28686,
198,
11748,
25064,
198,
198,
2,
17267,
279,
17108,
337,
48,
15751,
5456,
13,
198,
11748,
279,
17108,
13,
76,
80,
926,
13,
16366,
355,
285,
80,
... | 2.809756 | 410 |
import os
import numpy as np
import chainer
| [
11748,
28686,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
6333,
263,
198
] | 3.214286 | 14 |
# coding: utf-8
import math
import ctypes
from objc_util import parse_struct
glk2 = parse_struct('{glkvec2=fff}')
'GLKVector2Make', 'GLKVector2MakeWithArray', 'GLKVector2Length', 'GLKVector2Distance', 'GLKVector2Negate', 'GLKVector2Normalize', 'GLKVector2AddScalar', 'GLKVector2SubtractScalar', 'GLKVector2MultiplyScalar', 'GLKVector2DivideScalar', 'GLKVector2Add', 'GLKVector2Subtract', 'GLKVector2Multiply', 'GLKVector2Divide', 'GLKVector2DotProduct', 'GLKVector2Lerp', 'GLKVector2Project', 'GLKVector2Maximum', 'GLKVector2Minimum', 'GLKVector2EqualToScalar', 'GLKVector2AllEqualToVector4', 'GLKVector2AllGreaterThanOrEqualToScalar',
__all__ = ['GLKVector2', 'setGLKVector2', 'getGLKVector2']
if __name__ == '__main__':
v = GLKVector2Make(1, 1)
print(v)
print(GLKVector2AddScalar(v, 10))
print(GLKVector2Length(GLKVector2Normalize(GLKVector2AddScalar(v, 10))))
print(GLKVector2Minimum(v, GLKVector2MultiplyScalar(v, 35)))
print(GLKVector2Normalize(GLKVector2AddScalar(v, 10)))
print(GLKVector2AllGreaterThanScalar(v, 1.1))
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
11748,
10688,
198,
11748,
269,
19199,
198,
6738,
26181,
66,
62,
22602,
1330,
21136,
62,
7249,
628,
628,
628,
198,
4743,
74,
17,
796,
21136,
62,
7249,
10786,
90,
4743,
74,
35138,
17,
28,
20972,
... | 2.41387 | 447 |
from MaltegoTransform import *
from mcrits_utils import *
crits = mcrits()
me = MaltegoTransform()
me.parseArguments(sys.argv)
id_ = me.getVar('id')
crits_type = me.getVar('crits_type')
for result in crits.get_related(crits_type, id_, 'Actor'):
# For each related object, get the details.
obj = crits.get_single_obj('Actor', result[1])
# For each identifer, get the name.
identifiers = []
for id_dict in obj['identifiers']:
id_obj = crits.get_single_obj('ActorIdentifier',
id_dict['identifier_id'])
identifiers.append(id_obj['name'])
ent = me.addEntity(result[0], obj['name'])
ent.addAdditionalFields(fieldName='id',
displayName='id',
value=result[1])
ent.addAdditionalFields(fieldName='aliases',
displayName='Aliases',
value=obj['aliases'])
ent.addAdditionalFields(fieldName='identifiers',
displayName='Identifiers',
value=identifiers)
me.returnOutput()
| [
6738,
4434,
660,
2188,
41762,
1330,
1635,
198,
6738,
285,
22213,
82,
62,
26791,
1330,
1635,
198,
198,
22213,
82,
796,
285,
22213,
82,
3419,
198,
198,
1326,
796,
4434,
660,
2188,
41762,
3419,
198,
1326,
13,
29572,
28100,
2886,
7,
17597... | 2.060886 | 542 |
#!/usr/bin/env python
import sys
import os
import unittest
from mock import patch, MagicMock, ANY, call
import pylacuna.core.map
import ast
from sys import version_info
if version_info.major == 2:
import __builtin__ as builtins # pylint:disable=import-error
else:
import builtins # pylint:disable=import-error
GET_STAR_MAP_RESPONSE = ast.literaleval('''
{u'id': 7,
u'jsonrpc': u'2.0',
u'result': {u'stars': [{u'bodies': [{u'id': u'356648',
u'image': u'p18-1',
u'name': u'Kleahien 1',
u'orbit': u'1',
u'ore': {u'anthracite': 1,
u'bauxite': 4200,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 3200,
u'fluorite': 1,
u'galena': 1,
u'goethite': 1,
u'gold': 1,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 1,
u'trona': 1,
u'uraninite': 2600,
u'zircon': 1},
u'size': u'50',
u'star_id': u'49528',
u'star_name': u'Kleahien',
u'type': u'habitable planet',
u'water': 7600,
u'x': u'416',
u'y': u'-261',
u'zone': u'1|-1'},
{u'id': u'356649',
u'image': u'p28-2',
u'name': u'Kleahien 2',
u'orbit': u'2',
u'ore': {u'anthracite': 1,
u'bauxite': 1500,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 1,
u'galena': 1,
u'goethite': 2500,
u'gold': 1,
u'gypsum': 2000,
u'halite': 3500,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 1,
u'trona': 1,
u'uraninite': 1500,
u'zircon': 1},
u'size': u'68',
u'star_id': u'49528',
u'star_name': u'Kleahien',
u'type': u'habitable planet',
u'water': 9230,
u'x': u'417',
u'y': u'-262',
u'zone': u'1|-1'},
{u'empire': {u'alignment': u'hostile',
u'id': u'38995',
u'is_isolationist': u'0',
u'name': u'dev'},
u'id': u'356650',
u'image': u'p38-3',
u'name': u'Polyhedra 3',
u'orbit': u'3',
u'ore': {u'anthracite': 1,
u'bauxite': 1,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 3000,
u'galena': 1,
u'goethite': 1,
u'gold': 1,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 7000,
u'trona': 1,
u'uraninite': 1,
u'zircon': 1},
u'size': u'45',
u'star_id': u'49528',
u'star_name': u'Kleahien',
u'type': u'habitable planet',
u'water': 8000,
u'x': u'417',
u'y': u'-264',
u'zone': u'1|-1'},
{u'id': u'356651',
u'image': u'a22-5',
u'name': u'Kleahien 5',
u'orbit': u'5',
u'ore': {u'anthracite': 1,
u'bauxite': 900,
u'beryl': 1,
u'chalcopyrite': 700,
u'chromite': 900,
u'fluorite': 100,
u'galena': 800,
u'goethite': 400,
u'gold': 100,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 1,
u'magnetite': 500,
u'methane': 1,
u'monazite': 300,
u'rutile': 800,
u'sulfur': 1,
u'trona': 400,
u'uraninite': 1,
u'zircon': 200},
u'size': u'6',
u'star_id': u'49528',
u'star_name': u'Kleahien',
u'type': u'asteroid',
u'x': u'414',
u'y': u'-265',
u'zone': u'1|-1'},
{u'id': u'356652',
u'image': u'p37-6',
u'name': u'Kleahien 6',
u'orbit': u'6',
u'ore': {u'anthracite': 2000,
u'bauxite': 1,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 1,
u'galena': 1,
u'goethite': 1,
u'gold': 1,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 2000,
u'magnetite': 2000,
u'methane': 1,
u'monazite': 2000,
u'rutile': 1,
u'sulfur': 1,
u'trona': 1,
u'uraninite': 2000,
u'zircon': 1},
u'size': u'60',
u'star_id': u'49528',
u'star_name': u'Kleahien',
u'type': u'habitable planet',
u'water': 6225,
u'x': u'413',
u'y': u'-264',
u'zone': u'1|-1'},
{u'id': u'356653',
u'image': u'p7-8',
u'name': u'Kleahien 8',
u'orbit': u'8',
u'ore': {u'anthracite': 1,
u'bauxite': 1700,
u'beryl': 1000,
u'chalcopyrite': 2800,
u'chromite': 1,
u'fluorite': 1,
u'galena': 1,
u'goethite': 2400,
u'gold': 1,
u'gypsum': 2100,
u'halite': 1,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 1,
u'trona': 1,
u'uraninite': 1,
u'zircon': 1},
u'size': u'57',
u'star_id': u'49528',
u'star_name': u'Kleahien',
u'type': u'habitable planet',
u'water': 5700,
u'x': u'414',
u'y': u'-261',
u'zone': u'1|-1'}],
u'color': u'magenta',
u'id': u'49528',
u'influence': u'0',
u'name': u'Kleahien',
u'x': u'415',
u'y': u'-263',
u'zone': u'1|-1'},
{u'bodies': [{u'id': u'359536',
u'image': u'pg4-1',
u'name': u'Chea Oobixy 1',
u'orbit': u'1',
u'ore': {u'anthracite': 1,
u'bauxite': 1,
u'beryl': 1,
u'chalcopyrite': 2000,
u'chromite': 1,
u'fluorite': 1,
u'galena': 1,
u'goethite': 1,
u'gold': 1,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 1,
u'magnetite': 14000,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 4000,
u'trona': 1,
u'uraninite': 1,
u'zircon': 1},
u'size': u'105',
u'star_id': u'49928',
u'star_name': u'Chea Oobixy',
u'type': u'gas giant',
u'water': 0,
u'x': u'418',
u'y': u'-251',
u'zone': u'1|-1'},
{u'id': u'359537',
u'image': u'p7-2',
u'name': u'Chea Oobixy 2',
u'orbit': u'2',
u'ore': {u'anthracite': 1,
u'bauxite': 1700,
u'beryl': 1000,
u'chalcopyrite': 2800,
u'chromite': 1,
u'fluorite': 1,
u'galena': 1,
u'goethite': 2400,
u'gold': 1,
u'gypsum': 2100,
u'halite': 1,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 1,
u'trona': 1,
u'uraninite': 1,
u'zircon': 1},
u'size': u'66',
u'star_id': u'49928',
u'star_name': u'Chea Oobixy',
u'type': u'habitable planet',
u'water': 5700,
u'x': u'419',
u'y': u'-252',
u'zone': u'1|-1'},
{u'id': u'359538',
u'image': u'p10-3',
u'name': u'Tau chy 1',
u'orbit': u'3',
u'ore': {u'anthracite': 500,
u'bauxite': 1,
u'beryl': 250,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 250,
u'galena': 1,
u'goethite': 1000,
u'gold': 1,
u'gypsum': 500,
u'halite': 1,
u'kerogen': 500,
u'magnetite': 5000,
u'methane': 500,
u'monazite': 250,
u'rutile': 1,
u'sulfur': 500,
u'trona': 500,
u'uraninite': 1,
u'zircon': 250},
u'size': u'45',
u'star_id': u'49928',
u'star_name': u'Chea Oobixy',
u'type': u'habitable planet',
u'water': 6800,
u'x': u'419',
u'y': u'-254',
u'zone': u'1|-1'},
{u'id': u'359539',
u'image': u'p18-4',
u'name': u'Toppie Terra',
u'orbit': u'4',
u'ore': {u'anthracite': 1,
u'bauxite': 4200,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 3200,
u'fluorite': 1,
u'galena': 1,
u'goethite': 1,
u'gold': 1,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 1,
u'trona': 1,
u'uraninite': 2600,
u'zircon': 1},
u'size': u'45',
u'star_id': u'49928',
u'star_name': u'Chea Oobixy',
u'type': u'habitable planet',
u'water': 7600,
u'x': u'418',
u'y': u'-255',
u'zone': u'1|-1'},
{u'id': u'359540',
u'image': u'pg4-5',
u'name': u'Chea Oobixy 5',
u'orbit': u'5',
u'ore': {u'anthracite': 1,
u'bauxite': 1,
u'beryl': 1,
u'chalcopyrite': 2000,
u'chromite': 1,
u'fluorite': 1,
u'galena': 1,
u'goethite': 1,
u'gold': 1,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 1,
u'magnetite': 14000,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 4000,
u'trona': 1,
u'uraninite': 1,
u'zircon': 1},
u'size': u'95',
u'star_id': u'49928',
u'star_name': u'Chea Oobixy',
u'type': u'gas giant',
u'water': 0,
u'x': u'416',
u'y': u'-255',
u'zone': u'1|-1'},
{u'id': u'359541',
u'image': u'p38-6',
u'name': u'Chea Oobixy 6',
u'orbit': u'6',
u'ore': {u'anthracite': 1,
u'bauxite': 1,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 3000,
u'galena': 1,
u'goethite': 1,
u'gold': 1,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 7000,
u'trona': 1,
u'uraninite': 1,
u'zircon': 1},
u'size': u'55',
u'star_id': u'49928',
u'star_name': u'Chea Oobixy',
u'type': u'habitable planet',
u'water': 8000,
u'x': u'415',
u'y': u'-254',
u'zone': u'1|-1'},
{u'id': u'359542',
u'image': u'p5-7',
u'name': u'Chea Oobixy 7',
u'orbit': u'7',
u'ore': {u'anthracite': 1,
u'bauxite': 2250,
u'beryl': 1,
u'chalcopyrite': 250,
u'chromite': 1,
u'fluorite': 1,
u'galena': 2250,
u'goethite': 1250,
u'gold': 1,
u'gypsum': 1250,
u'halite': 250,
u'kerogen': 1,
u'magnetite': 250,
u'methane': 250,
u'monazite': 1,
u'rutile': 1250,
u'sulfur': 250,
u'trona': 250,
u'uraninite': 250,
u'zircon': 1},
u'size': u'52',
u'star_id': u'49928',
u'star_name': u'Chea Oobixy',
u'type': u'habitable planet',
u'water': 6200,
u'x': u'415',
u'y': u'-252',
u'zone': u'1|-1'},
{u'id': u'359543',
u'image': u'p37-8',
u'name': u'Chea Oobixy 8',
u'orbit': u'8',
u'ore': {u'anthracite': 2000,
u'bauxite': 1,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 1,
u'galena': 1,
u'goethite': 1,
u'gold': 1,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 2000,
u'magnetite': 2000,
u'methane': 1,
u'monazite': 2000,
u'rutile': 1,
u'sulfur': 1,
u'trona': 1,
u'uraninite': 2000,
u'zircon': 1},
u'size': u'49',
u'star_id': u'49928',
u'star_name': u'Chea Oobixy',
u'type': u'habitable planet',
u'water': 6225,
u'x': u'416',
u'y': u'-251',
u'zone': u'1|-1'}],
u'color': u'blue',
u'id': u'49928',
u'influence': u'0',
u'name': u'Chea Oobixy',
u'x': u'417',
u'y': u'-253',
u'zone': u'1|-1'},
{u'bodies': [{u'id': u'362428',
u'image': u'a13-1',
u'name': u'Tchia Thowdo Oxy 1',
u'orbit': u'1',
u'ore': {u'anthracite': 1,
u'bauxite': 1,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 1,
u'galena': 1,
u'goethite': 1,
u'gold': 1,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 1,
u'trona': 6574,
u'uraninite': 1,
u'zircon': 2590},
u'size': u'10',
u'star_id': u'50328',
u'star_name': u'Tchia Thowdo Oxy',
u'station': {u'id': u'360983',
u'name': u'SASS 5',
u'x': u'424',
u'y': u'-213'},
u'type': u'asteroid',
u'x': u'418',
u'y': u'-241',
u'zone': u'1|0'},
{u'id': u'362429',
u'image': u'a12-2',
u'name': u'Tchia Thowdo Oxy 2',
u'orbit': u'2',
u'ore': {u'anthracite': 289,
u'bauxite': 269,
u'beryl': 313,
u'chalcopyrite': 299,
u'chromite': 320,
u'fluorite': 307,
u'galena': 278,
u'goethite': 292,
u'gold': 310,
u'gypsum': 311,
u'halite': 301,
u'kerogen': 284,
u'magnetite': 296,
u'methane': 285,
u'monazite': 319,
u'rutile': 258,
u'sulfur': 324,
u'trona': 293,
u'uraninite': 276,
u'zircon': 275},
u'size': u'6',
u'star_id': u'50328',
u'star_name': u'Tchia Thowdo Oxy',
u'station': {u'id': u'360983',
u'name': u'SASS 5',
u'x': u'424',
u'y': u'-213'},
u'type': u'asteroid',
u'x': u'419',
u'y': u'-242',
u'zone': u'1|0'},
{u'id': u'362430',
u'image': u'a3-3',
u'name': u'Tchia Thowdo Oxy 3',
u'orbit': u'3',
u'ore': {u'anthracite': 1,
u'bauxite': 1,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 1,
u'galena': 1,
u'goethite': 1,
u'gold': 1,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1000,
u'rutile': 1,
u'sulfur': 1,
u'trona': 1,
u'uraninite': 1000,
u'zircon': 8000},
u'size': u'9',
u'star_id': u'50328',
u'star_name': u'Tchia Thowdo Oxy',
u'station': {u'id': u'360983',
u'name': u'SASS 5',
u'x': u'424',
u'y': u'-213'},
u'type': u'asteroid',
u'x': u'419',
u'y': u'-244',
u'zone': u'1|0'},
{u'id': u'362431',
u'image': u'a20-4',
u'name': u'Tchia Thowdo Oxy 4',
u'orbit': u'4',
u'ore': {u'anthracite': 1,
u'bauxite': 1,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 1,
u'galena': 1,
u'goethite': 6342,
u'gold': 1,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 1,
u'trona': 1,
u'uraninite': 1,
u'zircon': 1},
u'size': u'2',
u'star_id': u'50328',
u'star_name': u'Tchia Thowdo Oxy',
u'station': {u'id': u'360983',
u'name': u'SASS 5',
u'x': u'424',
u'y': u'-213'},
u'type': u'asteroid',
u'x': u'418',
u'y': u'-245',
u'zone': u'1|0'},
{u'id': u'362432',
u'image': u'a20-5',
u'name': u'Tchia Thowdo Oxy 5',
u'orbit': u'5',
u'ore': {u'anthracite': 1,
u'bauxite': 1,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 1,
u'galena': 1,
u'goethite': 6342,
u'gold': 1,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 1,
u'trona': 1,
u'uraninite': 1,
u'zircon': 1},
u'size': u'6',
u'star_id': u'50328',
u'star_name': u'Tchia Thowdo Oxy',
u'station': {u'id': u'360983',
u'name': u'SASS 5',
u'x': u'424',
u'y': u'-213'},
u'type': u'asteroid',
u'x': u'416',
u'y': u'-245',
u'zone': u'1|0'},
{u'id': u'362433',
u'image': u'a14-6',
u'name': u'Tchia Thowdo Oxy 6',
u'orbit': u'6',
u'ore': {u'anthracite': 1,
u'bauxite': 1,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 1,
u'galena': 3038,
u'goethite': 2895,
u'gold': 1,
u'gypsum': 2897,
u'halite': 1,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 1,
u'trona': 1,
u'uraninite': 1,
u'zircon': 1},
u'size': u'9',
u'star_id': u'50328',
u'star_name': u'Tchia Thowdo Oxy',
u'station': {u'id': u'360983',
u'name': u'SASS 5',
u'x': u'424',
u'y': u'-213'},
u'type': u'asteroid',
u'x': u'415',
u'y': u'-244',
u'zone': u'1|0'},
{u'id': u'362434',
u'image': u'a12-7',
u'name': u'Tchia Thowdo Oxy 7',
u'orbit': u'7',
u'ore': {u'anthracite': 289,
u'bauxite': 269,
u'beryl': 313,
u'chalcopyrite': 299,
u'chromite': 320,
u'fluorite': 307,
u'galena': 278,
u'goethite': 292,
u'gold': 310,
u'gypsum': 311,
u'halite': 301,
u'kerogen': 284,
u'magnetite': 296,
u'methane': 285,
u'monazite': 319,
u'rutile': 258,
u'sulfur': 324,
u'trona': 293,
u'uraninite': 276,
u'zircon': 275},
u'size': u'4',
u'star_id': u'50328',
u'star_name': u'Tchia Thowdo Oxy',
u'station': {u'id': u'360983',
u'name': u'SASS 5',
u'x': u'424',
u'y': u'-213'},
u'type': u'asteroid',
u'x': u'415',
u'y': u'-242',
u'zone': u'1|0'},
{u'id': u'362435',
u'image': u'a2-8',
u'name': u'Tchia Thowdo Oxy 8',
u'orbit': u'8',
u'ore': {u'anthracite': 1,
u'bauxite': 1,
u'beryl': 4000,
u'chalcopyrite': 5000,
u'chromite': 1,
u'fluorite': 1,
u'galena': 1,
u'goethite': 1,
u'gold': 1,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 1,
u'trona': 1,
u'uraninite': 1,
u'zircon': 1000},
u'size': u'2',
u'star_id': u'50328',
u'star_name': u'Tchia Thowdo Oxy',
u'station': {u'id': u'360983',
u'name': u'SASS 5',
u'x': u'424',
u'y': u'-213'},
u'type': u'asteroid',
u'x': u'416',
u'y': u'-241',
u'zone': u'1|0'}],
u'color': u'white',
u'id': u'50328',
u'influence': u'227',
u'name': u'Tchia Thowdo Oxy',
u'station': {u'id': u'360983',
u'name': u'SASS 5',
u'x': u'424',
u'y': u'-213'},
u'x': u'417',
u'y': u'-243',
u'zone': u'1|0'},
{u'bodies': [{u'id': u'360995',
u'image': u'a20-1',
u'name': u'Oum Froassoo Sta 1',
u'orbit': u'1',
u'ore': {u'anthracite': 1,
u'bauxite': 1,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 1,
u'galena': 1,
u'goethite': 6342,
u'gold': 1,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 1,
u'trona': 1,
u'uraninite': 1,
u'zircon': 1},
u'size': u'5',
u'star_id': u'50129',
u'star_name': u'Oum Froassoo Sta',
u'station': {u'id': u'360983',
u'name': u'SASS 5',
u'x': u'424',
u'y': u'-213'},
u'type': u'asteroid',
u'x': u'424',
u'y': u'-243',
u'zone': u'1|0'},
{u'id': u'360996',
u'image': u'a14-2',
u'name': u'Oum Froassoo Sta 2',
u'orbit': u'2',
u'ore': {u'anthracite': 1,
u'bauxite': 1,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 1,
u'galena': 3038,
u'goethite': 2895,
u'gold': 1,
u'gypsum': 2897,
u'halite': 1,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 1,
u'trona': 1,
u'uraninite': 1,
u'zircon': 1},
u'size': u'8',
u'star_id': u'50129',
u'star_name': u'Oum Froassoo Sta',
u'station': {u'id': u'360983',
u'name': u'SASS 5',
u'x': u'424',
u'y': u'-213'},
u'type': u'asteroid',
u'x': u'425',
u'y': u'-244',
u'zone': u'1|0'},
{u'id': u'360997',
u'image': u'a3-3',
u'name': u'Oum Froassoo Sta 3',
u'orbit': u'3',
u'ore': {u'anthracite': 1,
u'bauxite': 1,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 1,
u'galena': 1,
u'goethite': 1,
u'gold': 1,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1000,
u'rutile': 1,
u'sulfur': 1,
u'trona': 1,
u'uraninite': 1000,
u'zircon': 8000},
u'size': u'3',
u'star_id': u'50129',
u'star_name': u'Oum Froassoo Sta',
u'station': {u'id': u'360983',
u'name': u'SASS 5',
u'x': u'424',
u'y': u'-213'},
u'type': u'asteroid',
u'x': u'425',
u'y': u'-246',
u'zone': u'1|0'},
{u'id': u'360998',
u'image': u'pg3-4',
u'name': u'Oum Froassoo Sta 4',
u'orbit': u'4',
u'ore': {u'anthracite': 1,
u'bauxite': 1,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 1,
u'galena': 1,
u'goethite': 1,
u'gold': 1,
u'gypsum': 4000,
u'halite': 14000,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 2000,
u'trona': 1,
u'uraninite': 1,
u'zircon': 1},
u'size': u'88',
u'star_id': u'50129',
u'star_name': u'Oum Froassoo Sta',
u'station': {u'id': u'360983',
u'name': u'SASS 5',
u'x': u'424',
u'y': u'-213'},
u'type': u'gas giant',
u'water': 0,
u'x': u'424',
u'y': u'-247',
u'zone': u'1|0'},
{u'id': u'360999',
u'image': u'pg4-5',
u'name': u'Oum Froassoo Sta 5',
u'orbit': u'5',
u'ore': {u'anthracite': 1,
u'bauxite': 1,
u'beryl': 1,
u'chalcopyrite': 2000,
u'chromite': 1,
u'fluorite': 1,
u'galena': 1,
u'goethite': 1,
u'gold': 1,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 1,
u'magnetite': 14000,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 4000,
u'trona': 1,
u'uraninite': 1,
u'zircon': 1},
u'size': u'107',
u'star_id': u'50129',
u'star_name': u'Oum Froassoo Sta',
u'station': {u'id': u'360983',
u'name': u'SASS 5',
u'x': u'424',
u'y': u'-213'},
u'type': u'gas giant',
u'water': 0,
u'x': u'422',
u'y': u'-247',
u'zone': u'1|0'},
{u'id': u'361000',
u'image': u'a18-6',
u'name': u'Oum Froassoo Sta 6',
u'orbit': u'6',
u'ore': {u'anthracite': 1,
u'bauxite': 1,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 1,
u'galena': 1,
u'goethite': 1,
u'gold': 1,
u'gypsum': 4120,
u'halite': 1,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 1,
u'trona': 3326,
u'uraninite': 1,
u'zircon': 1},
u'size': u'7',
u'star_id': u'50129',
u'star_name': u'Oum Froassoo Sta',
u'station': {u'id': u'360983',
u'name': u'SASS 5',
u'x': u'424',
u'y': u'-213'},
u'type': u'asteroid',
u'x': u'421',
u'y': u'-246',
u'zone': u'1|0'},
{u'id': u'361001',
u'image': u'a13-7',
u'name': u'Oum Froassoo Sta 7',
u'orbit': u'7',
u'ore': {u'anthracite': 1,
u'bauxite': 1,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 1,
u'galena': 1,
u'goethite': 1,
u'gold': 1,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 1,
u'trona': 6574,
u'uraninite': 1,
u'zircon': 2590},
u'size': u'6',
u'star_id': u'50129',
u'star_name': u'Oum Froassoo Sta',
u'station': {u'id': u'360983',
u'name': u'SASS 5',
u'x': u'424',
u'y': u'-213'},
u'type': u'asteroid',
u'x': u'421',
u'y': u'-244',
u'zone': u'1|0'},
{u'id': u'361002',
u'image': u'a4-8',
u'name': u'Oum Froassoo Sta 8',
u'orbit': u'8',
u'ore': {u'anthracite': 1,
u'bauxite': 1,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 1,
u'galena': 1,
u'goethite': 1,
u'gold': 1000,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 9000,
u'rutile': 1,
u'sulfur': 1,
u'trona': 1,
u'uraninite': 1,
u'zircon': 1},
u'size': u'3',
u'star_id': u'50129',
u'star_name': u'Oum Froassoo Sta',
u'station': {u'id': u'360983',
u'name': u'SASS 5',
u'x': u'424',
u'y': u'-213'},
u'type': u'asteroid',
u'x': u'422',
u'y': u'-243',
u'zone': u'1|0'}],
u'color': u'white',
u'id': u'50129',
u'influence': u'254',
u'name': u'Oum Froassoo Sta',
u'station': {u'id': u'360983',
u'name': u'SASS 5',
u'x': u'424',
u'y': u'-213'},
u'x': u'423',
u'y': u'-245',
u'zone': u'1|0'},
{u'bodies': [{u'id': u'358097',
u'image': u'pg3-1',
u'name': u'Ouss Siek 1',
u'orbit': u'1',
u'ore': {u'anthracite': 1,
u'bauxite': 1,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 1,
u'galena': 1,
u'goethite': 1,
u'gold': 1,
u'gypsum': 4000,
u'halite': 14000,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 2000,
u'trona': 1,
u'uraninite': 1,
u'zircon': 1},
u'size': u'104',
u'star_id': u'49729',
u'star_name': u'Ouss Siek',
u'type': u'gas giant',
u'water': 0,
u'x': u'425',
u'y': u'-253',
u'zone': u'1|-1'},
{u'id': u'358098',
u'image': u'p28-2',
u'name': u'Ouss Siek 2',
u'orbit': u'2',
u'ore': {u'anthracite': 1,
u'bauxite': 1500,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 1,
u'galena': 1,
u'goethite': 2500,
u'gold': 1,
u'gypsum': 2000,
u'halite': 3500,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 1,
u'trona': 1,
u'uraninite': 1500,
u'zircon': 1},
u'size': u'64',
u'star_id': u'49729',
u'star_name': u'Ouss Siek',
u'type': u'habitable planet',
u'water': 9230,
u'x': u'426',
u'y': u'-254',
u'zone': u'1|-1'},
{u'build_queue_len': 7,
u'build_queue_size': 9,
u'building_count': 43,
u'empire': {u'alignment': u'self',
u'id': u'51819',
u'is_isolationist': u'1',
u'name': u'MikeTwo'},
u'energy_capacity': u'280094',
u'energy_hour': u'3537',
u'energy_stored': u'104737',
u'food_capacity': u'284472',
u'food_hour': 5171,
u'food_stored': 284472,
u'happiness': u'352459',
u'happiness_hour': u'680',
u'id': u'358099',
u'image': u'p38-3',
u'incoming_enemy_ships': [{u'date_arrives': u'06 09 2015 18:33:00 +0000',
u'id': u'51549238',
u'is_ally': 0,
u'is_own': 0}],
u'name': u'Cloraphorm III',
u'needs_surface_refresh': u'0',
u'neutral_entry': u'10 07 2015 07:26:44 +0000',
u'num_incoming_ally': u'0',
u'num_incoming_enemy': u'1',
u'num_incoming_own': u'0',
u'orbit': u'3',
u'ore': {u'anthracite': 1,
u'bauxite': 1,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 3000,
u'galena': 1,
u'goethite': 1,
u'gold': 1,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 7000,
u'trona': 1,
u'uraninite': 1,
u'zircon': 1},
u'ore_capacity': u'295866',
u'ore_hour': 7794,
u'ore_stored': 202664,
u'plots_available': u'2',
u'population': 2880000,
u'propaganda_boost': u'0',
u'size': u'45',
u'star_id': u'49729',
u'star_name': u'Ouss Siek',
u'surface_version': u'358',
u'type': u'habitable planet',
u'waste_capacity': u'289133',
u'waste_hour': u'239',
u'waste_stored': u'288328',
u'water': 8000,
u'water_capacity': u'280094',
u'water_hour': u'5476',
u'water_stored': u'280094',
u'x': u'426',
u'y': u'-256',
u'zone': u'1|-1'},
{u'id': u'358100',
u'image': u'a7-4',
u'name': u'Ouss Siek 4',
u'orbit': u'4',
u'ore': {u'anthracite': 1,
u'bauxite': 1,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 3291,
u'galena': 1,
u'goethite': 1,
u'gold': 1,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1239,
u'rutile': 1,
u'sulfur': 1,
u'trona': 1,
u'uraninite': 2377,
u'zircon': 1},
u'size': u'5',
u'star_id': u'49729',
u'star_name': u'Ouss Siek',
u'type': u'asteroid',
u'x': u'425',
u'y': u'-257',
u'zone': u'1|-1'},
{u'id': u'358101',
u'image': u'p28-5',
u'name': u'Ouss Siek 5',
u'orbit': u'5',
u'ore': {u'anthracite': 1,
u'bauxite': 1500,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 1,
u'galena': 1,
u'goethite': 2500,
u'gold': 1,
u'gypsum': 2000,
u'halite': 3500,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 1,
u'trona': 1,
u'uraninite': 1500,
u'zircon': 1},
u'size': u'56',
u'star_id': u'49729',
u'star_name': u'Ouss Siek',
u'type': u'habitable planet',
u'water': 9230,
u'x': u'423',
u'y': u'-257',
u'zone': u'1|-1'},
{u'id': u'358102',
u'image': u'p37-6',
u'name': u'Ouss Siek 6',
u'orbit': u'6',
u'ore': {u'anthracite': 2000,
u'bauxite': 1,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 1,
u'galena': 1,
u'goethite': 1,
u'gold': 1,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 2000,
u'magnetite': 2000,
u'methane': 1,
u'monazite': 2000,
u'rutile': 1,
u'sulfur': 1,
u'trona': 1,
u'uraninite': 2000,
u'zircon': 1},
u'size': u'53',
u'star_id': u'49729',
u'star_name': u'Ouss Siek',
u'type': u'habitable planet',
u'water': 6225,
u'x': u'422',
u'y': u'-256',
u'zone': u'1|-1'},
{u'id': u'358103',
u'image': u'pg3-7',
u'name': u'Ouss Siek 7',
u'orbit': u'7',
u'ore': {u'anthracite': 1,
u'bauxite': 1,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 1,
u'galena': 1,
u'goethite': 1,
u'gold': 1,
u'gypsum': 4000,
u'halite': 14000,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 2000,
u'trona': 1,
u'uraninite': 1,
u'zircon': 1},
u'size': u'86',
u'star_id': u'49729',
u'star_name': u'Ouss Siek',
u'type': u'gas giant',
u'water': 0,
u'x': u'422',
u'y': u'-254',
u'zone': u'1|-1'},
{u'id': u'358104',
u'image': u'p37-8',
u'name': u'Ouss Siek 8',
u'orbit': u'8',
u'ore': {u'anthracite': 2000,
u'bauxite': 1,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 1,
u'galena': 1,
u'goethite': 1,
u'gold': 1,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 2000,
u'magnetite': 2000,
u'methane': 1,
u'monazite': 2000,
u'rutile': 1,
u'sulfur': 1,
u'trona': 1,
u'uraninite': 2000,
u'zircon': 1},
u'size': u'69',
u'star_id': u'49729',
u'star_name': u'Ouss Siek',
u'type': u'habitable planet',
u'water': 6225,
u'x': u'423',
u'y': u'-253',
u'zone': u'1|-1'}],
u'color': u'blue',
u'id': u'49729',
u'influence': u'0',
u'name': u'Ouss Siek',
u'x': u'424',
u'y': u'-255',
u'zone': u'1|-1'},
{u'bodies': [{u'id': u'355186',
u'image': u'a12-1',
u'name': u'Iondeuzz 1',
u'orbit': u'1',
u'ore': {u'anthracite': 289,
u'bauxite': 269,
u'beryl': 313,
u'chalcopyrite': 299,
u'chromite': 320,
u'fluorite': 307,
u'galena': 278,
u'goethite': 292,
u'gold': 310,
u'gypsum': 311,
u'halite': 301,
u'kerogen': 284,
u'magnetite': 296,
u'methane': 285,
u'monazite': 319,
u'rutile': 258,
u'sulfur': 324,
u'trona': 293,
u'uraninite': 276,
u'zircon': 275},
u'size': u'8',
u'star_id': u'49329',
u'star_name': u'Iondeuzz',
u'type': u'asteroid',
u'x': u'426',
u'y': u'-265',
u'zone': u'1|-1'},
{u'id': u'355187',
u'image': u'p13-2',
u'name': u'Iondeuzz 2',
u'orbit': u'2',
u'ore': {u'anthracite': 1500,
u'bauxite': 1,
u'beryl': 1,
u'chalcopyrite': 1300,
u'chromite': 1400,
u'fluorite': 1,
u'galena': 2200,
u'goethite': 1,
u'gold': 1,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 1500,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 2100,
u'sulfur': 1,
u'trona': 1,
u'uraninite': 1,
u'zircon': 1},
u'size': u'66',
u'star_id': u'49329',
u'star_name': u'Iondeuzz',
u'type': u'habitable planet',
u'water': 8300,
u'x': u'427',
u'y': u'-266',
u'zone': u'1|-1'},
{u'id': u'355188',
u'image': u'p10-3',
u'name': u'Iondeuzz 3',
u'orbit': u'3',
u'ore': {u'anthracite': 500,
u'bauxite': 1,
u'beryl': 250,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 250,
u'galena': 1,
u'goethite': 1000,
u'gold': 1,
u'gypsum': 500,
u'halite': 1,
u'kerogen': 500,
u'magnetite': 5000,
u'methane': 500,
u'monazite': 250,
u'rutile': 1,
u'sulfur': 500,
u'trona': 500,
u'uraninite': 1,
u'zircon': 250},
u'size': u'45',
u'star_id': u'49329',
u'star_name': u'Iondeuzz',
u'type': u'habitable planet',
u'water': 6800,
u'x': u'427',
u'y': u'-268',
u'zone': u'1|-1'},
{u'id': u'355189',
u'image': u'p18-4',
u'name': u"Khim'bar",
u'orbit': u'4',
u'ore': {u'anthracite': 1,
u'bauxite': 4200,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 3200,
u'fluorite': 1,
u'galena': 1,
u'goethite': 1,
u'gold': 1,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 1,
u'trona': 1,
u'uraninite': 2600,
u'zircon': 1},
u'size': u'45',
u'star_id': u'49329',
u'star_name': u'Iondeuzz',
u'type': u'habitable planet',
u'water': 7600,
u'x': u'426',
u'y': u'-269',
u'zone': u'1|-1'},
{u'id': u'355190',
u'image': u'p28-5',
u'name': u'Iondeuzz 5',
u'orbit': u'5',
u'ore': {u'anthracite': 1,
u'bauxite': 1500,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 1,
u'galena': 1,
u'goethite': 2500,
u'gold': 1,
u'gypsum': 2000,
u'halite': 3500,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 1,
u'trona': 1,
u'uraninite': 1500,
u'zircon': 1},
u'size': u'58',
u'star_id': u'49329',
u'star_name': u'Iondeuzz',
u'type': u'habitable planet',
u'water': 9230,
u'x': u'424',
u'y': u'-269',
u'zone': u'1|-1'},
{u'id': u'355191',
u'image': u'p38-6',
u'name': u'Iondeuzz 6',
u'orbit': u'6',
u'ore': {u'anthracite': 1,
u'bauxite': 1,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 3000,
u'galena': 1,
u'goethite': 1,
u'gold': 1,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 7000,
u'trona': 1,
u'uraninite': 1,
u'zircon': 1},
u'size': u'55',
u'star_id': u'49329',
u'star_name': u'Iondeuzz',
u'type': u'habitable planet',
u'water': 8000,
u'x': u'423',
u'y': u'-268',
u'zone': u'1|-1'},
{u'id': u'355192',
u'image': u'p5-8',
u'name': u'Iondeuzz 8',
u'orbit': u'8',
u'ore': {u'anthracite': 1,
u'bauxite': 2250,
u'beryl': 1,
u'chalcopyrite': 250,
u'chromite': 1,
u'fluorite': 1,
u'galena': 2250,
u'goethite': 1250,
u'gold': 1,
u'gypsum': 1250,
u'halite': 250,
u'kerogen': 1,
u'magnetite': 250,
u'methane': 250,
u'monazite': 1,
u'rutile': 1250,
u'sulfur': 250,
u'trona': 250,
u'uraninite': 250,
u'zircon': 1},
u'size': u'53',
u'star_id': u'49329',
u'star_name': u'Iondeuzz',
u'type': u'habitable planet',
u'water': 6200,
u'x': u'424',
u'y': u'-265',
u'zone': u'1|-1'}],
u'color': u'yellow',
u'id': u'49329',
u'influence': u'0',
u'name': u'Iondeuzz',
u'x': u'425',
u'y': u'-267',
u'zone': u'1|-1'},
{u'bodies': [{u'id': u'301055',
u'image': u'p36-4',
u'name': u'Aegh Oxyeufr 4',
u'orbit': u'4',
u'ore': {u'anthracite': 1,
u'bauxite': 100,
u'beryl': 1,
u'chalcopyrite': 100,
u'chromite': 100,
u'fluorite': 1,
u'galena': 1,
u'goethite': 100,
u'gold': 1,
u'gypsum': 100,
u'halite': 1,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 100,
u'rutile': 100,
u'sulfur': 100,
u'trona': 1,
u'uraninite': 1,
u'zircon': 1},
u'size': u'45',
u'star_id': u'49529',
u'star_name': u'Ovoahuij',
u'type': u'habitable planet',
u'water': 30000,
u'x': u'431',
u'y': u'-263',
u'zone': u'1|-1'},
{u'empire': {u'alignment': u'hostile',
u'id': u'50180',
u'is_isolationist': u'0',
u'name': u'Cybertronian Defense League'},
u'id': u'332066',
u'image': u'p35-3',
u'name': u'in4',
u'orbit': u'3',
u'ore': {u'anthracite': 500,
u'bauxite': 500,
u'beryl': 500,
u'chalcopyrite': 500,
u'chromite': 500,
u'fluorite': 500,
u'galena': 500,
u'goethite': 500,
u'gold': 500,
u'gypsum': 500,
u'halite': 500,
u'kerogen': 500,
u'magnetite': 500,
u'methane': 500,
u'monazite': 500,
u'rutile': 500,
u'sulfur': 500,
u'trona': 500,
u'uraninite': 500,
u'zircon': 500},
u'size': u'70',
u'star_id': u'49529',
u'star_name': u'Ovoahuij',
u'type': u'habitable planet',
u'water': 9467,
u'x': u'432',
u'y': u'-262',
u'zone': u'1|-1'},
{u'id': u'356654',
u'image': u'p10-1',
u'name': u'Ovoahuij 1',
u'orbit': u'1',
u'ore': {u'anthracite': 500,
u'bauxite': 1,
u'beryl': 250,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 250,
u'galena': 1,
u'goethite': 1000,
u'gold': 1,
u'gypsum': 500,
u'halite': 1,
u'kerogen': 500,
u'magnetite': 5000,
u'methane': 500,
u'monazite': 250,
u'rutile': 1,
u'sulfur': 500,
u'trona': 500,
u'uraninite': 1,
u'zircon': 250},
u'size': u'55',
u'star_id': u'49529',
u'star_name': u'Ovoahuij',
u'type': u'habitable planet',
u'water': 6800,
u'x': u'431',
u'y': u'-259',
u'zone': u'1|-1'},
{u'id': u'356655',
u'image': u'p18-2',
u'name': u'Ovoahuij 2',
u'orbit': u'2',
u'ore': {u'anthracite': 1,
u'bauxite': 4200,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 3200,
u'fluorite': 1,
u'galena': 1,
u'goethite': 1,
u'gold': 1,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 1,
u'trona': 1,
u'uraninite': 2600,
u'zircon': 1},
u'size': u'52',
u'star_id': u'49529',
u'star_name': u'Ovoahuij',
u'type': u'habitable planet',
u'water': 7600,
u'x': u'432',
u'y': u'-260',
u'zone': u'1|-1'},
{u'id': u'356658',
u'image': u'p18-5',
u'name': u'Ovoahuij 5',
u'orbit': u'5',
u'ore': {u'anthracite': 1,
u'bauxite': 4200,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 3200,
u'fluorite': 1,
u'galena': 1,
u'goethite': 1,
u'gold': 1,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 1,
u'trona': 1,
u'uraninite': 2600,
u'zircon': 1},
u'size': u'65',
u'star_id': u'49529',
u'star_name': u'Ovoahuij',
u'type': u'habitable planet',
u'water': 7600,
u'x': u'429',
u'y': u'-263',
u'zone': u'1|-1'},
{u'id': u'356659',
u'image': u'p28-6',
u'name': u'Alajuwon',
u'orbit': u'6',
u'ore': {u'anthracite': 1,
u'bauxite': 1500,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 1,
u'galena': 1,
u'goethite': 2500,
u'gold': 1,
u'gypsum': 2000,
u'halite': 3500,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 1,
u'trona': 1,
u'uraninite': 1500,
u'zircon': 1},
u'size': u'45',
u'star_id': u'49529',
u'star_name': u'Ovoahuij',
u'type': u'habitable planet',
u'water': 9230,
u'x': u'428',
u'y': u'-262',
u'zone': u'1|-1'},
{u'id': u'356660',
u'image': u'p38-7',
u'name': u'Ovoahuij 7',
u'orbit': u'7',
u'ore': {u'anthracite': 1,
u'bauxite': 1,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 3000,
u'galena': 1,
u'goethite': 1,
u'gold': 1,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 7000,
u'trona': 1,
u'uraninite': 1,
u'zircon': 1},
u'size': u'59',
u'star_id': u'49529',
u'star_name': u'Ovoahuij',
u'type': u'habitable planet',
u'water': 8000,
u'x': u'428',
u'y': u'-260',
u'zone': u'1|-1'},
{u'id': u'356661',
u'image': u'p5-8',
u'name': u'Ovoahuij 8',
u'orbit': u'8',
u'ore': {u'anthracite': 1,
u'bauxite': 2250,
u'beryl': 1,
u'chalcopyrite': 250,
u'chromite': 1,
u'fluorite': 1,
u'galena': 2250,
u'goethite': 1250,
u'gold': 1,
u'gypsum': 1250,
u'halite': 250,
u'kerogen': 1,
u'magnetite': 250,
u'methane': 250,
u'monazite': 1,
u'rutile': 1250,
u'sulfur': 250,
u'trona': 250,
u'uraninite': 250,
u'zircon': 1},
u'size': u'56',
u'star_id': u'49529',
u'star_name': u'Ovoahuij',
u'type': u'habitable planet',
u'water': 6200,
u'x': u'429',
u'y': u'-259',
u'zone': u'1|-1'}],
u'color': u'blue',
u'id': u'49529',
u'influence': u'0',
u'name': u'Ovoahuij',
u'x': u'430',
u'y': u'-261',
u'zone': u'1|-1'},
{u'bodies': [{u'id': u'353720',
u'image': u'a7-1',
u'name': u'Plia Osloy Sai 1',
u'orbit': u'1',
u'ore': {u'anthracite': 1,
u'bauxite': 1,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 3291,
u'galena': 1,
u'goethite': 1,
u'gold': 1,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1239,
u'rutile': 1,
u'sulfur': 1,
u'trona': 1,
u'uraninite': 2377,
u'zircon': 1},
u'size': u'4',
u'star_id': u'49129',
u'star_name': u'Plia Osloy Sai',
u'type': u'asteroid',
u'x': u'433',
u'y': u'-268',
u'zone': u'1|-1'},
{u'id': u'353721',
u'image': u'p28-2',
u'name': u'severus',
u'orbit': u'2',
u'ore': {u'anthracite': 1,
u'bauxite': 1500,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 1,
u'galena': 1,
u'goethite': 2500,
u'gold': 1,
u'gypsum': 2000,
u'halite': 3500,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 1,
u'trona': 1,
u'uraninite': 1500,
u'zircon': 1},
u'size': u'45',
u'star_id': u'49129',
u'star_name': u'Plia Osloy Sai',
u'type': u'habitable planet',
u'water': 9230,
u'x': u'434',
u'y': u'-269',
u'zone': u'1|-1'},
{u'id': u'353722',
u'image': u'p38-4',
u'name': u'Plia Osloy Sai 4',
u'orbit': u'4',
u'ore': {u'anthracite': 1,
u'bauxite': 1,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 3000,
u'galena': 1,
u'goethite': 1,
u'gold': 1,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 7000,
u'trona': 1,
u'uraninite': 1,
u'zircon': 1},
u'size': u'51',
u'star_id': u'49129',
u'star_name': u'Plia Osloy Sai',
u'type': u'habitable planet',
u'water': 8000,
u'x': u'433',
u'y': u'-272',
u'zone': u'1|-1'},
{u'id': u'353723',
u'image': u'p5-5',
u'name': u'Plia Osloy Sai 5',
u'orbit': u'5',
u'ore': {u'anthracite': 1,
u'bauxite': 2250,
u'beryl': 1,
u'chalcopyrite': 250,
u'chromite': 1,
u'fluorite': 1,
u'galena': 2250,
u'goethite': 1250,
u'gold': 1,
u'gypsum': 1250,
u'halite': 250,
u'kerogen': 1,
u'magnetite': 250,
u'methane': 250,
u'monazite': 1,
u'rutile': 1250,
u'sulfur': 250,
u'trona': 250,
u'uraninite': 250,
u'zircon': 1},
u'size': u'45',
u'star_id': u'49129',
u'star_name': u'Plia Osloy Sai',
u'type': u'habitable planet',
u'water': 6200,
u'x': u'431',
u'y': u'-272',
u'zone': u'1|-1'},
{u'id': u'353724',
u'image': u'p37-6',
u'name': u'T8TE Alpha',
u'orbit': u'6',
u'ore': {u'anthracite': 2000,
u'bauxite': 1,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 1,
u'galena': 1,
u'goethite': 1,
u'gold': 1,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 2000,
u'magnetite': 2000,
u'methane': 1,
u'monazite': 2000,
u'rutile': 1,
u'sulfur': 1,
u'trona': 1,
u'uraninite': 2000,
u'zircon': 1},
u'size': u'45',
u'star_id': u'49129',
u'star_name': u'Plia Osloy Sai',
u'type': u'habitable planet',
u'water': 6225,
u'x': u'430',
u'y': u'-271',
u'zone': u'1|-1'},
{u'id': u'353725',
u'image': u'p37-7',
u'name': u'Plia Osloy Sai 7',
u'orbit': u'7',
u'ore': {u'anthracite': 2000,
u'bauxite': 1,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 1,
u'galena': 1,
u'goethite': 1,
u'gold': 1,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 2000,
u'magnetite': 2000,
u'methane': 1,
u'monazite': 2000,
u'rutile': 1,
u'sulfur': 1,
u'trona': 1,
u'uraninite': 2000,
u'zircon': 1},
u'size': u'64',
u'star_id': u'49129',
u'star_name': u'Plia Osloy Sai',
u'type': u'habitable planet',
u'water': 6225,
u'x': u'430',
u'y': u'-269',
u'zone': u'1|-1'},
{u'id': u'353726',
u'image': u'p5-8',
u'name': u'Plia Osloy Sai 8',
u'orbit': u'8',
u'ore': {u'anthracite': 1,
u'bauxite': 2250,
u'beryl': 1,
u'chalcopyrite': 250,
u'chromite': 1,
u'fluorite': 1,
u'galena': 2250,
u'goethite': 1250,
u'gold': 1,
u'gypsum': 1250,
u'halite': 250,
u'kerogen': 1,
u'magnetite': 250,
u'methane': 250,
u'monazite': 1,
u'rutile': 1250,
u'sulfur': 250,
u'trona': 250,
u'uraninite': 250,
u'zircon': 1},
u'size': u'61',
u'star_id': u'49129',
u'star_name': u'Plia Osloy Sai',
u'type': u'habitable planet',
u'water': 6200,
u'x': u'431',
u'y': u'-268',
u'zone': u'1|-1'}],
u'color': u'magenta',
u'id': u'49129',
u'influence': u'0',
u'name': u'Plia Osloy Sai',
u'x': u'432',
u'y': u'-270',
u'zone': u'1|-1'},
{u'bodies': [{u'id': u'359544',
u'image': u'p7-1',
u'name': u'Iostr Ufeass 1',
u'orbit': u'1',
u'ore': {u'anthracite': 1,
u'bauxite': 1700,
u'beryl': 1000,
u'chalcopyrite': 2800,
u'chromite': 1,
u'fluorite': 1,
u'galena': 1,
u'goethite': 2400,
u'gold': 1,
u'gypsum': 2100,
u'halite': 1,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 1,
u'trona': 1,
u'uraninite': 1,
u'zircon': 1},
u'size': u'45',
u'star_id': u'49929',
u'star_name': u'Iostr Ufeass',
u'type': u'habitable planet',
u'water': 5700,
u'x': u'433',
u'y': u'-250',
u'zone': u'1|-1'},
{u'id': u'359545',
u'image': u'p10-2',
u'name': u'Coraton',
u'orbit': u'2',
u'ore': {u'anthracite': 500,
u'bauxite': 1,
u'beryl': 250,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 250,
u'galena': 1,
u'goethite': 1000,
u'gold': 1,
u'gypsum': 500,
u'halite': 1,
u'kerogen': 500,
u'magnetite': 5000,
u'methane': 500,
u'monazite': 250,
u'rutile': 1,
u'sulfur': 500,
u'trona': 500,
u'uraninite': 1,
u'zircon': 250},
u'size': u'45',
u'star_id': u'49929',
u'star_name': u'Iostr Ufeass',
u'type': u'habitable planet',
u'water': 6800,
u'x': u'434',
u'y': u'-251',
u'zone': u'1|-1'},
{u'id': u'359546',
u'image': u'p37-3',
u'name': u'Iostr Ufeass 3',
u'orbit': u'3',
u'ore': {u'anthracite': 2000,
u'bauxite': 1,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 1,
u'galena': 1,
u'goethite': 1,
u'gold': 1,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 2000,
u'magnetite': 2000,
u'methane': 1,
u'monazite': 2000,
u'rutile': 1,
u'sulfur': 1,
u'trona': 1,
u'uraninite': 2000,
u'zircon': 1},
u'size': u'45',
u'star_id': u'49929',
u'star_name': u'Iostr Ufeass',
u'type': u'habitable planet',
u'water': 6225,
u'x': u'434',
u'y': u'-253',
u'zone': u'1|-1'},
{u'id': u'359547',
u'image': u'a16-4',
u'name': u'Iostr Ufeass 4',
u'orbit': u'4',
u'ore': {u'anthracite': 1,
u'bauxite': 1894,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 1793,
u'galena': 1,
u'goethite': 1,
u'gold': 2132,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 1,
u'trona': 2018,
u'uraninite': 1,
u'zircon': 1},
u'size': u'5',
u'star_id': u'49929',
u'star_name': u'Iostr Ufeass',
u'type': u'asteroid',
u'x': u'433',
u'y': u'-254',
u'zone': u'1|-1'},
{u'id': u'359548',
u'image': u'p10-5',
u'name': u'Iostr Ufeass 5',
u'orbit': u'5',
u'ore': {u'anthracite': 500,
u'bauxite': 1,
u'beryl': 250,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 250,
u'galena': 1,
u'goethite': 1000,
u'gold': 1,
u'gypsum': 500,
u'halite': 1,
u'kerogen': 500,
u'magnetite': 5000,
u'methane': 500,
u'monazite': 250,
u'rutile': 1,
u'sulfur': 500,
u'trona': 500,
u'uraninite': 1,
u'zircon': 250},
u'size': u'57',
u'star_id': u'49929',
u'star_name': u'Iostr Ufeass',
u'type': u'habitable planet',
u'water': 6800,
u'x': u'431',
u'y': u'-254',
u'zone': u'1|-1'},
{u'id': u'359549',
u'image': u'pg3-6',
u'name': u'Iostr Ufeass 6',
u'orbit': u'6',
u'ore': {u'anthracite': 1,
u'bauxite': 1,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 1,
u'galena': 1,
u'goethite': 1,
u'gold': 1,
u'gypsum': 4000,
u'halite': 14000,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 2000,
u'trona': 1,
u'uraninite': 1,
u'zircon': 1},
u'size': u'90',
u'star_id': u'49929',
u'star_name': u'Iostr Ufeass',
u'type': u'gas giant',
u'water': 0,
u'x': u'430',
u'y': u'-253',
u'zone': u'1|-1'},
{u'id': u'359550',
u'image': u'p28-7',
u'name': u'Iostr Ufeass 7',
u'orbit': u'7',
u'ore': {u'anthracite': 1,
u'bauxite': 1500,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 1,
u'galena': 1,
u'goethite': 2500,
u'gold': 1,
u'gypsum': 2000,
u'halite': 3500,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 1,
u'trona': 1,
u'uraninite': 1500,
u'zircon': 1},
u'size': u'51',
u'star_id': u'49929',
u'star_name': u'Iostr Ufeass',
u'type': u'habitable planet',
u'water': 9230,
u'x': u'430',
u'y': u'-251',
u'zone': u'1|-1'},
{u'id': u'359551',
u'image': u'p38-8',
u'name': u'Iostr Ufeass 8',
u'orbit': u'8',
u'ore': {u'anthracite': 1,
u'bauxite': 1,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 3000,
u'galena': 1,
u'goethite': 1,
u'gold': 1,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 7000,
u'trona': 1,
u'uraninite': 1,
u'zircon': 1},
u'size': u'49',
u'star_id': u'49929',
u'star_name': u'Iostr Ufeass',
u'type': u'habitable planet',
u'water': 8000,
u'x': u'431',
u'y': u'-250',
u'zone': u'1|-1'}],
u'color': u'white',
u'id': u'49929',
u'influence': u'0',
u'name': u'Iostr Ufeass',
u'x': u'432',
u'y': u'-252',
u'zone': u'1|-1'},
{u'bodies': [{u'id': u'362436',
u'image': u'a1-1',
u'name': u'Vlea Isphern Oaly 1',
u'orbit': u'1',
u'ore': {u'anthracite': 1,
u'bauxite': 1,
u'beryl': 1000,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 9000,
u'galena': 1,
u'goethite': 1,
u'gold': 1,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 1,
u'trona': 1,
u'uraninite': 1,
u'zircon': 1},
u'size': u'7',
u'star_id': u'50329',
u'star_name': u'Vlea Isphern Oaly',
u'station': {u'id': u'360983',
u'name': u'SASS 5',
u'x': u'424',
u'y': u'-213'},
u'type': u'asteroid',
u'x': u'433',
u'y': u'-241',
u'zone': u'1|0'},
{u'id': u'362437',
u'image': u'a20-2',
u'name': u'Vlea Isphern Oaly 2',
u'orbit': u'2',
u'ore': {u'anthracite': 1,
u'bauxite': 1,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 1,
u'galena': 1,
u'goethite': 6342,
u'gold': 1,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 1,
u'trona': 1,
u'uraninite': 1,
u'zircon': 1},
u'size': u'1',
u'star_id': u'50329',
u'star_name': u'Vlea Isphern Oaly',
u'station': {u'id': u'360983',
u'name': u'SASS 5',
u'x': u'424',
u'y': u'-213'},
u'type': u'asteroid',
u'x': u'434',
u'y': u'-242',
u'zone': u'1|0'},
{u'id': u'362438',
u'image': u'a1-3',
u'name': u'Vlea Isphern Oaly 3',
u'orbit': u'3',
u'ore': {u'anthracite': 1,
u'bauxite': 1,
u'beryl': 1000,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 9000,
u'galena': 1,
u'goethite': 1,
u'gold': 1,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 1,
u'trona': 1,
u'uraninite': 1,
u'zircon': 1},
u'size': u'4',
u'star_id': u'50329',
u'star_name': u'Vlea Isphern Oaly',
u'station': {u'id': u'360983',
u'name': u'SASS 5',
u'x': u'424',
u'y': u'-213'},
u'type': u'asteroid',
u'x': u'434',
u'y': u'-244',
u'zone': u'1|0'},
{u'id': u'362439',
u'image': u'a23-4',
u'name': u'Vlea Isphern Oaly 4',
u'orbit': u'4',
u'ore': {u'anthracite': 1,
u'bauxite': 1,
u'beryl': 1000,
u'chalcopyrite': 1000,
u'chromite': 1000,
u'fluorite': 1000,
u'galena': 1000,
u'goethite': 1,
u'gold': 1,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1000,
u'sulfur': 1,
u'trona': 1,
u'uraninite': 1,
u'zircon': 1000},
u'size': u'8',
u'star_id': u'50329',
u'star_name': u'Vlea Isphern Oaly',
u'station': {u'id': u'360983',
u'name': u'SASS 5',
u'x': u'424',
u'y': u'-213'},
u'type': u'asteroid',
u'x': u'433',
u'y': u'-245',
u'zone': u'1|0'},
{u'id': u'362440',
u'image': u'a13-5',
u'name': u'Vlea Isphern Oaly 5',
u'orbit': u'5',
u'ore': {u'anthracite': 1,
u'bauxite': 1,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 1,
u'galena': 1,
u'goethite': 1,
u'gold': 1,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 1,
u'trona': 6574,
u'uraninite': 1,
u'zircon': 2590},
u'size': u'7',
u'star_id': u'50329',
u'star_name': u'Vlea Isphern Oaly',
u'station': {u'id': u'360983',
u'name': u'SASS 5',
u'x': u'424',
u'y': u'-213'},
u'type': u'asteroid',
u'x': u'431',
u'y': u'-245',
u'zone': u'1|0'},
{u'id': u'362441',
u'image': u'a2-6',
u'name': u'Vlea Isphern Oaly 6',
u'orbit': u'6',
u'ore': {u'anthracite': 1,
u'bauxite': 1,
u'beryl': 4000,
u'chalcopyrite': 5000,
u'chromite': 1,
u'fluorite': 1,
u'galena': 1,
u'goethite': 1,
u'gold': 1,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 1,
u'trona': 1,
u'uraninite': 1,
u'zircon': 1000},
u'size': u'3',
u'star_id': u'50329',
u'star_name': u'Vlea Isphern Oaly',
u'station': {u'id': u'360983',
u'name': u'SASS 5',
u'x': u'424',
u'y': u'-213'},
u'type': u'asteroid',
u'x': u'430',
u'y': u'-244',
u'zone': u'1|0'},
{u'id': u'362442',
u'image': u'a9-7',
u'name': u'Vlea Isphern Oaly 7',
u'orbit': u'7',
u'ore': {u'anthracite': 1,
u'bauxite': 1,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 1,
u'galena': 1,
u'goethite': 1,
u'gold': 1,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 5500,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 1,
u'trona': 1,
u'uraninite': 1,
u'zircon': 1},
u'size': u'6',
u'star_id': u'50329',
u'star_name': u'Vlea Isphern Oaly',
u'station': {u'id': u'360983',
u'name': u'SASS 5',
u'x': u'424',
u'y': u'-213'},
u'type': u'asteroid',
u'x': u'430',
u'y': u'-242',
u'zone': u'1|0'},
{u'id': u'362443',
u'image': u'a10-8',
u'name': u'Vlea Isphern Oaly 8',
u'orbit': u'8',
u'ore': {u'anthracite': 6250,
u'bauxite': 108,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 1,
u'galena': 1,
u'goethite': 1,
u'gold': 1,
u'gypsum': 1,
u'halite': 55,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 1,
u'trona': 300,
u'uraninite': 1,
u'zircon': 1},
u'size': u'9',
u'star_id': u'50329',
u'star_name': u'Vlea Isphern Oaly',
u'station': {u'id': u'360983',
u'name': u'SASS 5',
u'x': u'424',
u'y': u'-213'},
u'type': u'asteroid',
u'x': u'431',
u'y': u'-241',
u'zone': u'1|0'}],
u'color': u'magenta',
u'id': u'50329',
u'influence': u'276',
u'name': u'Vlea Isphern Oaly',
u'station': {u'id': u'360983',
u'name': u'SASS 5',
u'x': u'424',
u'y': u'-213'},
u'x': u'432',
u'y': u'-243',
u'zone': u'1|0'},
{u'color': u'blue',
u'id': u'50130',
u'influence': u'192',
u'name': u'Aw Ohaeph Vli',
u'x': u'438',
u'y': u'-247',
u'zone': u'1|0'},
{u'bodies': [{u'id': u'358105',
u'image': u'p13-1',
u'name': u'Chou Idow 1',
u'orbit': u'1',
u'ore': {u'anthracite': 1500,
u'bauxite': 1,
u'beryl': 1,
u'chalcopyrite': 1300,
u'chromite': 1400,
u'fluorite': 1,
u'galena': 2200,
u'goethite': 1,
u'gold': 1,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 1500,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 2100,
u'sulfur': 1,
u'trona': 1,
u'uraninite': 1,
u'zircon': 1},
u'size': u'45',
u'star_id': u'49730',
u'star_name': u'Chou Idow',
u'type': u'habitable planet',
u'water': 8300,
u'x': u'440',
u'y': u'-254',
u'zone': u'1|-1'},
{u'id': u'358106',
u'image': u'p10-2',
u'name': u'Chou Idow 2',
u'orbit': u'2',
u'ore': {u'anthracite': 500,
u'bauxite': 1,
u'beryl': 250,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 250,
u'galena': 1,
u'goethite': 1000,
u'gold': 1,
u'gypsum': 500,
u'halite': 1,
u'kerogen': 500,
u'magnetite': 5000,
u'methane': 500,
u'monazite': 250,
u'rutile': 1,
u'sulfur': 500,
u'trona': 500,
u'uraninite': 1,
u'zircon': 250},
u'size': u'63',
u'star_id': u'49730',
u'star_name': u'Chou Idow',
u'type': u'habitable planet',
u'water': 6800,
u'x': u'441',
u'y': u'-255',
u'zone': u'1|-1'},
{u'empire': {u'alignment': u'hostile',
u'id': u'46258',
u'is_isolationist': u'0',
u'name': u'Last Legion'},
u'id': u'358107',
u'image': u'p18-3',
u'name': u'in1',
u'orbit': u'3',
u'ore': {u'anthracite': 1,
u'bauxite': 4200,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 3200,
u'fluorite': 1,
u'galena': 1,
u'goethite': 1,
u'gold': 1,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 1,
u'trona': 1,
u'uraninite': 2600,
u'zircon': 1},
u'size': u'70',
u'star_id': u'49730',
u'star_name': u'Chou Idow',
u'type': u'habitable planet',
u'water': 7600,
u'x': u'441',
u'y': u'-257',
u'zone': u'1|-1'},
{u'id': u'358108',
u'image': u'p28-5',
u'name': u'Chou Idow 5',
u'orbit': u'5',
u'ore': {u'anthracite': 1,
u'bauxite': 1500,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 1,
u'galena': 1,
u'goethite': 2500,
u'gold': 1,
u'gypsum': 2000,
u'halite': 3500,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 1,
u'trona': 1,
u'uraninite': 1500,
u'zircon': 1},
u'size': u'58',
u'star_id': u'49730',
u'star_name': u'Chou Idow',
u'type': u'habitable planet',
u'water': 9230,
u'x': u'438',
u'y': u'-258',
u'zone': u'1|-1'},
{u'id': u'358109',
u'image': u'p38-6',
u'name': u'Chou Idow 6',
u'orbit': u'6',
u'ore': {u'anthracite': 1,
u'bauxite': 1,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 3000,
u'galena': 1,
u'goethite': 1,
u'gold': 1,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 1,
u'magnetite': 1,
u'methane': 1,
u'monazite': 1,
u'rutile': 1,
u'sulfur': 7000,
u'trona': 1,
u'uraninite': 1,
u'zircon': 1},
u'size': u'55',
u'star_id': u'49730',
u'star_name': u'Chou Idow',
u'type': u'habitable planet',
u'water': 8000,
u'x': u'437',
u'y': u'-257',
u'zone': u'1|-1'},
{u'id': u'358110',
u'image': u'p5-7',
u'name': u'Chou Idow 7',
u'orbit': u'7',
u'ore': {u'anthracite': 1,
u'bauxite': 2250,
u'beryl': 1,
u'chalcopyrite': 250,
u'chromite': 1,
u'fluorite': 1,
u'galena': 2250,
u'goethite': 1250,
u'gold': 1,
u'gypsum': 1250,
u'halite': 250,
u'kerogen': 1,
u'magnetite': 250,
u'methane': 250,
u'monazite': 1,
u'rutile': 1250,
u'sulfur': 250,
u'trona': 250,
u'uraninite': 250,
u'zircon': 1},
u'size': u'52',
u'star_id': u'49730',
u'star_name': u'Chou Idow',
u'type': u'habitable planet',
u'water': 6200,
u'x': u'437',
u'y': u'-255',
u'zone': u'1|-1'},
{u'id': u'358111',
u'image': u'p37-8',
u'name': u'Chou Idow 8',
u'orbit': u'8',
u'ore': {u'anthracite': 2000,
u'bauxite': 1,
u'beryl': 1,
u'chalcopyrite': 1,
u'chromite': 1,
u'fluorite': 1,
u'galena': 1,
u'goethite': 1,
u'gold': 1,
u'gypsum': 1,
u'halite': 1,
u'kerogen': 2000,
u'magnetite': 2000,
u'methane': 1,
u'monazite': 2000,
u'rutile': 1,
u'sulfur': 1,
u'trona': 1,
u'uraninite': 2000,
u'zircon': 1},
u'size': u'50',
u'star_id': u'49730',
u'star_name': u'Chou Idow',
u'type': u'habitable planet',
u'water': 6225,
u'x': u'438',
u'y': u'-254',
u'zone': u'1|-1'}],
u'color': u'green',
u'id': u'49730',
u'influence': u'0',
u'name': u'Chou Idow',
u'x': u'439',
u'y': u'-256',
u'zone': u'1|-1'},
{u'color': u'magenta',
u'id': u'49330',
u'influence': u'0',
u'name': u'Eckladdee',
u'x': u'440',
u'y': u'-267',
u'zone': u'1|-1'}],
u'status': {u'empire': {u'alliance_id': u'1785',
u'colonies': {u'358099': u'Cloraphorm III'},
u'essentia': 0,
u'has_new_messages': u'0',
u'home_planet_id': u'358099',
u'id': u'51819',
u'insurrect_value': u'100000',
u'is_isolationist': u'1',
u'latest_message_id': u'0',
u'name': u'MikeTwo',
u'next_colony_cost': u'100000',
u'next_colony_srcs': u'100000',
u'next_station_cost': u'108696',
u'planets': {u'358099': u'Cloraphorm III'},
u'primary_embassy_id': u'5048765',
u'rpc_count': 276,
u'self_destruct_active': u'0',
u'self_destruct_date': u'08 06 2015 05:49:38 +0000',
u'stations': {},
u'status_message': u'Just getting started',
u'tech_level': u'9'},
u'server': {u'rpc_limit': 10000,
u'star_map_size': {u'x': [-1500, 1500], u'y': [-1500, 1500]},
u'time': u'10 07 2015 17:41:04 +0000',
u'version': 3.0911}}}}
''')
# Run this module's unittest test cases when the file is executed as a script.
if __name__ == '__main__':
    unittest.main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
555,
715,
395,
198,
6738,
15290,
1330,
8529,
11,
6139,
44,
735,
11,
15529,
11,
869,
198,
198,
11748,
279,
2645,
330,
9613,
13,
7295,
13,
... | 1.613346 | 44,704 |
#Write a guessing game where the user has to guess a secret number.
# After every guess the program tells the user whether their number was too large or too small.
# At the end the number of tries needed should be printed.
# It counts only as one try if they input the same number multiple times consecutively.
import random


def count_tries(guesses):
    """Return the number of tries for a sequence of guesses.

    A run of identical consecutive guesses counts as a single try, as the
    game rules require; non-consecutive repeats still count separately.
    (The original used ``set()``, which wrongly collapsed ALL repeats.)

    Args:
        guesses: Guessed numbers in the order they were entered.

    Returns:
        int: Number of tries after collapsing consecutive repeats.
    """
    tries = 0
    previous = None
    for guess in guesses:
        if guess != previous:  # a new try only when the value changes
            tries += 1
        previous = guess
    return tries


def main():
    """Play one round of the guessing game on stdin/stdout."""
    # randrange(1, 100) yields a secret in [1, 99]; the debug print of the
    # secret that was here has been removed — it defeated the game.
    secretnum = random.randrange(1, 100)
    guesses = []  # every validated guess, in order, for the try count
    print("Please input your guess")
    while True:
        raw = input()
        # Re-prompt until the input is a non-negative integer literal.
        while not raw.isdigit():
            print("Your input is not a valid number, please try again")
            raw = input()
        # Compare as int; the original compared the raw string to the int
        # secret, so its outer loop could never terminate.
        guess = int(raw)
        guesses.append(guess)
        if guess < secretnum:
            print("Your input number is lower than the secret number, try higher")
            print("Please input your guess again")
        elif guess > secretnum:
            print("Your input number is higher than the secret number, try lower")
            print("Please input your guess again")
        else:
            print("Bingo, You've guessed it correcly in {} times".format(
                count_tries(guesses)))
            break


if __name__ == '__main__':
    main()
| [
2,
16594,
257,
25260,
983,
810,
262,
2836,
468,
284,
4724,
257,
3200,
1271,
13,
198,
2,
2293,
790,
4724,
262,
1430,
4952,
262,
2836,
1771,
511,
1271,
373,
1165,
1588,
393,
1165,
1402,
13,
198,
2,
1629,
262,
886,
262,
1271,
286,
84... | 2.934509 | 397 |
import os
from tkinter import filedialog, messagebox
from dialogs.password_prompt import askpassword
# Default directory offered by open/save dialogs: the current user's home.
DOCUMENT_PATH = os.path.expanduser("~")
# (label, glob-pattern) pairs in the form tkinter's filedialog expects for
# its ``filetypes`` option.
FILETYPE_ALL = ("All types", "*.*")
FILETYPE_PDF = ("PDF File", "*.pdf")
FILETYPE_CSV = ("CSV File", "*.csv")
# Window-title templates; ``{}`` is filled in with a file-type label
# (presumably via str.format by the dialog helpers — confirm against callers).
DIALOG_TITLE = "Select {} file"
SAVE_DIALOG_TITLE = "Save as {} file"
| [
11748,
28686,
198,
6738,
256,
74,
3849,
1330,
5717,
498,
519,
11,
3275,
3524,
198,
198,
6738,
17310,
82,
13,
28712,
62,
16963,
457,
1330,
1265,
28712,
198,
198,
38715,
5883,
3525,
62,
34219,
796,
28686,
13,
6978,
13,
11201,
392,
7220,... | 2.688525 | 122 |
from django.apps import AppConfig
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.888889 | 9 |
import torch
import torch.nn.functional as F
from typing import Optional
from torch import Tensor
# flags required to enable jit fusion kernels
# NOTE(review): these are private ``torch._C`` APIs; behaviour may change
# across PyTorch versions — confirm against the installed release.
# Turning profiling mode/executor off makes TorchScript use the legacy
# executor, which honours the fuser overrides set below.
torch._C._jit_set_profiling_mode(False)
torch._C._jit_set_profiling_executor(False)
# Allow the JIT to fuse elementwise ops into single kernels on CPU and GPU.
torch._C._jit_override_can_fuse_on_cpu(True)
torch._C._jit_override_can_fuse_on_gpu(True)
@torch.jit.script
@torch.jit.script
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
6738,
19720,
1330,
32233,
198,
6738,
28034,
1330,
309,
22854,
198,
198,
2,
9701,
2672,
284,
7139,
474,
270,
21748,
50207,
198,
13165,
354,
13557,
34,
13557,
45051,
62... | 2.734848 | 132 |
# Copyright (c) ZenML GmbH 2021. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
from typing import Optional
import click
from zenml.cli import utils as cli_utils
from zenml.cli.cli import cli
from zenml.core.repo import Repository
@cli.group("container-registry")
def container_registry() -> None:
"""Utilities for container registries."""
@container_registry.command("get")
def get_active_container_registry() -> None:
"""Gets the container registry of the active stack."""
name = Repository().get_active_stack().container_registry_name
if name:
cli_utils.declare(f"Active container registry: {name}")
else:
cli_utils.declare(
f"No container registry set for current active stack: {Repository().get_active_stack_key()}"
)
@container_registry.command(
    "register", context_settings=dict(ignore_unknown_options=True)
)
@click.argument(
    "name",
    required=True,
    type=click.STRING,
)
@click.option(
    "--uri",
    "-u",
    help="The URI for the container registry to register.",
    required=True,
    type=click.STRING,
)
def register_container_registry(name: str, uri: str) -> None:
    """Create a container registry for ``uri`` and store it under ``name``."""
    # Imported inside the command so the CLI does not pay the import cost
    # unless this command actually runs.
    from zenml.container_registries import BaseContainerRegistry

    repository = Repository()
    new_registry = BaseContainerRegistry(uri=uri, repo_path=repository.path)
    service = repository.get_service()
    service.register_container_registry(name, new_registry)
    cli_utils.declare(f"Container registry `{name}` successfully registered!")
@container_registry.command("list")
def list_container_registries() -> None:
"""List all available container registries from service."""
repo = Repository()
service = repo.get_service()
if len(service.container_registries) == 0:
cli_utils.warning("No container registries registered!")
return
active_container_registry = str(
repo.get_active_stack().container_registry_name
)
service = repo.get_service()
cli_utils.title("Container registries:")
cli_utils.print_table(
cli_utils.format_component_list(
service.container_registries, active_container_registry
)
)
@container_registry.command(
    "describe",
    help="Show details about the current active container registry.",
)
@click.argument(
    "container_registry_name",
    type=click.STRING,
    required=False,
)
def describe_container_registry(
    container_registry_name: Optional[str],
) -> None:
    """Show details about the current active container registry."""
    repo = Repository()
    # Fall back to the active stack's registry when no name was supplied.
    if not container_registry_name:
        container_registry_name = str(
            repo.get_active_stack().container_registry_name
        )
    registries = repo.get_service().container_registries
    if len(registries) == 0:
        cli_utils.warning("No container registries registered!")
        return
    try:
        registry_details = registries[container_registry_name]
    except KeyError:
        cli_utils.error(
            f"Container registry `{container_registry_name}` does not exist."
        )
        return
    cli_utils.title("Container Registry:")
    is_active = (
        repo.get_active_stack().container_registry_name
        == container_registry_name
    )
    # Emit the ACTIVE marker (or a blank line) exactly as before.
    cli_utils.declare("**ACTIVE**\n" if is_active else "")
    cli_utils.declare(f"NAME: {container_registry_name}")
    cli_utils.print_component_properties(registry_details.dict())
@container_registry.command("delete")
@click.argument("container_registry_name", type=str)
def delete_container_registry(container_registry_name: str) -> None:
"""Delete a container registry."""
service = Repository().get_service()
service.delete_container_registry(container_registry_name)
cli_utils.declare(f"Deleted container registry: {container_registry_name}")
| [
2,
220,
15069,
357,
66,
8,
14760,
5805,
402,
2022,
39,
33448,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
220,
345,
743,
407,
779,
... | 2.825336 | 1,563 |