content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
"""
The module, which contains all the classes which are directly concerned with the search action.
This includes the classes SearchResult, SearchBatch, SearchOptions and the batching strategies
"""
import os
import csv
import json
import datetime
from collections import deque
from typing import Tuple, Type, List, Union, Any, Dict
import requests
from tempfile import TemporaryDirectory, TemporaryFile
from zipfile import ZipFile
from pykitopen.publication import Publication, PublicationView
from pykitopen.util import csv_as_dict_list, unzip_bytes
from pykitopen.mixins import DictTransformationMixin
# INTERFACES AND ABSTRACT CLASSES
# ###############################
class BatchingStrategy:
"""
This is the interface for request batching strategy. A BatchingStrategy object acts as a function, by having to
implement the __call__ method. This call method has to return a list of SearchBatch objects. A batching strategy
is constructed by passing the config dict of the overall configuration for the KitOpen wrapper as well as the
options dict, which defines the parameters for a single search action.
Background
----------
So what is a batching strategy even doing in the broader context of the whole package and why is it important?
For the requests to the KITOpen database it is important, that there is a functionality, which breaks down request
for large amounts of data into smaller individual request, because for large requests there is the chance that the
server will take way to long thus running the request into a timeout.
So the search batching strategies are classes, which essentially represent different methods of dividing a big
request into smaller requests.
"""
def __call__(self):
"""
Returns a list of SearchBatch objects, which have been created according to the described strategy from the
basic "options" dict passed to this object
:return: List[SearchBatch]
"""
raise NotImplementedError()
class SearchOptions:
"""
This class represents the parameters which are passed to the `search` action of the `KitOpen` wrapper.
The `search` method itself expects a dict to be passed to it to define the options for the search, but this dict
is then converted internally to a `SearchOptions` object, as it wraps some important functionality to be executed
on these options.
"""
# CLASS CONSTANTS
# ---------------
_ARGS = ['author', 'start', 'end', 'view']
"""A list of the string keys, which are possible to pass as options"""
_DEFAULT_CONFIG = {
'default_view': Publication.VIEWS.BASIC,
'default_start': '2000',
'default_end': '',
'default_author': 'MUSTERMANN, M*'
}
def __init__(self,
author: Union[List[str], str],
start: str,
end: str,
view: PublicationView):
"""
The constructor.
Design Choice
-------------
I have made the design choice to make the constructor of this object expect every possible search option as a
positional argument explicitly, instead of having the constructor accept the dict. The primary way to create
this object will still be using the "from_dict" class method, which does exactly as it sounds, but by defining
the arguments explicitly, it is more concise and obvious right away what the search options actually includes.
:param author:
:param start:
:param end:
:param view:
"""
self.author = author
self.start = start
self.end = end
self.view = view
# The SearchParametersBuilder is the object which manages the construction of the dict, which will then be
# used as the GET parameters for the actual network request from the simplified options passed to the search
# method of the wrapper.
self.parameters_builder = SearchParametersBuilder()
self.parameters_builder.set_options(self.to_dict())
# PUBLIC METHODS
# --------------
def to_parameters(self) -> Dict[str, Any]:
"""
Returns the dict, which is used as the GET parameters for the actual network request to the database
:return:
"""
return self.parameters_builder.get_parameters()
def to_dict(self) -> Dict[str, Any]:
"""
Returns the search options as a dict
:return:
"""
return {
'author': self.author,
'start': self.start,
'end': self.end,
'view': self.view
}
@classmethod
def from_dict(cls, data: dict, config: dict = _DEFAULT_CONFIG):
"""
Creates a new `SearchOptions` object from the given data dict and an optional config.
The config is also part of creating the dict, because it has to be possible to also supply an options dict to
the `search` method which only contains a subset of all possible options. All the missing options are then
substituted by their default values. And those default values can be customized within the config...
:param data:
:param config:
:return:
"""
kwargs = {}
for key in cls._ARGS:
default_key = f'default_{key}'
kwargs[key] = data[key] if key in data.keys() else config[default_key]
return SearchOptions(**kwargs)
# PROTECTED METHODS
# -----------------
# MAGIC METHODS
# -------------
class SearchBatch:
"""
The SearchBatch class represents one of the parts of a search request to the KITOpen database. The SearchBatch
objects are the actual lowest layer of abstraction, which actually execute the network request to the database and
handle the processing of the response.
Processing the response
-----------------------
The response, which is returned by the KITOpen database is a little bit different than the usual REST API. KITOpen
has chosen to only export the detailed publication data in the form of a ZIP file, which in turn contains a CSV,
which actually contains the data. Thus the reponse has to be processed by first unpacking the downloaded ZIP file
into a temp folder and then parsing the CSV for the data.
Iterator
--------
The SearchBatch class implements the magic methods to act as an iterator, which will simply return all the
`Publication` objects, which have been processed from the response of the request
.. code:: python
batch = SearchBatch(config, options)
batch.execute()
for publication in batch:
print(publication)
"""
# CLASS CONSTANTS
# ---------------
# These constants define the names of the files, which are contained within the zip file, which is returned as a
# response of the KITOpen server. These file names are always the same and have to be known to read the files for
# the data they contain
PUBLICATIONS_FILE_NAME = 'Publikationen.csv'
ANALYSIS_FILE_NAME = 'Analyse.csv'
# PUBLIC METHODS
# --------------
def execute(self):
"""
Actually executes the search batch, by sending the request to the server and processing the response
:raises ConnectionError: In case anything with the request went wrong
:return:
"""
self.response = self.send_request()
if self.response.status_code == 200:
# The 'unzip_bytes' function takes the binary string, which represents a ZIP file unzips the content of
# this file into a TemporaryDictionary and then returns the object, which describes this temp folder
temp_dir: TemporaryDirectory = unzip_bytes(self.response.content)
# The function "csv_as_dict_list" does exactly how it sounds it takes the path of a csv file and returns
# a list of dicts, where each dict represents a single row in the csv file, the keys being the headers
# of the scv rows.
publications_file_path = os.path.join(temp_dir.name, self.PUBLICATIONS_FILE_NAME)
publications_rows = csv_as_dict_list(publications_file_path)
self.publications = self._get_publications_from_rows(publications_rows, self.options.view)
self.length = len(self.publications)
self.success = True
else:
raise ConnectionError()
def send_request(self) -> requests.Response:
"""
This method actually sends the request to the KITOpen server and returns the response.
:return: requests.Response
"""
# The url of the KITOpen server is defined as part of the overall config dict
url = self.config['search_url']
# The parameters for the GET request to the server are directly derived from the options dict passed to the
# search action. The ".to_parameter" method performs this conversion.
parameters = self.options.to_parameters()
return requests.get(
url=url,
params=parameters
)
# PROTECTED METHODS
# -----------------
@classmethod
def _get_publications_from_rows(cls,
rows: List[Dict[str, Any]],
publication_view: PublicationView) -> List[Publication]:
"""
Given a list of dicts, where each dict describes a publication and the PublicationView object, which was used
to retrieve these publications, this method will return a list of Publication objects, which contain the data
from the dicts and the keys according to the given view.
:param rows:
:param publication_view:
:return:
"""
publications = []
for row in rows:
_publication = Publication(row, publication_view)
publications.append(_publication)
return publications
# MAGIC METHODS
# -------------
def __bool__(self):
"""
The boolean state of this object evaluates to whether or not the request was successful
:return:
"""
return self.success
def __next__(self) -> Publication:
"""
This method implements the functionality of being able to iterate a SearchBatch object.
For each call to the next function this will simply go through the internal list of publications.
:raises AssertionError: If the request to the server was not successful
:return:
"""
assert self.response is not None, "The batch has to be executed first, before it can be iterated"
assert self.success, "The search request has to be successful to be iterated"
publication = self.publications[self.index]
self.index += 1
if self.index >= self.length:
raise StopIteration
return publication
class SearchResult:
"""
This class represents the result of a search action. A `SearchResult` object is in fact returned for every call to
the `search` method of the `KitOpen` wrapper.
Iterator
--------
The `SearchResult` class implements the necessary methods to act as an iterator, which returns all the publications
that have been returned by the class.
.. code:: python
from pykitopen import KitOpen
from pykitopen.config import DEFAULT
pykitopen = KitOpen(DEFAULT)
results: SearchResult = pykitopen.search()
for publication in results:
print(publication)
"""
# PUBLIC METHODS
# --------------
def create_batches(self) -> List[SearchBatch]:
"""
Returns a list of SearchBatch objects, which have been created according to the config.
:return:
"""
# The '_create_batches' is a class method which creates the list of SearchBatch objects according to the
# BatchingStrategy defined in the config.
return self._create_batches(
self.config,
self.options
)
# PROTECTED METHODS
# -----------------
@classmethod
def _create_batches(cls,
config: dict,
options: SearchOptions) -> List[SearchBatch]:
"""
Returns a list of SearchBatch objects, which have been created from the passed search options and the
BatchingStrategy defined in the config dict.
:return:
"""
# The 'batching_strategy' field of the config is supposed to contain a class(!) which implements the
# BatchingStrategy interface. Objects of this type accept the config and the options as contruction arguments
# and can be called directly. This call will return a list of SearchBatch objects, which have been created
# from the overall options according to some criterion.
strategy_class = config['batching_strategy']
strategy = strategy_class(config, options)
return strategy()
# MAGIC METHODS
# -------------
def __next__(self) -> Publication:
"""
Returns the next `Publication` object in the list of results from the request, when calling the next() function
on the object
Implementation
--------------
So the `SearchResult` class is actually not concerned with the actual network request to the database server.
It does not contain the list of publications itself. The actual requests are managed by the `SearchBatch`
objects. This class only manages a list of all these search batches.
So the implementation of the iterator works by executing the next search batch at the time, at which it is
needed and then for each next() call to the `SearchResult` object itself a next call to the current search
batch will return the actual publication, which is then also returned by this method. If the current batch
runs out of publications the next one is executed and then set as the current one etc...
:return:
"""
try:
batch = self.batches[self.index]
if not bool(batch):
batch.execute()
publication = next(batch)
except StopIteration:
self.index += 1
if self.index >= self.length:
raise StopIteration
batch = self.batches[self.index]
if not bool(batch):
batch.execute()
publication = next(batch)
return publication
# DIFFERENT BATCHING STRATEGIES
# #############################
class NoBatching(BatchingStrategy):
"""
Implements the `BatchingStrategy` interface.
This class defines the most simple batching strategy, which is no batching at all. This strategy will not divide
the request at all, it will simply take the search options and create a single SearchBatch object from it.
"""
def __call__(self) -> List[SearchBatch]:
"""
Will return a list with a single SearchBatch element, which represents the entire search request.
:return:
"""
return [SearchBatch(self.config, self.options)]
class YearBatching(BatchingStrategy):
"""
Implements the `BatchingStrategy` Interface
This class defines a batching strategy, which will divide the search options by the individual years, which are
included in the search. A SearchBatch will be created for each year within the given time span.
"""
| [
37811,
198,
464,
8265,
11,
543,
4909,
477,
262,
6097,
543,
389,
3264,
5213,
351,
262,
2989,
2223,
13,
198,
198,
1212,
3407,
262,
6097,
11140,
23004,
11,
11140,
33,
963,
11,
11140,
29046,
290,
262,
15458,
278,
10064,
198,
37811,
198,
... | 2.888009 | 5,429 |
'''
Created on 15 déc. 2021
@author: slinux
'''
from .wxRavenGeneralDesign import *
import wx.lib as lib
import wx.lib.mixins.listctrl as listmix
import wx.aui
import wx.lib.mixins.listctrl as listmix
class RavenErrorLogConsole(wxRavenErrorLogConsolePanel, listmix.ColumnSorterMixin):
'''
classdocs
'''
view_base_name = "Error Log Console"
view_name = "Error Log Console"
parent_frame = None
default_position = "mgr"
#icon = wx.Bitmap( u"res/default_style/normal/error_log.png", wx.BITMAP_TYPE_ANY )
icon = "error_console"
allIcons = {}
message_type_mapping = {}
def __init__(self, parentFrame, position = "toolbox1", viewName= "Error Log Console"):
'''
Constructor
'''
super().__init__(parent=parentFrame)
self.view_base_name = "Error Log Console"
self.view_name = viewName
self.parent_frame = parentFrame
self.default_position = position
self._display = []
#self._msg = True
#self._warning = True
#self._error = True
self._DebugWindow = None
self.itemDataMap = {}
self.allIcons = { }
self.message_type_mapping = {}
self._listInit = False
self.InitBasicMapping()
self.InitPlugingAndVariousMapping()
self.InitConsoleLog()
self.InitToolbar()
parentFrame.RessourcesProvider.ApplyThemeOnPanel(self)
parentFrame.Add(self, self.view_name ,position, parentFrame.RessourcesProvider.GetImage(self.icon))
#parentFrame.Bind( wx.aui.EVT_AUI_PANE_RESTORE, self.OnAuiPaneRestore )
#parentFrame.Bind( wx.aui.EVT_AUI_PANE_ACTIVATED, self.OnAuiPaneActivated )
#parentFrame.Bind( wx.aui.EVT_AUI_RENDER, self.OnAuiPaneRender )
#parentFrame.Bind( wx.aui.EVT_AUI_PANE_CLOSE, self.OnAuiPaneClose )
self._logCurrentCursor = -1
#self.dummyTest()
self.SetAutoLayout(True)
#self.m_auiToolBar1.ToggleTool(self.m_showWarnings.GetId(), True)
"""
def OnAuiPaneClose(self, evt):
print("OnAuiPaneClose in console log")
def OnAuiPaneRestore(self, evt):
print("OnAuiPaneRestore in console log")
def OnAuiPaneRender(self, evt):
print("OnAuiPaneRender in console log")
def OnAuiPaneActivated(self, evt):
print("OnAuiPaneActivated in console log")
"""
'''
Virtualization Update
'''
# Used by the ColumnSorterMixin, see wx/lib/mixins/listctrl.py
# Used by the ColumnSorterMixin, see wx/lib/mixins/listctrl.py
| [
7061,
6,
198,
41972,
319,
1315,
39073,
66,
13,
33448,
198,
198,
31,
9800,
25,
1017,
259,
2821,
198,
7061,
6,
198,
198,
6738,
764,
49345,
49098,
12218,
23067,
1330,
1635,
628,
198,
11748,
266,
87,
13,
8019,
355,
9195,
198,
11748,
266... | 1.866223 | 1,652 |
import FWCore.ParameterSet.Config as cms
# make patCandidates
from PhysicsTools.PatAlgos.producersLayer1.patCandidates_cff import *
# make selectedPatCandidates
from PhysicsTools.PatAlgos.selectionLayer1.selectedPatCandidates_cff import *
# make cleanPatCandidates
from PhysicsTools.PatAlgos.cleaningLayer1.cleanPatCandidates_cff import *
# count cleanPatCandidates (including total number of leptons)
from PhysicsTools.PatAlgos.selectionLayer1.countPatCandidates_cff import *
patDefaultSequence = cms.Sequence(
patCandidates *
selectedPatCandidates *
cleanPatCandidates *
countPatCandidates
)
| [
11748,
48849,
14055,
13,
36301,
7248,
13,
16934,
355,
269,
907,
198,
198,
2,
787,
1458,
41572,
37051,
198,
6738,
23123,
33637,
13,
12130,
2348,
70,
418,
13,
1676,
41213,
49925,
16,
13,
8071,
41572,
37051,
62,
66,
487,
1330,
1635,
198,... | 3.170103 | 194 |
# python server.py
# localhost:5000
import time
from flask import Flask, request, abort
from datetime import datetime as dt
app = Flask(__name__)
db = [
{
'name': 'Jack',
'text': 'Hello all',
'time': time.time()
},
{
'name': 'Mick',
'text': 'Hello Jack',
'time': time.time()
}
]
@app.route("/")
@app.route("/status")
@app.route("/send", methods=['POST'])
@app.route("/messages")
app.run()
| [
2,
21015,
4382,
13,
9078,
198,
2,
1957,
4774,
25,
27641,
198,
11748,
640,
198,
6738,
42903,
1330,
46947,
11,
2581,
11,
15614,
198,
6738,
4818,
8079,
1330,
4818,
8079,
355,
288,
83,
198,
198,
1324,
796,
46947,
7,
834,
3672,
834,
8,
... | 2.193396 | 212 |
#!/usr/bin/env python
#coding:utf-8
from flask import Flask, request
from . import app,jsonrpc
import utils
from auth import auth_login
import json,time,traceback
@jsonrpc.method('selected.get')
@auth_login
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
66,
7656,
25,
40477,
12,
23,
198,
6738,
42903,
1330,
46947,
11,
2581,
198,
6738,
764,
1330,
598,
11,
17752,
81,
14751,
198,
11748,
220,
3384,
4487,
198,
6738,
6284,
1330,
6284,
62... | 2.930556 | 72 |
from . import eggs
| [
6738,
764,
1330,
9653,
198
] | 3.8 | 5 |
import sqlite3
conn = sqlite3.connect('Data.db')
c = conn.cursor() | [
11748,
44161,
578,
18,
198,
198,
37043,
796,
44161,
578,
18,
13,
8443,
10786,
6601,
13,
9945,
11537,
198,
66,
796,
48260,
13,
66,
21471,
3419
] | 2.576923 | 26 |
"""
Contains pytest fixtures to be used by tests.
"""
from docker import Client
from docker.utils import kwargs_from_env
import pytest
from dockerrotate.main import main as docker_rotate_main
from imagetools import ImageFactory
from containertools import ContainerFactory
@pytest.fixture(scope="module")
@pytest.fixture
def docker_rotate(pytestconfig):
"""A wrapper around dockerrotate.main that injects --client-version when supplied."""
return _docker_rotate
@pytest.yield_fixture
@pytest.yield_fixture
| [
37811,
198,
4264,
1299,
12972,
9288,
34609,
284,
307,
973,
416,
5254,
13,
198,
37811,
198,
6738,
36253,
1330,
20985,
198,
6738,
36253,
13,
26791,
1330,
479,
86,
22046,
62,
6738,
62,
24330,
198,
11748,
12972,
9288,
198,
198,
6738,
36253,... | 3.275 | 160 |
import os
BASE_DIR: str = os.getcwd()
MEDIA_DIR: str = os.path.join(BASE_DIR, 'media/temp/') | [
11748,
28686,
198,
198,
33,
11159,
62,
34720,
25,
965,
796,
28686,
13,
1136,
66,
16993,
3419,
198,
30733,
3539,
62,
34720,
25,
965,
796,
28686,
13,
6978,
13,
22179,
7,
33,
11159,
62,
34720,
11,
705,
11431,
14,
29510,
14,
11537
] | 2.214286 | 42 |
from invoke import run
| [
6738,
26342,
1330,
1057,
628
] | 4.8 | 5 |
my_list = [0, 1, 2, 3, 4, 5, 6]
my_iterator = iter(my_list)
for char in range(len(my_list)):
print(next(my_iterator)) | [
1820,
62,
4868,
796,
685,
15,
11,
352,
11,
362,
11,
513,
11,
604,
11,
642,
11,
718,
60,
201,
198,
1820,
62,
48727,
796,
11629,
7,
1820,
62,
4868,
8,
201,
198,
201,
198,
1640,
1149,
287,
2837,
7,
11925,
7,
1820,
62,
4868,
8,
... | 2.1 | 60 |
"""
@author: Bryan Silverthorn <bcs@cargo-cult.org>
"""
from __future__ import absolute_import
import numpy
import contextlib
def tolist_deeply(value):
"""
Fully convert a numpy object into a list.
"""
if isinstance(value, numpy.ndarray):
return map(tolist_deeply, value.tolist())
elif isinstance(value, list):
return map(tolist_deeply, value)
elif isinstance(value, tuple):
return tuple(map(tolist_deeply, value))
else:
return value
def normalize_dtype(dtype):
"""
Construct an equivalent normal-form dtype.
Normal-form dtypes are guaranteed to satisfy, in particular, the property
of "shape greediness": the dtype's base property, if non-None, refers to a
type with empty shape.
"""
if dtype.shape:
normal_base = normalize_dtype(dtype.base)
return numpy.dtype((normal_base.base, dtype.shape + normal_base.shape))
else:
return dtype
def semicast(*arrays):
"""
Broadcast compatible ndarray shape prefixes.
"""
# establish the final prefix shape
pre_ndim = max(len(a.shape[:i]) for (a, i) in arrays)
pre_padding = [(1,) * (pre_ndim - len(a.shape[:i])) for (a, i) in arrays]
pre_shape = tuple(map(max, *(p + a.shape[:i] for ((a, i), p) in zip(arrays, pre_padding))))
# broadcast the arrays
from numpy.lib.stride_tricks import as_strided
casts = []
for ((a, i), p) in zip(arrays, pre_padding):
if i is None:
i = len(a.shape)
for (c, d) in zip(pre_shape[len(p):], a.shape[:i]):
if c != d and d != 1:
raise ValueError("array shapes incompatible for semicast")
strides = (0,) * len(p) + tuple(0 if d == 1 else s for (d, s) in zip(a.shape, a.strides))
casts += [as_strided(a, pre_shape + a.shape[i:], strides)]
# repair dtypes (broken by as_strided)
for ((a, _), cast) in zip(arrays, casts):
cast.dtype = a.dtype
# done
return (pre_shape, casts)
@contextlib.contextmanager
| [
37811,
198,
31,
9800,
25,
17857,
7698,
400,
1211,
1279,
65,
6359,
31,
66,
9448,
12,
40820,
13,
2398,
29,
198,
37811,
198,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
11748,
299,
32152,
198,
11748,
4732,
8019,
198,
... | 2.388112 | 858 |
from turkishnlp import detector
obj = detector.TurkishNLP()
obj.download()
obj.create_word_set()
| [
6738,
7858,
31501,
21283,
79,
1330,
31029,
198,
26801,
796,
31029,
13,
42872,
45,
19930,
3419,
198,
26801,
13,
15002,
3419,
198,
26801,
13,
17953,
62,
4775,
62,
2617,
3419,
628
] | 3.16129 | 31 |
import json
import json_utils
if __name__ == '__main__':
import fire
fire.Fire(MondlyCorpus)
| [
11748,
33918,
198,
11748,
33918,
62,
26791,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1330,
2046,
198,
220,
220,
220,
2046,
13,
13543,
7,
44,
623,
306,
45680,
385,
8,
628
] | 2.575 | 40 |
from .classification import Classification
from .ranking import Ranking
| [
6738,
764,
4871,
2649,
1330,
40984,
198,
6738,
764,
28405,
1330,
45407,
198
] | 5.538462 | 13 |
# import data
text_file = open('ex1data1.txt', 'r')
# data = [ text_file.readline() for i in range len(text_file.readlines()) ]
data = text_file.readlines()
data2 = []
with open ('ex1data1.txt') as file:
for line in file:
data2.append(line)
file.close
x = []
y = []
for item in data:
x.append(item.split(',')[0])
y.append(item.split(',')[1][0:-1])
# print (data2)
# print (x)
# print (y)
# scatter plot
import matplotlib.pyplot as plt
plt.scatter(x, y, marker='x', c='r')
plt.xlabel('Population of City in 10,000s')
plt.ylabel('Profit in $10,000s')
| [
2,
1330,
1366,
198,
198,
5239,
62,
7753,
796,
1280,
10786,
1069,
16,
7890,
16,
13,
14116,
3256,
705,
81,
11537,
198,
2,
1366,
796,
685,
2420,
62,
7753,
13,
961,
1370,
3419,
329,
1312,
287,
2837,
18896,
7,
5239,
62,
7753,
13,
961,
... | 2.333333 | 246 |
# Block size
BLOCK_SIZE = 20
X_MIN = 0
X_MAX = 24
Y_MIN = 0
Y_MAX = 24
# Screen size constants
SCREEN_WIDTH = BLOCK_SIZE * (X_MAX - X_MIN + 1)
SCREEN_HEIGHT = BLOCK_SIZE * (Y_MAX - Y_MIN + 1)
SNAKE_COLOR = (30, 150, 30)
APPLE_COLOR = (200, 50, 50)
| [
2,
9726,
2546,
201,
198,
9148,
11290,
62,
33489,
796,
1160,
201,
198,
201,
198,
55,
62,
23678,
796,
657,
201,
198,
55,
62,
22921,
796,
1987,
201,
198,
201,
198,
56,
62,
23678,
796,
657,
201,
198,
56,
62,
22921,
796,
1987,
201,
1... | 1.956522 | 138 |
from django.conf import settings
from django.utils.module_loading import import_string
from django.template.loader import render_to_string
from django.core import mail
from django.template import engines
from .models import MessageTemplate
# work in progress. This will become the preferred way to send messages:
def send_template(to, sender, template_id, context, template_owner):
'''
usage:
send_template(
to=Recipient.from_user(user),
sender=Sender.from_user(sender_user),
template_id='hello-world',
context={},
template_owner=user)
'''
msg = Templatizer().get_message_template(template_id, template_owner)
return CommunicationClient(msg.preferred_delivery_method).send_template(
to=to,
sender=sender,
template_id=template_id,
context=context
) | [
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
26791,
13,
21412,
62,
25138,
1330,
1330,
62,
8841,
198,
6738,
42625,
14208,
13,
28243,
13,
29356,
1330,
8543,
62,
1462,
62,
8841,
198,
6738,
42625,
14208,
13,
7295,... | 2.76873 | 307 |
#!/usr/bin/env python
if __name__ == "__main__":
# Pour les tests...
import sys
print Parse(sys.argv[1]) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
220,
220,
220,
220,
198,
220,
220,
220,
220,
198,
220,
220,
220,
220,
198,
220,
220,
220,
220,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
130... | 1.971014 | 69 |
from rest_framework.generics import CreateAPIView
from connect.api.serializers import LNMOnlineSerializer
from connect.models import LNMOnline
| [
6738,
1334,
62,
30604,
13,
8612,
873,
1330,
13610,
2969,
3824,
769,
198,
6738,
2018,
13,
15042,
13,
46911,
11341,
1330,
406,
32755,
14439,
32634,
7509,
198,
6738,
2018,
13,
27530,
1330,
406,
32755,
14439,
628,
628
] | 3.945946 | 37 |
import os
import posixpath
import csv
import array as arr
import sys
if __name__ == '__main__':
main() | [
11748,
28686,
201,
198,
11748,
1426,
844,
6978,
201,
198,
11748,
269,
21370,
220,
201,
198,
11748,
7177,
355,
5240,
201,
198,
11748,
25064,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
361,
11593,
3672,
834,
6624,
705,
... | 2.352941 | 51 |
"""Tests for 3rd party Connect helpers"""
| [
37811,
51,
3558,
329,
513,
4372,
2151,
8113,
49385,
37811,
198
] | 3.818182 | 11 |
import importlib
| [
11748,
1330,
8019,
628,
198
] | 3.8 | 5 |
"""
Standardize Mobile Number Using Decorators
https://www.hackerrank.com/challenges/standardize-mobile-number-using-decorators/problem
"""
@wrapper
if __name__ == '__main__':
l = [input() for _ in range(int(input()))]
sort_phone(l)
| [
37811,
198,
23615,
1096,
12173,
7913,
8554,
4280,
273,
2024,
198,
198,
5450,
1378,
2503,
13,
31153,
8056,
962,
13,
785,
14,
36747,
34120,
14,
20307,
1096,
12,
24896,
12,
17618,
12,
3500,
12,
12501,
273,
2024,
14,
45573,
198,
37811,
62... | 2.764045 | 89 |
import os, sys, argparse, time
import json
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch_geometric.data import DataLoader
from tensorboardX import SummaryWriter
sys.path.append('..')
from models.ddi_predictor import DDIPredictor
from models.series_gin_edge import SerGINE
from loader import DDIDataset
from chem import *
from utils import *
args = parse_args()
start_time = time.time()
output_dir = args.gnn+'_dim'+str(args.emb_dim)
output_dir = os.path.join(args.output_dir, args.dataset, output_dir,
'margin'+str(args.margin) + '_lr0_'+str(args.lr0) + '_lr1_'+str(args.lr1) + '_dropout'+str(args.dropout),
'time'+str(args.time))
if args.from_scratch:
output_dir = os.path.join(output_dir, 'scratch')
ext_setting = None
if args.weight_decay > 0:
output_dir = os.path.join(output_dir, 'decay'+str(args.weight_decay))
if __name__ == '__main__':
main()
| [
11748,
28686,
11,
25064,
11,
1822,
29572,
11,
640,
198,
11748,
33918,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
1... | 2.447689 | 411 |
## DranoTheCat's Cylon
##
## Boop to change colors
##
##
from random import randrange
import emote
import dcfurs
import badge
import utime
| [
2235,
1583,
5733,
464,
21979,
338,
327,
15158,
198,
2235,
198,
2235,
3248,
404,
284,
1487,
7577,
198,
2235,
198,
2235,
198,
198,
6738,
4738,
1330,
43720,
9521,
198,
11748,
795,
1258,
198,
11748,
288,
12993,
1834,
198,
11748,
23009,
198,... | 3.111111 | 45 |
import sys, os
from base.Level import Level
from base.Translate import Translate
from qct_tools.utils.FileUtils import FileUtils
start_index = 0
end_index = 0
N = 999999
if __name__ == '__main__':
if len(sys.argv) == 3:
start_index = int(sys.argv[1])
end_index = int(sys.argv[2])
process(start_index, end_index)
| [
11748,
25064,
11,
28686,
198,
198,
6738,
2779,
13,
4971,
1330,
5684,
198,
6738,
2779,
13,
8291,
17660,
1330,
3602,
17660,
198,
6738,
10662,
310,
62,
31391,
13,
26791,
13,
8979,
18274,
4487,
1330,
9220,
18274,
4487,
198,
198,
9688,
62,
... | 2.395833 | 144 |
from io import BytesIO
from warnings import warn
from re import sub
from base64 import b64encode, b64decode
from pathlib import Path
# Specify which functions to be imported to be used with nv as nv.__functionname__()
# Functions included in __all__ will be imported on calling "from nsvision.image_utils import *"
# While creating a new function, don't forget to mention below incase you want to use with nv as nv.new_func()
__all__ = [
"imread",
"imurl",
"expand_dims",
"reduce_dims", # functions having numpy array operations
"imshow",
"get_image_from_array", # functions retriving / displaying images
"imsave", # functions writing image
"base64_to_bytes",
"image_to_base64", # functions performing base64 operations
]
try:
import requests as request_image
except ImportError:
request_image = None
try:
from PIL import Image as pilimage
from numpy import asarray as toarray
from numpy import expand_dims as expand
from numpy import squeeze
from numpy import max as npmax
from numpy import min as npmin
except ImportError:
raise ImportError(
"Could not import PIL.Image. or Numpy "
"This library requires PIL >= 7.0.0 and numpy >= 1.18.1"
)
__interpolation_methods__ = {
"nearest": pilimage.NEAREST,
"bilinear": pilimage.BILINEAR,
"bicubic": pilimage.BICUBIC,
"hamming": pilimage.HAMMING,
"box": pilimage.BOX,
"lanczos": pilimage.LANCZOS,
}
def imread(
    image_path,
    resize=None,
    color_mode=None,
    interpolation="nearest",
    dtype="float32",
    return_original=False,
    normalize=False,
    maintain_aspect_ratio=False,
):
    """Load an image and convert it to a numpy ndarray optimised for models.

    Parameters
    ----------
    image_path: path-like, bytes path, or io.BytesIO with the image data.
    resize: optional (height, width) tuple.
        Note: the tuple is flipped internally to PIL's (width, height)
        order before resizing.
    color_mode: None (keep as loaded) or one of "rgb", "rgba", "grayscale".
    interpolation: resampling filter used when the target size differs
        from the loaded image. One of "nearest", "bilinear", "bicubic",
        "hamming", "box", "lanczos". Default: "nearest".
    dtype: dtype of the returned array. Default: "float32".
    return_original: also return the pre-resize array; only valid together
        with ``resize``. Default: False.
    normalize: divide pixel values by 255.0. Default: False.
    maintain_aspect_ratio: crop centrally to the target aspect ratio
        before resizing so the image is not distorted. Default: False.

    Returns
    -------
    A 3D numpy array, or ``(original_array, resized_array)`` when
    ``return_original`` is True.

    Raises
    ------
    TypeError: if ``image_path`` is not path-like, bytes or BytesIO.
    ValueError: for an invalid ``color_mode`` or ``interpolation``, or
        when ``return_original`` is True without ``resize``.
    """
    if isinstance(image_path, BytesIO):
        image = pilimage.open(image_path)
    elif isinstance(image_path, (Path, bytes, str)):
        if isinstance(image_path, Path):
            image_path = str(image_path.resolve())
        with open(image_path, "rb") as f:
            image = pilimage.open(BytesIO(f.read()))
    else:
        raise TypeError(
            "path should be path-like or io.BytesIO" ", not {}".format(type(image_path))
        )

    if color_mode is not None:
        if color_mode == "grayscale":
            # "I;16" and "I" are already single-channel; converting them to
            # "L" would lose bit depth.
            if image.mode not in ("L", "I;16", "I"):
                image = image.convert("L")
        elif color_mode == "rgba":
            if image.mode != "RGBA":
                image = image.convert("RGBA")
        elif color_mode == "rgb":
            if image.mode != "RGB":
                image = image.convert("RGB")
        else:
            raise ValueError('color_mode must be "grayscale", "rgb", or "rgba"')

    # Bug fix: previously original_image_array was only assigned when an
    # actual resize happened, so return_original=True with an image already
    # at the target size raised NameError.
    original_image_array = None
    if resize is not None:
        width_height_tuple = (resize[1], resize[0])
        if image.size != width_height_tuple:
            original_image_array = toarray(image, dtype=dtype)
            if interpolation not in __interpolation_methods__:
                raise ValueError(
                    "Invalid interpolation method {} specified. Supported "
                    "methods are {}".format(
                        interpolation, ", ".join(__interpolation_methods__.keys())
                    )
                )
            resample = __interpolation_methods__[interpolation]
            if maintain_aspect_ratio:
                # Crop the centre to the target aspect ratio first so the
                # subsequent resize does not distort the image.
                width, height = image.size
                target_width, target_height = width_height_tuple
                crop_height = (width * target_height) // target_width
                crop_width = (height * target_width) // target_height
                # Never crop to a size larger than the source image.
                crop_height = min(height, crop_height)
                crop_width = min(width, crop_width)
                crop_box_hstart = (height - crop_height) // 2
                crop_box_wstart = (width - crop_width) // 2
                crop_box = [
                    crop_box_wstart,
                    crop_box_hstart,
                    crop_box_wstart + crop_width,
                    crop_box_hstart + crop_height,
                ]
                image = image.resize(width_height_tuple, resample, box=crop_box)
            else:
                image = image.resize(width_height_tuple, resample)

    image_array = toarray(image, dtype=dtype)
    if normalize:
        # Out-of-place division: in-place `/=` would raise on integer dtypes.
        image_array = image_array / 255.0
    if return_original:
        if resize is None:
            raise ValueError(
                "return_original parameter only works with resize parameter"
            )
        if original_image_array is None:
            # Image was already at the requested size; the "original" is the
            # same data as the resized array.
            original_image_array = image_array
        return original_image_array, image_array
    return image_array
def imurl(image_url, return_as_array=False, **kwargs):
    """Fetch an image from an http/https url.

    Parameters
    ----------
    image_url: http / https url of the image
    return_as_array: when True, decode straight to a numpy array via
        ``imread``; any extra keyword arguments are forwarded to it, e.g.
        ``resize=(224, 224)``, ``color_mode='rgb'``, ``dtype='float32'``.
        Default: False

    Returns
    -------
    A PIL Image by default, or a numpy ndarray when ``return_as_array``
    is True.

    Raises
    ------
    ImportError: when the optional ``requests`` dependency is missing.
    ValueError: when the url does not start with http.
    """
    # The requests dependency is optional at install time; fail loudly here.
    if request_image is None:
        raise ImportError(
            "requests library is required from reading image from url "
            "Install it using pip install requests"
        )
    if not image_url.startswith("http"):
        raise ValueError(
            f"invalid url found. Required http or https url but got {image_url} instead"
        )
    response = request_image.get(image_url)
    raw_bytes = BytesIO(response.content)
    if not return_as_array:
        return pilimage.open(raw_bytes)
    return imread(raw_bytes, **kwargs)
def expand_dims(array, axis=0, normalize=False):
    """Expand the shape of an array.

    Inserts a new axis at the ``axis`` position of the expanded array
    shape, optionally normalizing pixel values to the [0, 1] range.

    Parameters
    ----------
    array : numpy array.
    axis : int or tuple of ints
        Position in the expanded axes where the new axis is placed.
    normalize : bool
        True: return the expanded array divided by 255.0.
        False: return just the array with expanded dimensions.

    Returns
    -------
    The expanded (and optionally normalized) numpy array. The input array
    is never modified.
    """
    array = expand(array, axis=axis)
    if normalize:
        # Bug fix: numpy's expand_dims returns a *view*, so the previous
        # in-place `array /= 255.0` silently mutated the caller's array and
        # raised TypeError for integer dtypes. Divide out-of-place instead.
        array = array / 255.0
    return array
def get_image_from_array(
    img_array, denormalize=True, dtype="float32", is_cv2_image=False
):
    """Converts numpy image array to a PIL Image instance.
    Parameters
    ----------
    img_array: Input Numpy image array, 2D grayscale or 3D (H, W, C).
    denormalize: Revert back normalized image to unnormalized form by
        rescaling values into the 0-255 range.
        Default: True.
    dtype: Dtype to use for the intermediate array.
        Default: "float32".
    is_cv2_image: Set to True if image is loaded using cv2 (BGR order)
        Default: False
    Returns
    -------
    A PIL Image.
    Raises
    ------
    Raises TypeError if image_array is not an numpy ndarray
    Raises ValueError for unsupported dimensionality or channel count
    """
    # Duck-typed ndarray check: anything with .ndim is accepted.
    if not hasattr(img_array, "ndim"):
        raise TypeError(
            f"Required image_array to be of type numpy.ndarray but got {type(img_array)} instead"
        )
    if img_array.ndim != 3:
        if img_array.ndim == 2:
            """expand image dimensions only if image is 2D grayscale
            manually adding channel dimension `1` to image (only for 2D grayscale image)"""
            img_array = expand_dims(img_array, axis=2)
        else:
            raise ValueError(
                f"Expected array with 1 or 3 dimensions Got an array with {img_array.ndim} dimension\n"
                "Incase you have used expand_dims for preprocessing, use nv.reduce_dims() for reducing expanded dimensions\n"
                "make sure to check the axis position while expanding or reducing dimensions."
            )
    if is_cv2_image:  # If numpy array is cv2 image
        img_array = img_array[..., ::-1]  # Convert BGR to RGB
    img_array = toarray(img_array, dtype=dtype)
    # Original Numpy array x has format (height, width, channel)
    # or (channel, height, width)
    # but target PIL image has format (width, height, channel)
    if denormalize:
        # Min-max rescale into [0, 255] so arbitrarily-scaled inputs render.
        img_array = img_array - npmin(img_array)
        img_max = npmax(img_array)
        if img_max != 0:
            img_array /= img_max
        img_array *= 255
    # Channel count selects the PIL mode; order of checks matters because
    # the grayscale branch strips the channel axis it may have added above.
    if img_array.shape[2] == 4:  # RGBA Image
        return pilimage.fromarray(img_array.astype("uint8"), "RGBA")
    elif img_array.shape[2] == 3:  # RGB Image
        return pilimage.fromarray(img_array.astype("uint8"), "RGB")
    elif img_array.shape[2] == 1:  # Grayscale image
        # Removing the additional dimension we created earlier
        img_array = reduce_dims(img_array, axis=2)
        if npmax(img_array) > 255:
            # 32-bit signed integer grayscale image. PIL mode "I"
            return pilimage.fromarray(img_array.astype("int32"), "I")
        return pilimage.fromarray(img_array.astype("uint8"), "L")
    else:
        raise ValueError(f"Channel {img_array.shape[2]} not supported")
def imshow(image, is_cv2_image=False):
    """
    Open the image in the system's default viewer window.

    Parameters
    image: PIL image instance or numpy image array
    is_cv2_image: set True when the array was produced by cv2
    """
    # PIL images expose .show() directly; arrays are converted first.
    if hasattr(image, "show"):
        viewer = image
    else:
        viewer = get_image_from_array(image, is_cv2_image=is_cv2_image)
    viewer.show()
def imsave(
    path, image, file_format=None, is_cv2_image=False, denormalize=True, **kwargs
):
    """
    Write an image array or PIL image instance to a file.

    Parameters
    ----------
    path: Location for writing image file
    image: PIL image instance or numpy image array
    file_format: explicit file format (e.g. "png", "jpeg"); when None,
        PIL infers it from the path extension
    is_cv2_image: Set to True if image is loaded using cv2
        Default: False
    denormalize: Set to True if image was normalized during preprocessing
        Default: True
    kwargs:
        other keyword args forwarded to PIL's ``Image.save``
    """
    if hasattr(image, "save"):
        # Bug fix: PIL's Image.save takes the format via the `format`
        # keyword; the previous `file_format=` keyword was silently
        # swallowed into **params and ignored.
        image.save(path, format=file_format, **kwargs)
    else:
        image = get_image_from_array(
            image, denormalize=denormalize, is_cv2_image=is_cv2_image
        )
        if image.mode == "RGBA" and (file_format == "jpg" or file_format == "jpeg"):
            warn("JPG format does not support RGBA images, converting to RGB.")
            image = image.convert("RGB")
        image.save(path, format=file_format, **kwargs)
def reduce_dims(array, axis=0):
    """
    Remove a length-one dimension from *array* at the given axis.

    Note: if this is an image array, double-check whether expand_dims was
    applied during preprocessing and which axis it used.

    Parameters
    ----------
    array: numpy nd array
    axis : int or tuple of ints
        Position of the size-one axis (or axes) to remove.
        default: 0

    Returns
    -------
    The squeezed numpy array.
    """
    reduced = squeeze(array, axis=axis)
    return reduced
def base64_to_bytes(base64_encoded_image):
    """
    Decode a base64 image string into an in-memory bytes buffer.

    Parameters
    ----------
    base64_encoded_image: base64 encoded image, with or without a leading
        "data:image/...;base64," data-uri header

    Returns
    -------
    io.BytesIO containing the decoded image bytes
    """
    # Strip the data-uri header when present; bare base64 passes through.
    payload = sub("^data:image/.+;base64,", "", base64_encoded_image)
    return BytesIO(b64decode(payload))
def image_to_base64(image, file_format="PNG"):
    """
    Convert an image (path or PIL image object) to a base64 data-uri string.

    Parameters
    ----------
    image: path or pil image object
    file_format: file format of image (e.g. "PNG", "JPEG")

    Returns
    -------
    base64 encoded image as a "data:image/<format>;base64,..." string
    """
    if isinstance(image, str):
        image = get_image_from_array(imread(image))
    buffered = BytesIO()
    image.save(buffered, format=file_format)
    # Bug fix: the mime subtype previously was hard-coded to "png" even when
    # the image was written in another format; derive it from file_format.
    mime = file_format.lower()
    if mime == "jpg":
        # "jpg" is not a registered mime subtype; the canonical one is "jpeg".
        mime = "jpeg"
    encoded = b64encode(buffered.getvalue()).decode("ascii")
    return f"data:image/{mime};base64,{encoded}"
| [
6738,
33245,
1330,
2750,
4879,
9399,
198,
6738,
14601,
1330,
9828,
198,
6738,
302,
1330,
850,
198,
6738,
2779,
2414,
1330,
275,
2414,
268,
8189,
11,
275,
2414,
12501,
1098,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
2,
18291,
1958,
... | 2.390653 | 5,606 |
from flask import Flask, jsonify, request, render_template
from sklearn.linear_model import LogisticRegression
import numpy as np
import pandas as pd
import tensorflow as tf
import os
#---------- URLS AND WEB PAGES -------------#
# Initialize the app
app = Flask(__name__)

image_path = 'ethan.jpg'


# Get an example and return it's score from the predictor model
@app.route("/", methods=["GET", "POST"])
def index():
    """Serve the landing page for GET and POST requests.

    Bug fix: the @app.route decorator previously had no function beneath
    it (the next statement was the `if __name__` block), which is a
    SyntaxError — the module could not even be imported.
    """
    return render_template("index.html")


#--------- RUN WEB APP SERVER ------------#
# Start the app server
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=8000, debug=True)
| [
6738,
42903,
1330,
46947,
11,
33918,
1958,
11,
2581,
11,
8543,
62,
28243,
198,
198,
6738,
1341,
35720,
13,
29127,
62,
19849,
1330,
5972,
2569,
8081,
2234,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
... | 3.066298 | 181 |
# coding=utf-8
# Copyright 2014 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Task entity that describe when a task is to be scheduled.
This module doesn't do the scheduling itself. It only describes the tasks ready
to be scheduled.
Graph of the schema:
+--------Root-------+
|TaskRequest | task_request.py
| +--------------+ |
| |TaskProperties| |
| | +--------+ | |
| | |FilesRef| | |
| | +--------+ | |
| +--------------+ |
|id=<based on epoch>|
+-------------------+
|
v
+--------------------+
|TaskToRun |
|id=<dimensions_hash>|
+--------------------+
"""
import datetime
import logging
import time
from google.appengine.api import memcache
from google.appengine.ext import ndb
from components import utils
from server import task_pack
from server import task_queues
from server import task_request
### Models.
class TaskToRun(ndb.Model):
  """Defines a TaskRequest ready to be scheduled on a bot.
  This specific request for a specific task can be executed multiple times,
  each execution will create a new child task_result.TaskResult of
  task_result.TaskResultSummary.
  This entity must be kept small and contain the minimum data to enable the
  queries for two reasons:
  - it is updated inside a transaction for each scheduling event, e.g. when a
    bot gets assigned this task item to work on.
  - all the ones currently active are fetched at once in a cron job.
  The key id is the value of 'dimensions_hash' that is generated with
  task_queues.hash_dimensions(), parent is TaskRequest.
  """
  # Moment by which the task has to be requested by a bot. Copy of TaskRequest's
  # TaskRequest.expiration_ts to enable queries when cleaning up stale jobs.
  expiration_ts = ndb.DateTimeProperty(required=True)
  # Everything above is immutable, everything below is mutable.
  # priority and request creation timestamp are mixed together to allow queries
  # to order the results by this field to allow sorting by priority first, and
  # then timestamp. See _gen_queue_number() for details. This value is only set
  # when the task is available to run, i.e.
  # ndb.TaskResult.query(ancestor=self.key).get().state==AVAILABLE.
  # If this task it not ready to be scheduled, it must be None.
  queue_number = ndb.IntegerProperty()
  @property
  def is_reapable(self):
    """Returns True if the task is ready to be scheduled."""
    # queue_number is cleared (None) once the task is reaped, so a simple
    # truthiness check suffices.
    return bool(self.queue_number)
  @property
  def request_key(self):
    """Returns the TaskRequest ndb.Key that is parent to the task to run."""
    return task_to_run_key_to_request_key(self.key)
### Private functions.
def _gen_queue_number(dimensions_hash, timestamp, priority):
  """Generates a 64 bit packed value used for TaskToRun.queue_number.
  Arguments:
  - dimensions_hash: 31 bit integer to classify in a queue.
  - timestamp: datetime.datetime when the TaskRequest was filed in. This value
    is used for FIFO ordering with a 100ms granularity; the year is ignored.
  - priority: priority of the TaskRequest. It's a 8 bit integer. Lower is higher
    priority.
  Returns:
    queue_number is a 63 bit integer with dimension_hash, timestamp at 100ms
    resolution plus priority.
  """
  # Layout: the dimensions_hash occupies the bits above bit 31; the low 31
  # bits mix priority (shifted to bit 22) with the 100ms tick-of-year so
  # that sorting by queue_number sorts by priority first, then FIFO.
  # NOTE: `long` means this module targets Python 2 (App Engine runtime).
  assert isinstance(dimensions_hash, (int, long)), repr(dimensions_hash)
  assert dimensions_hash > 0 and dimensions_hash <= 0xFFFFFFFF, hex(
      dimensions_hash)
  assert isinstance(timestamp, datetime.datetime), repr(timestamp)
  task_request.validate_priority(priority)
  # Ignore the year.
  year_start = datetime.datetime(timestamp.year, 1, 1)
  # 100ms granularity: seconds since the start of the year, times 10.
  t = int(round((timestamp - year_start).total_seconds() * 10.))
  assert t >= 0 and t <= 0x7FFFFFFF, (
      hex(t), dimensions_hash, timestamp, priority)
  # 31-22 == 9, leaving room for overflow with the addition.
  # 0x3fc00000 is the priority mask.
  # It is important that priority mixed with time is an addition, not a bitwise
  # or.
  low_part = (priority << 22) + t
  assert low_part >= 0 and low_part <= 0xFFFFFFFF, '0x%X is out of band' % (
      low_part)
  high_part = dimensions_hash << 31
  return high_part | low_part
def _queue_number_priority(q):
"""Returns the number to be used as a comparision for priority.
The higher the more important.
"""
return q & 0x7FFFFFFF
def _memcache_to_run_key(task_key):
  """Functional equivalent of task_result.pack_result_summary_key()."""
  # The memcache key is the hex-encoded integer id of the parent TaskRequest.
  request_key = task_to_run_key_to_request_key(task_key)
  return '%x' % request_key.integer_id()
def _lookup_cache_is_taken(task_key):
  """Queries the quick lookup cache to reduce DB operations."""
  # memcache reads are not transactional; calling this inside a transaction
  # would silently break its isolation assumptions.
  assert not ndb.in_transaction()
  key = _memcache_to_run_key(task_key)
  # Presence in the 'task_to_run' namespace means the task was already reaped.
  return bool(memcache.get(key, namespace='task_to_run'))
class _QueryStats(object):
  """Statistics for a yield_next_available_task_to_dispatch() loop."""
  broken = 0         # keys that failed validate_to_run_key()
  cache_lookup = 0   # skipped via the negative memcache lookup cache
  deadline = None    # bot's completion deadline, copied in by the caller
  hash_mismatch = 0  # not updated in the visible code path of this module
  expired = 0        # tasks whose expiration_ts had already passed
  ignored = 0        # valid candidates yielded but not reaped by the caller
  no_queue = 0       # queue_number already cleared (task already reaped)
  real_mismatch = 0  # hash matched but actual dimensions did not
  too_long = 0       # task cannot complete before the bot's deadline
  total = 0          # total keys examined
def _validate_task(bot_dimensions, deadline, stats, now, task_key):
  """Validates the TaskToRun and update stats.
  Filters are applied cheapest-first: key validation, then the negative
  memcache cache, and only then the actual datastore fetches.
  Returns:
    None if the task_key cannot be reaped by this bot.
    tuple(TaskRequest, TaskToRun) if this is a good candidate to reap.
  """
  # TODO(maruel): Create one TaskToRun per TaskRunResult.
  packed = task_pack.pack_request_key(task_key.parent()) + '0'
  stats.total += 1
  # Verify TaskToRun is what is expected. Play defensive here.
  try:
    validate_to_run_key(task_key)
  except ValueError as e:
    logging.error('_validate_task(%s): validation error: %s', packed, e)
    stats.broken += 1
    return
  # Do this after the basic weeding out but before fetching TaskRequest.
  if _lookup_cache_is_taken(task_key):
    logging.debug('_validate_task(%s): negative cache', packed)
    stats.cache_lookup += 1
    return
  # Ok, it's now worth taking a real look at the entity.
  # Both fetches are issued concurrently before blocking on the first one.
  task_future = task_key.get_async()
  request_future = task_to_run_key_to_request_key(task_key).get_async()
  task = task_future.get_result()
  # It is possible for the index to be inconsistent since it is not executed in
  # a transaction, no problem.
  if not task.queue_number:
    logging.debug('_validate_task(%s): was already reaped', packed)
    stats.no_queue += 1
    request_future.wait()
    return
  # It expired. A cron job will cancel it eventually. Since 'now' is saved
  # before the query, an expired task may still be reaped even if technically
  # expired if the query is very slow. This is on purpose so slow queries do not
  # cause exagerate expirations.
  if task.expiration_ts < now:
    # It could be possible to handle right away to not have to wait for the cron
    # job, which would save a 30s average delay.
    logging.debug(
        '_validate_task(%s): expired %s < %s', packed, task.expiration_ts, now)
    stats.expired += 1
    request_future.wait()
    return
  # The hash may have conflicts. Ensure the dimensions actually match by
  # verifying the TaskRequest. There's a probability of 2**-31 of conflicts,
  # which is low enough for our purpose.
  request = request_future.get_result()
  if not match_dimensions(request.properties.dimensions, bot_dimensions):
    logging.debug('_validate_task(%s): dimensions mismatch', packed)
    stats.real_mismatch += 1
    return
  # If the bot has a deadline, don't allow it to reap the task unless it can be
  # completed before the deadline. We have to assume the task takes the
  # theoretical maximum amount of time possible, which is governed by
  # execution_timeout_secs. An isolated task's download phase is not subject to
  # this limit, so we need to add io_timeout_secs. When a task is signalled that
  # it's about to be killed, it receives a grace period as well.
  # grace_period_secs is given by run_isolated to the task execution process, by
  # task_runner to run_isolated, and by bot_main to the task_runner. Lastly, add
  # a few seconds to account for any overhead.
  #
  # Give an exemption to the special terminate task because it doesn't actually
  # run anything.
  if deadline is not None and not request.properties.is_terminate:
    if not request.properties.execution_timeout_secs:
      # Task never times out, so it cannot be accepted.
      logging.debug(
          '_validate_task(%s): deadline %s but no execution timeout',
          packed, deadline)
      stats.too_long += 1
      return
    hard = request.properties.execution_timeout_secs
    # Triple grace period: run_isolated, task_runner and bot_main each
    # propagate one (see the comment block above).
    grace = 3 * (request.properties.grace_period_secs or 30)
    # Allowance buffer for overheads (scheduling and isolation)
    overhead = 300
    max_schedule = now + datetime.timedelta(seconds=hard + grace + overhead)
    if deadline <= max_schedule:
      logging.debug(
          '_validate_task(%s): deadline and too late %s > %s (%s + %d + %d + '
          '%d)',
          packed, deadline, max_schedule, now, hard, grace, overhead)
      stats.too_long += 1
      return
  # It's a valid task! Note that in the meantime, another bot may have reaped
  # it.
  logging.info('_validate_task(%s): ready to reap!', packed)
  return request, task
def _yield_pages_async(q, size):
  """Given a ndb.Query, yields ndb.Future that returns pages of results
  asynchronously.

  Each yielded future resolves to one page (a list) of query results. The
  generator stops once the datastore reports there are no further pages.

  Bug fix: the page callback `fire` was referenced but never defined,
  making every call raise NameError; it is restored below.
  """
  next_cursor = [None]
  should_continue = [True]
  def fire(page_future, result_future):
    # Runs as soon as a page has been fetched: records the cursor for the
    # next iteration and resolves the future handed out to the caller.
    results, cursor, more = page_future.get_result()
    result_future.set_result(results)
    next_cursor[0] = cursor
    should_continue[0] = more
  while should_continue[0]:
    page_future = q.fetch_page_async(size, start_cursor=next_cursor[0])
    result_future = ndb.Future()
    page_future.add_immediate_callback(fire, page_future, result_future)
    yield result_future
    result_future.get_result()
def _get_task_to_run_query(dimensions_hash):
  """Returns a ndb.Query of TaskToRun within this dimensions_hash queue."""
  # keys_only: entities are only fetched later, once a candidate passes the
  # cheap filters; the 15s deadline bounds a straggling datastore call.
  opts = ndb.QueryOptions(keys_only=True, deadline=15)
  # See _gen_queue_number() as of why << 31.
  return TaskToRun.query(default_options=opts).order(
      TaskToRun.queue_number).filter(
      TaskToRun.queue_number >= (dimensions_hash << 31),
      TaskToRun.queue_number < ((dimensions_hash+1) << 31))
def _yield_potential_tasks(bot_id):
  """Queries all the known task queues in parallel and yields the task in order
  of priority.
  The ordering is opportunistic, not strict. There's a risk of not returning
  exactly in the priority order depending on index staleness and query execution
  latency. The number of queries is unbounded.
  Yields:
    TaskToRun entities, trying to yield the highest priority one first. To have
    finite execution time, starts yielding results once one of these conditions
    are met:
    - 1 second elapsed; in this case, continue iterating in the background
    - First page of every query returned
    - All queries exhausted
  """
  potential_dimensions_hashes = task_queues.get_queues(bot_id)
  # Note that the default ndb.EVENTUAL_CONSISTENCY is used so stale items may be
  # returned. It's handled specifically by consumers of this function.
  start = time.time()
  queries = [_get_task_to_run_query(d) for d in potential_dimensions_hashes]
  yielders = [_yield_pages_async(q, 10) for q in queries]
  # We do care about the first page of each query so we cannot merge all the
  # results of every query insensibly.
  futures = []
  for y in yielders:
    futures.append(next(y, None))
  # Pump the ndb event loop for up to 1 second, or until every first page
  # has arrived, whichever comes first.
  while (time.time() - start) < 1 and not all(f.done() for f in futures if f):
    r = ndb.eventloop.run0()
    if r is None:
      break
    time.sleep(r)
  logging.debug(
      'Waited %.3fs for %d futures, %d completed',
      time.time() - start, sum(1 for f in futures if f.done()), len(futures))
  items = []
  for i, f in enumerate(futures):
    if f and f.done():
      items.extend(f.get_result())
      # Immediately request the next page so it downloads in the background.
      futures[i] = next(yielders[i], None)
  items.sort(key=lambda k: _queue_number_priority(k.id()), reverse=True)
  # It is possible that there is no items yet, in case all futures are taking
  # more than 1 second.
  # It is possible that all futures are done if every queue has less than 10
  # task pending.
  while any(futures) or items:
    if items:
      yield items[0]
      items = items[1:]
    else:
      # Let activity happen.
      ndb.eventloop.run1()
    # Continue iteration.
    changed = False
    for i, f in enumerate(futures):
      if f and f.done():
        items.extend(f.get_result())
        futures[i] = next(yielders[i], None)
        changed = True
    if changed:
      # New pages arrived: re-establish the priority order before yielding.
      items.sort(key=lambda k: _queue_number_priority(k.id()), reverse=True)
### Public API.
def request_to_task_to_run_key(request):
  """Returns the ndb.Key for a TaskToRun from a TaskRequest."""
  # The entity id is the hash of the request's dimensions and the parent is
  # the TaskRequest itself, making lookups in either direction cheap.
  return ndb.Key(
      TaskToRun, task_queues.hash_dimensions(request.properties.dimensions),
      parent=request.key)
def task_to_run_key_to_request_key(task_key):
  """Returns the ndb.Key for a TaskToRun from a TaskRequest key."""
  kind = task_key.kind()
  if kind != 'TaskToRun':
    # Guard against being handed a key of the wrong entity kind.
    raise ValueError('Expected key to TaskToRun, got %s' % kind)
  return task_key.parent()
def gen_queue_number(request):
  """Returns the value to use for TaskToRun.queue_number based on request.
  It is exported so a task can be retried by task_scheduler.
  """
  # Packs dimensions hash, creation timestamp and priority into one sortable
  # integer; see _gen_queue_number() for the bit layout.
  return _gen_queue_number(
      task_queues.hash_dimensions(request.properties.dimensions),
      request.created_ts,
      request.priority)
def new_task_to_run(request):
  """Returns a fresh new TaskToRun for the task ready to be scheduled.
  Returns:
    Unsaved TaskToRun entity.
  """
  # queue_number is set, so the entity is immediately reapable once stored;
  # expiration_ts mirrors the request's to support stale-job cleanup queries.
  return TaskToRun(
      key=request_to_task_to_run_key(request),
      queue_number=gen_queue_number(request),
      expiration_ts=request.expiration_ts)
def validate_to_run_key(task_key):
  """Validates a ndb.Key to a TaskToRun entity. Raises ValueError if invalid."""
  # This also validates the key kind.
  request_key = task_to_run_key_to_request_key(task_key)
  key_id = task_key.integer_id()
  # The id is the 32 bit dimensions_hash; 0 is not a valid hash value.
  if not key_id or key_id >= 2**32:
    raise ValueError(
        'TaskToRun key id should be between 1 and 2**32, found %s' %
        task_key.id())
  task_request.validate_request_key(request_key)
def match_dimensions(request_dimensions, bot_dimensions):
  """Returns True if the bot dimensions satisfies the request dimensions."""
  assert isinstance(request_dimensions, dict), request_dimensions
  assert isinstance(bot_dimensions, dict), bot_dimensions
  # Any requested key the bot doesn't expose at all can never match.
  if frozenset(request_dimensions).difference(bot_dimensions):
    return False
  # NOTE: .iteritems() — this module targets the Python 2 GAE runtime.
  for key, required in request_dimensions.iteritems():
    bot_value = bot_dimensions[key]
    if isinstance(bot_value, (list, tuple)):
      # Multi-valued bot dimension: the required value must be one of them.
      if required not in bot_value:
        return False
    elif required != bot_value:
      return False
  return True
def set_lookup_cache(task_key, is_available_to_schedule):
  """Updates the quick lookup cache to mark an item as available or not.
  This cache is a blacklist of items that are already reaped, so it is not worth
  trying to reap it with a DB transaction. This saves on DB contention when a
  high number (>1000) of concurrent bots with similar dimension are reaping
  tasks simultaneously. In this case, there is a high likelihood that multiple
  concurrent HTTP handlers are trying to reap the exact same task
  simultaneously. This blacklist helps reduce the contention.
  """
  # Set the expiration time for items in the negative cache as 15 seconds. This
  # copes with significant index inconsistency but do not clog the memcache
  # server with unneeded keys.
  cache_lifetime = 15
  # memcache writes are not transactional; disallow calls from transactions.
  assert not ndb.in_transaction()
  key = _memcache_to_run_key(task_key)
  if is_available_to_schedule:
    # The item is now available, so remove it from memcache.
    memcache.delete(key, namespace='task_to_run')
  else:
    memcache.set(key, True, time=cache_lifetime, namespace='task_to_run')
def yield_next_available_task_to_dispatch(bot_dimensions, deadline):
  """Yields next available (TaskRequest, TaskToRun) in decreasing order of
  priority.
  Once the caller determines the task is suitable to execute, it must use
  reap_task_to_run(task.key) to mark that it is not to be scheduled anymore.
  Performance is the top most priority here.
  Arguments:
  - bot_dimensions: dimensions (as a dict) defined by the bot that can be
    matched.
  - deadline: UTC timestamp (as an int) that the bot must be able to
    complete the task by. None if there is no such deadline.
  """
  assert len(bot_dimensions['id']) == 1, bot_dimensions
  # List of all the valid dimensions hashed.
  now = utils.utcnow()
  stats = _QueryStats()
  stats.deadline = deadline
  bot_id = bot_dimensions[u'id'][0]
  try:
    # Candidates arrive in roughly decreasing priority; _validate_task()
    # filters out stale, expired or mismatching entries.
    for task_key in _yield_potential_tasks(bot_id):
      duration = (utils.utcnow() - now).total_seconds()
      if duration > 40.:
        # Stop searching after too long, since the odds of the request blowing
        # up right after succeeding in reaping a task is not worth the dangling
        # task request that will stay in limbo until the cron job reaps it and
        # retry it. The current handlers are given 60s to complete. By limiting
        # search to 40s, it gives 20s to complete the reaping and complete the
        # HTTP request.
        return
      # _validate_task() returns (request, task) if it's worth reaping.
      item = _validate_task(bot_dimensions, deadline, stats, now, task_key)
      if item:
        yield item[0], item[1]
        # If the code is still executed, it means that the task reaping wasn't
        # successful.
        stats.ignored += 1
  finally:
    logging.debug(
        'yield_next_available_task_to_dispatch(%s) in %.3fs: %s',
        bot_id, (utils.utcnow() - now).total_seconds(), stats)
def yield_expired_task_to_run():
  """Yields all the expired TaskToRun still marked as available."""
  # The reason it is done this way as an iteration over all the pending entities
  # instead of using a composite index with 'queue_number' and 'expiration_ts'
  # is that TaskToRun entities are very hot and it is important to not require
  # composite indexes on it. It is expected that the number of pending task is
  # 'relatively low', in the orders of 100,000 entities.
  now = utils.utcnow()
  # queue_number > 0 selects entities still available to be reaped; the
  # expiration check is done in Python to avoid the composite index.
  for task in TaskToRun.query(TaskToRun.queue_number > 0):
    if task.expiration_ts < now:
      yield task
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
15069,
1946,
383,
406,
9598,
40,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
198,
2,
326,
460,
307,
1043,
... | 3.002454 | 6,113 |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from py3cw.request import Py3CW
import pytz
import datetime
import pandas as pd

# SECURITY: hard-coded 3Commas API credentials committed to source. These
# should be revoked immediately and loaded from an environment variable or
# a secrets store instead of being embedded in the notebook.
p3cw = Py3CW(
    key='42c40d6a17a24a119fdcd7dde2447b20c282ce6cac6046eb8a1669c45ff294cd',
    secret='d8a3fbf8d887500a078fc6dabfe8c3f1fa31a439e14931e41c3ecb43a95c62f7e075aacb9166f6997549fe08acc3b021a73c5a7aa1921c8f3b8abf7978aab566e0a8db304c397f51d794df4eced57b9a291de70fbd1474a22a5f6d2db041a14359c74b58',
)
# get all bots
error, data = p3cw.request(
    entity='grid_bots',
    action='',
    payload=dict(limit=100)
)

bots = (pd.DataFrame(data).astype(dict(
    grids_quantity='float',
    lower_price='float',
    upper_price='float',
    quantity_per_grid='float',
    current_profit_usd='float',
    profit_percentage='float',
    current_price='float',
    created_at='datetime64',
    updated_at='datetime64',
    investment_base_currency='float',
    investment_quote_currency='float',
))).set_index('id')

# Bug fix: days_running was computed as created_at - utcnow(), which is a
# *negative* timedelta for any bot created in the past and flipped the sign
# of every per-day metric. Elapsed time must be now - created_at.
bots = bots.assign(
    days_running=lambda x: datetime.datetime.utcnow() - x['created_at'],
    profit_per_day_usd=lambda x: x['current_profit_usd'] / x['days_running'].dt.days,
    profit_percentage_per_day=lambda x: x['profit_percentage'] / x['days_running'].dt.days,
)
with pd.option_context('display.float_format', '{:,.2f}'.format):
display(
bots['pair days_running profit_per_day_usd current_profit_usd profit_percentage profit_percentage_per_day'.split(' ')]
.sort_values('profit_per_day_usd').set_index('pair'))
# Bug fix: the chained call previously read "..set_index" (a doubled dot),
# which is a SyntaxError and prevented the cell from running at all.
with pd.option_context('display.float_format', '{:,.2f}'.format):
    display(
        bots['pair days_running profit_per_day_usd current_profit_usd profit_percentage profit_percentage_per_day'.split(' ')]
        .sort_values('profit_percentage_per_day').set_index('pair'))
profits=pd.DataFrame()
for id in bots.index.unique():
error, data = p3cw.request(
entity='grid_bots',
action='profits',
action_id=str(id)
)
p = pd.DataFrame.from_dict(data).astype(dict(profit='float', usd_profit='float', created_at='datetime64'))
p['id']=id
p['pair']=bots.loc[id].pair
profits = profits.append(p)
(profits.set_index('created_at')
.sort_index()
.groupby([pd.Grouper('pair'), pd.Grouper(freq='1D')])['id']
.count()
.unstack()
.sort_index(axis=1, ascending=False)
.fillna(0)
.astype(int)
)
| [
2,
11420,
198,
2,
474,
929,
88,
353,
25,
198,
2,
220,
220,
474,
929,
88,
5239,
25,
198,
2,
220,
220,
220,
220,
17519,
25,
20966,
2047,
65,
11,
9078,
25,
2971,
198,
2,
220,
220,
220,
220,
2420,
62,
15603,
341,
25,
198,
2,
220... | 2.227859 | 1,163 |
##IT IS NEEDED IN CASE OF PYINSTALLER ONLY
#!!! DO NOT MODIFY THIS FILE !!!
from PyInstaller.utils.hooks import collect_data_files
# PyInstaller hook: bundle the 'tdl' package's non-code data files into the
# frozen executable.
datas = collect_data_files('tdl')
| [
2235,
2043,
3180,
36465,
1961,
3268,
42001,
3963,
350,
56,
38604,
7036,
1137,
22224,
198,
2,
10185,
8410,
5626,
19164,
5064,
56,
12680,
45811,
220,
10185,
198,
6738,
9485,
15798,
263,
13,
26791,
13,
25480,
82,
1330,
2824,
62,
7890,
62,
... | 2.946429 | 56 |
from django.contrib import admin
from dali.gallery.models import Gallery, Picture
# Bug fix: GalleryAdmin and PictureAdmin were referenced without being
# defined or imported anywhere, raising NameError when Django loaded this
# admin module. Minimal ModelAdmin subclasses are provided; customize as
# needed.
class GalleryAdmin(admin.ModelAdmin):
    """Admin configuration for Gallery objects."""


class PictureAdmin(admin.ModelAdmin):
    """Admin configuration for Picture objects."""


admin.site.register(Gallery, GalleryAdmin)
admin.site.register(Picture, PictureAdmin)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
288,
7344,
13,
24460,
13,
27530,
1330,
12917,
11,
17741,
198,
198,
28482,
13,
15654,
13,
30238,
7,
29352,
11,
12917,
46787,
8,
198,
28482,
13,
15654,
13,
30238,
7,
28070,
11,
... | 3.673913 | 46 |
# Copyright (C) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in project root for information.
from synapse.ml.explainers._ICETransformer import _ICETransformer
from pyspark.ml.common import inherit_doc
from typing import List, Dict, Union
@inherit_doc | [
2,
15069,
357,
34,
8,
5413,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
49962,
739,
262,
17168,
13789,
13,
4091,
38559,
24290,
287,
1628,
6808,
329,
1321,
13,
198,
198,
6738,
6171,
7512,
13,
4029,
13,
20676,
50221,
13557,
2149,
2767,
... | 3.679012 | 81 |
# Generated by Django 3.2.9 on 2021-11-23 18:27
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
24,
319,
33448,
12,
1157,
12,
1954,
1248,
25,
1983,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.818182 | 44 |
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 28 16:39:43 2018
Covariance Matrix Decomposition
@author: Satie
"""
import numpy as np
from numpy.linalg import matrix_rank
from multiprocessing import Pool
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
220,
220,
220,
15622,
319,
3825,
2365,
2579,
1467,
25,
2670,
25,
3559,
2864,
201,
198,
220,
220,
220,
220,
201,
198,
220,
220,
220,
39751,
2743,
... | 1.820359 | 167 |
TEST_LAT=-12
TEST_LONG=60
TEST_LOCATION_HIERARCHY_FOR_GEO_CODE=['madagascar']
| [
51,
6465,
62,
43,
1404,
10779,
1065,
198,
51,
6465,
62,
43,
18494,
28,
1899,
198,
51,
6465,
62,
29701,
6234,
62,
25374,
1137,
31315,
56,
62,
13775,
62,
38,
4720,
62,
34,
16820,
28,
17816,
9937,
44309,
20520,
628
] | 1.975 | 40 |
'''
Given a string s, return the longest palindromic substring in s.
Example 1:
Input: s = "babad"
Output: "bab"
Note: "aba" is also a valid answer.
Example 2:
Input: s = "cbbd"
Output: "bb"
Example 3:
Input: s = "a"
Output: "a"
Example 4:
Input: s = "ac"
Output: "a"
Constraints:
1 <= s.length <= 1000
s consist of only digits and English letters (lower-case and/or upper-case),
'''
import numpy as np
'''
from center to two end
Time: O(n^2)
Space: O(n)
'''
| [
7061,
6,
198,
15056,
257,
4731,
264,
11,
1441,
262,
14069,
6340,
521,
398,
291,
3293,
1806,
287,
264,
13,
628,
220,
198,
198,
16281,
352,
25,
198,
198,
20560,
25,
264,
796,
366,
65,
17325,
1,
198,
26410,
25,
366,
65,
397,
1,
198... | 2.478947 | 190 |
"""
The Purpose of the PrivateBinance Arbitrage Bot (based on RoibalBot) Python Program is to create an automated trading bot (functionality) on Binance
Utilized Python-Binance ( https://github.com/sammchardy/python-binance )
Advanced-Version capable of all exchanges, all coins (using cctx)
This 'bot' will run a functionality which seeks profitable triangular arbitrage opportunities on Binance
Weekly/Daily/Hourly reports created on profit/loss
Instructional Youtube Video: https://www.youtube.com/watch?v=8AAN03M8QhA - Additional Videos Available on youtube
Created 4/14/2018 by Joaquin Roibal
V 0.01 - Updated 4/20/2018
v 0.02 - Updated 5/30/2018 - Converted to Advanced Version: https://github.com/Roibal/Cryptocurrency-Trading-Bots-Python-Beginner-Advance
v 0.03 - Created 6/18/2018 - Binance Arbitrage Bot
v 0.04 - 6/21/2018 - Changed Name to CryptoTriangularArbitrageBinanceBot.py
v 1.00 - 6/24/2018 - Converted to Private_TriArbBot.py for Private Trader Group
All Rights Reserved
ATTENION: BY RUNNING SCRIPT YOU AGREE TO REMIT 1% of PROFITS TO THE FOLLOWING ADDRESS:
BTC: 1BYrAED4pi5DMKu2qZPv8pwe6rEeuxoCiD
NOTE: All Subsequent Version of Program must contain this message, unmodified, in it's entirety
Copyright (c) 2018 by Joaquin Roibal
"""
from binance.client import Client
from binance.enums import *
import time
from datetime import datetime
import matplotlib
from matplotlib import cm
import matplotlib.pyplot as plt
from binance.enums import *
import pprint
#import save_historical_data_Roibal
from BinanceKeys import BinanceKey1
"""
api_key = BinanceKey1['api_key']
api_secret = BinanceKey1['api_secret']
client = Client(api_key, api_secret)
"""
#Found Code To Remove 1000ms error - https://github.com/sammchardy/python-binance/issues/249
client = Binance(public_key = BinanceKey1['api_key'], secret_key = BinanceKey1['api_secret'], sync=True)
print(client)
#client.synced('order_market_buy', symbol='BNBBTC', quantity=10)
if __name__ == "__main__":
run()
| [
37811,
201,
198,
464,
32039,
286,
262,
15348,
33,
14149,
33619,
8394,
18579,
357,
3106,
319,
5564,
21342,
20630,
8,
11361,
6118,
318,
284,
2251,
281,
16359,
7313,
10214,
357,
8818,
1483,
8,
319,
347,
14149,
201,
198,
18274,
346,
1143,
... | 2.97093 | 688 |
from future.backports.http.cookies import SimpleCookie
from future.backports.urllib.parse import quote
import base64
import cgi
import hashlib
import hmac
import logging
import os
import time
from jwkest import as_unicode
from jwkest import safe_str_cmp
from six import PY2
from six import binary_type
from six import text_type
from six.moves import http_client
from oic import rndstr
from oic.exception import ImproperlyConfigured
from oic.exception import UnsupportedMethod
from oic.utils import time_util
from oic.utils.aes import AEAD
from oic.utils.aes import AESError
__author__ = 'rohe0002'
logger = logging.getLogger(__name__)
SUCCESSFUL = [200, 201, 202, 203, 204, 205, 206]
CORS_HEADERS = [
("Access-Control-Allow-Origin", "*"),
("Access-Control-Allow-Methods", "GET"),
("Access-Control-Allow-Headers", "Authorization")
]
OAUTH2_NOCACHE_HEADERS = [
('Pragma', 'no-cache'),
('Cache-Control', 'no-store'),
]
R2C = {
200: Response,
201: Created,
202: Accepted,
203: NonAuthoritativeInformation,
204: NoContent,
302: Redirect,
303: SeeOther,
400: BadRequest,
401: Unauthorized,
403: Forbidden,
404: NotFound,
405: NotSupported,
406: NotAcceptable,
500: ServiceError,
}
def extract(environ, empty=False, err=False):
"""Extracts strings in form data and returns a dict.
:param environ: WSGI environ
:param empty: Stops on empty fields (default: Fault)
:param err: Stops on errors in fields (default: Fault)
"""
formdata = cgi.parse(environ['wsgi.input'], environ, empty, err)
# Remove single entries from lists
for key, value in formdata.iteritems():
if len(value) == 1:
formdata[key] = value[0]
return formdata
def geturl(environ, query=True, path=True):
"""Rebuilds a request URL (from PEP 333).
:param query: Is QUERY_STRING included in URI (default: True)
:param path: Is path included in URI (default: True)
"""
url = [environ['wsgi.url_scheme'] + '://']
if environ.get('HTTP_HOST'):
url.append(environ['HTTP_HOST'])
else:
url.append(environ['SERVER_NAME'])
if environ['wsgi.url_scheme'] == 'https':
if environ['SERVER_PORT'] != '443':
url.append(':' + environ['SERVER_PORT'])
else:
if environ['SERVER_PORT'] != '80':
url.append(':' + environ['SERVER_PORT'])
if path:
url.append(getpath(environ))
if query and environ.get('QUERY_STRING'):
url.append('?' + environ['QUERY_STRING'])
return ''.join(url)
def getpath(environ):
"""Builds a path."""
return ''.join([quote(environ.get('SCRIPT_NAME', '')),
quote(environ.get('PATH_INFO', ''))])
def cookie_signature(key, *parts):
"""Generates a cookie signature.
:param key: The HMAC key to use.
:type key: bytes
:param parts: List of parts to include in the MAC
:type parts: list of bytes or strings
:returns: hexdigest of the HMAC
"""
assert isinstance(key, binary_type)
sha1 = hmac.new(key, digestmod=hashlib.sha1)
for part in parts:
if part:
if isinstance(part, text_type):
sha1.update(part.encode('utf-8'))
else:
sha1.update(part)
return text_type(sha1.hexdigest())
def verify_cookie_signature(sig, key, *parts):
"""Constant time verifier for signatures
:param sig: The signature hexdigest to check
:type sig: text_type
:param key: The HMAC key to use.
:type key: bytes
:param parts: List of parts to include in the MAC
:type parts: list of bytes or strings
:raises: `InvalidCookieSign` when the signature is wrong
"""
assert isinstance(sig, text_type)
return safe_str_cmp(sig, cookie_signature(key, *parts))
def _make_hashed_key(parts, hashfunc='sha256'):
"""
Construct a key via hashing the parts
If the parts do not have enough entropy of their
own, this doesn't help.
The size of the hash digest determines the size.
"""
h = hashlib.new(hashfunc)
for part in parts:
if isinstance(part, text_type):
part = part.encode('utf-8')
if part:
h.update(part)
return h.digest()
def make_cookie(name, load, seed, expire=0, domain="", path="", timestamp="",
enc_key=None):
"""
Create and return a cookie
The cookie is secured against tampering.
If you only provide a `seed`, a HMAC gets added to the cookies value
and this is checked, when the cookie is parsed again.
If you provide both `seed` and `enc_key`, the cookie gets protected
by using AEAD encryption. This provides both a MAC over the whole cookie
and encrypts the `load` in a single step.
The `seed` and `enc_key` parameters should be byte strings of at least
16 bytes length each. Those are used as cryptographic keys.
:param name: Cookie name
:type name: text
:param load: Cookie load
:type load: text
:param seed: A seed key for the HMAC function
:type seed: byte string
:param expire: Number of minutes before this cookie goes stale
:type expire: int
:param domain: The domain of the cookie
:param path: The path specification for the cookie
:param timestamp: A time stamp
:type timestamp: text
:param enc_key: The key to use for cookie encryption.
:type enc_key: byte string
:return: A tuple to be added to headers
"""
cookie = SimpleCookie()
if not timestamp:
timestamp = str(int(time.time()))
bytes_load = load.encode("utf-8")
bytes_timestamp = timestamp.encode("utf-8")
if enc_key:
# Make sure the key is 256-bit long, for AES-128-SIV
#
# This should go away once we push the keysize requirements up
# to the top level APIs.
key = _make_hashed_key((enc_key, seed))
# Random 128-Bit IV
iv = os.urandom(16)
crypt = AEAD(key, iv)
# timestamp does not need to be encrypted, just MAC'ed,
# so we add it to 'Associated Data' only.
crypt.add_associated_data(bytes_timestamp)
ciphertext, tag = crypt.encrypt_and_tag(bytes_load)
cookie_payload = [bytes_timestamp,
base64.b64encode(iv),
base64.b64encode(ciphertext),
base64.b64encode(tag)]
else:
cookie_payload = [
bytes_load, bytes_timestamp,
cookie_signature(seed, load, timestamp).encode('utf-8')]
cookie[name] = (b"|".join(cookie_payload)).decode('utf-8')
if path:
cookie[name]["path"] = path
if domain:
cookie[name]["domain"] = domain
if expire:
cookie[name]["expires"] = _expiration(expire,
"%a, %d-%b-%Y %H:%M:%S GMT")
return tuple(cookie.output().split(": ", 1))
def parse_cookie(name, seed, kaka, enc_key=None):
"""Parses and verifies a cookie value
Parses a cookie created by `make_cookie` and verifies
it has not been tampered with.
You need to provide the same `seed` and `enc_key`
used when creating the cookie, otherwise the verification
fails. See `make_cookie` for details about the verification.
:param seed: A seed key used for the HMAC signature
:type seed: bytes
:param kaka: The cookie
:param enc_key: The encryption key used.
:type enc_key: bytes or None
:raises InvalidCookieSign: When verification fails.
:return: A tuple consisting of (payload, timestamp) or None if parsing fails
"""
if not kaka:
return None
if isinstance(seed, text_type):
seed = seed.encode('utf-8')
parts = cookie_parts(name, kaka)
if parts is None:
return None
elif len(parts) == 3:
# verify the cookie signature
cleartext, timestamp, sig = parts
if not verify_cookie_signature(sig, seed, cleartext, timestamp):
raise InvalidCookieSign()
return cleartext, timestamp
elif len(parts) == 4:
# encrypted and signed
timestamp = parts[0]
iv = base64.b64decode(parts[1])
ciphertext = base64.b64decode(parts[2])
tag = base64.b64decode(parts[3])
# Make sure the key is 32-Bytes long
key = _make_hashed_key((enc_key, seed))
crypt = AEAD(key, iv)
# timestamp does not need to be encrypted, just MAC'ed,
# so we add it to 'Associated Data' only.
crypt.add_associated_data(timestamp.encode('utf-8'))
try:
cleartext = crypt.decrypt_and_verify(ciphertext, tag)
except AESError:
raise InvalidCookieSign()
return cleartext.decode('utf-8'), timestamp
return None
| [
6738,
2003,
13,
1891,
3742,
13,
4023,
13,
27916,
444,
1330,
17427,
34,
18055,
198,
6738,
2003,
13,
1891,
3742,
13,
333,
297,
571,
13,
29572,
1330,
9577,
198,
198,
11748,
2779,
2414,
198,
11748,
269,
12397,
198,
11748,
12234,
8019,
198... | 2.452263 | 3,624 |
# Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License Version 2 as
# published by the Free Software Foundation. You may not use, modify or
# distribute this program under any other version of the GNU General
# Public License.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Cem Gurkok
@license: GNU General Public License 2.0
@contact: cemgurkok@gmail.com
@organization:
"""
import volatility.obj as obj
import volatility.plugins.mac.common as common
import volatility.debug as debug
import volatility.utils as utils
import volatility.plugins.mac.pstasks as mac_tasks
from volatility.renderers import TreeGrid
from volatility.renderers.basic import Address
## http://hte.sourceforge.net/doxygenized-0.8.0pre1/machostruc_8h-source.html
## documentation for thread state, registry, launch cmd etc
thread_overlay = {
"thread": [ None, {
"options": [None, ['Flags', {'target': 'int', 'bitmap': {
"TH_OPT_INTMASK": 0,# interrupt / abort level
"TH_OPT_INTMASK": 1,# interrupt / abort level
"TH_OPT_VMPRIV": 2, # may allocate reserved memory
"TH_OPT_DTRACE": 3, # executing under dtrace_probe
"TH_OPT_SYSTEM_CRITICAL": 4, # Thread must always be allowed to run, even under heavy load
"TH_OPT_PROC_CPULIMIT": 5, # Thread has a task-wide CPU limit applied to it
"TH_OPT_PRVT_CPULIMIT": 6 # Thread has a thread-private CPU limit applied to it
}}]],
"state": [None, ['Flags', {'target': 'int', 'bitmap': {
"TH_WAIT": 0,
"TH_SUSP": 1,
"TH_RUN": 2,
"TH_UNINT": 3,
"TH_TERMINATE": 4,
"TH_TERMINATE2": 5,
"TH_IDLE": 6, # kAppleProfileTriggerClientThreadModeIdle
"TH_IDLE_N": 6 << 16 # kAppleProfileTriggerClientThreadModeNotIdle, !TH_IDLE
}}]],
"sched_mode": [None, ['Flags', {'target': 'int', 'bitmap': {
"TH_MODE_REALTIME": 0, # /* time constraints supplied */
"TH_MODE_TIMESHARE": 1, # /* use timesharing algorithm */
"TH_MODE_FAILSAFE": 2, # /* fail-safe has tripped */
"TH_MODE_PROMOTED": 3, # /* sched pri has been promoted */
"TH_MODE_ABORT": 4, # /* abort interruptible waits */
"TH_MODE_ABORTSAFELY": 5, # /* ... but only those at safe point */
# "TH_MODE_ISABORTED": (TH_MODE_ABORT | TH_MODE_ABORTSAFELY)
"TH_MODE_DEPRESS": 6, # /* normal depress yield */
"TH_MODE_POLLDEPRESS": 7, # /* polled depress yield */
# "TH_MODE_ISDEPRESSED": (TH_MODE_DEPRESS | TH_MODE_POLLDEPRESS)
}}]],
"ast": [None, ['Flags', {'target': 'int', 'bitmap': { # Asynchronous System Traps
# AST_NONE , no bits set
"AST_HALT": 0,
"AST_TERMINATE": 1,
"AST_BLOCK": 2,
"AST_UNUSED": 3,
"AST_QUANTUM": 4,
"AST_APC": 5, # /* migration APC hook */
"AST_URGENT": 6
}}]],
}]
}
# needed a separate walk_list function for threads since the original was task specific
# https://www.opensource.apple.com/source/xnu/xnu-124.1/osfmk/mach/vm_statistics.h
dict_alias = {
1: "VM_MEMORY_MALLOC",
2: "VM_MEMORY_MALLOC_SMALL",
3: "VM_MEMORY_MALLOC_LARGE",
4: "VM_MEMORY_MALLOC_HUGE",
5: "VM_MEMORY_SBRK",
6: "VM_MEMORY_REALLOC",
7: "VM_MEMORY_MALLOC_TINY",
8: "VM_MEMORY_MALLOC_LARGE_REUSABLE",
9: "VM_MEMORY_MALLOC_LARGE_REUSED",
10: "VM_MEMORY_ANALYSIS_TOOL",
20: "VM_MEMORY_MACH_MSG",
21: "VM_MEMORY_IOKIT",
30: "VM_MEMORY_STACK",
31: "VM_MEMORY_GUARD",
32: "VM_MEMORY_SHARED_PMAP",
33: "VM_MEMORY_DYLIB",
34: "VM_MEMORY_OBJC_DISPATCHERS",
35: "VM_MEMORY_UNSHARED_PMAP",
40: "VM_MEMORY_APPKIT",
41: "VM_MEMORY_FOUNDATION",
42: "VM_MEMORY_COREGRAPHICS",
43: "VM_MEMORY_CORESERVICES",
44: "VM_MEMORY_JAVA",
50: "VM_MEMORY_ATS",
51: "VM_MEMORY_LAYERKIT",
52: "VM_MEMORY_CGIMAGE",
53: "VM_MEMORY_TCMALLOC",
54: "VM_MEMORY_COREGRAPHICS_DATA",
55: "VM_MEMORY_COREGRAPHICS_SHARED",
56: "VM_MEMORY_COREGRAPHICS_FRAMEBUFFERS",
57: "VM_MEMORY_COREGRAPHICS_BACKINGSTORES",
60: "VM_MEMORY_DYLD",
61: "VM_MEMORY_DYLD_MALLOC",
62: "VM_MEMORY_SQLITE",
63: "VM_MEMORY_JAVASCRIPT_CORE",
64: "VM_MEMORY_JAVASCRIPT_JIT_EXECUTABLE_ALLOCATOR",
65: "VM_MEMORY_JAVASCRIPT_JIT_REGISTER_FILE",
66: "VM_MEMORY_GLSL",
67: "VM_MEMORY_OPENCL",
68: "VM_MEMORY_COREIMAGE",
69: "VM_MEMORY_WEBCORE_PURGEABLE_BUFFERS",
70: "VM_MEMORY_IMAGEIO",
71: "VM_MEMORY_COREPROFILE",
72: "VM_MEMORY_ASSETSD",
240: "VM_MEMORY_APPLICATION_SPECIFIC_1",
241: "VM_MEMORY_APPLICATION_SPECIFIC_2",
242: "VM_MEMORY_APPLICATION_SPECIFIC_3",
243: "VM_MEMORY_APPLICATION_SPECIFIC_4",
244: "VM_MEMORY_APPLICATION_SPECIFIC_5",
245: "VM_MEMORY_APPLICATION_SPECIFIC_6",
246: "VM_MEMORY_APPLICATION_SPECIFIC_7",
247: "VM_MEMORY_APPLICATION_SPECIFIC_8",
248: "VM_MEMORY_APPLICATION_SPECIFIC_9",
249: "VM_MEMORY_APPLICATION_SPECIFIC_10",
250: "VM_MEMORY_APPLICATION_SPECIFIC_11",
251: "VM_MEMORY_APPLICATION_SPECIFIC_12",
252: "VM_MEMORY_APPLICATION_SPECIFIC_13",
253: "VM_MEMORY_APPLICATION_SPECIFIC_14",
254: "VM_MEMORY_APPLICATION_SPECIFIC_15",
255: "VM_MEMORY_APPLICATION_SPECIFIC_16"
}
class mac_threads(mac_tasks.mac_tasks):
""" List Process Threads """
| [
2,
4709,
18486,
198,
2,
15069,
357,
34,
8,
4343,
12,
6390,
4709,
18486,
5693,
198,
2,
198,
2,
770,
2393,
318,
636,
286,
4709,
18486,
13,
198,
2,
198,
2,
4709,
18486,
318,
1479,
3788,
26,
345,
460,
17678,
4163,
340,
290,
14,
273,... | 1.499609 | 6,387 |
import re
import string
from datetime import datetime
import nltk
from nltk.corpus import stopwords
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
linkPattern = (
"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"
)
| [
11748,
302,
198,
11748,
4731,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
299,
2528,
74,
198,
6738,
299,
2528,
74,
13,
10215,
79,
385,
1330,
2245,
10879,
198,
6738,
299,
2528,
74,
13,
34086,
3681,
13,
85,
5067,
1330,
11352,
... | 2.369318 | 176 |
from sympy.core.numbers import Integer
from sympy.matrices.dense import (eye, zeros)
i3 = Integer(3)
M = eye(100)
| [
6738,
10558,
88,
13,
7295,
13,
77,
17024,
1330,
34142,
198,
6738,
10558,
88,
13,
6759,
45977,
13,
67,
1072,
1330,
357,
25379,
11,
1976,
27498,
8,
198,
198,
72,
18,
796,
34142,
7,
18,
8,
198,
44,
796,
4151,
7,
3064,
8,
628,
628,
... | 2.586957 | 46 |
# tokenizer for DL models
#df is the dataframe with the columns name and labels for training tokenizer
#funtion for pad seq
#input train dataframe and test dataframe and tokenizer
#takes train dataframe and test dataframe as inputs and returns their enoding
#compile model and train it
#function to predict labels
#takes the model, names list and toke as input
| [
198,
2,
11241,
7509,
329,
23641,
4981,
198,
2,
7568,
318,
262,
1366,
14535,
351,
262,
15180,
1438,
290,
14722,
329,
3047,
11241,
7509,
198,
2,
69,
2797,
295,
329,
14841,
33756,
198,
2,
15414,
4512,
1366,
14535,
290,
1332,
1366,
14535,... | 3.934783 | 92 |
# Generated by Django 2.2.5 on 2019-12-05 18:21
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
20,
319,
13130,
12,
1065,
12,
2713,
1248,
25,
2481,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
import sys, neo
import matplotlib.pyplot as plt
def load_neo_block(filename):
""" Load data from file into Neo """
with neo.get_io(filename) as io:
return io.read_block()
def plot_analogsignal(block, filename):
""" Plot first AnalogSignal of Neo """
anasig = block.segments[0].analogsignals[0]
plt.plot(anasig.times, anasig.magnitude, label=anasig.name)
plt.xlabel('Time [{}]'.format(anasig.times.dimensionality.latex))
plt.ylabel('Amplitude [{}]'.format(anasig.dimensionality.latex))
plt.legend()
plt.savefig(filename)
if __name__=='__main__':
neo_filename, plot_filename = sys.argv[1:]
block = load_neo_block(neo_filename)
plot_analogsignal(block, plot_filename)
| [
11748,
25064,
11,
19102,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
4299,
3440,
62,
710,
78,
62,
9967,
7,
34345,
2599,
198,
220,
220,
220,
37227,
8778,
1366,
422,
2393,
656,
21227,
37227,
198,
220,
220,
... | 2.436667 | 300 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_compat
------------
Tests for `cookiecutter.compat` module.
"""
from cookiecutter.compat import which
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
9288,
62,
5589,
265,
198,
10541,
198,
198,
51,
3558,
329,
4600,
44453,
8968,
353,
13,
5589,
265,
63,... | 2.612903 | 62 |
from django.core.management import call_command
from django.core.management.base import BaseCommand
from django.contrib.auth.models import Group, User, Permission
DEFAULT_DEV_ENV_PASS = "password123"
INTERNAL_PROVIDER_PERMISSIONS = [
"add_insurer",
"change_insurer",
"view_insurer",
"add_participant",
"change_participant",
"view_participant",
"add_program",
"change_program",
"view_program",
"add_service",
"change_service",
"view_service",
"add_visit",
"change_visit",
"view_visit",
"add_hcvnote",
"change_hcvnote",
"view_hcvnote",
"add_medication",
"change_medication",
"view_medication",
"add_appointment",
"change_appointment",
"view_appointment",
"add_casemanagement",
"change_casemanagement",
"view_casemanagement",
"add_frontdeskevent",
"change_frontdeskevent",
"view_frontdeskevent",
"add_urinedrugscreen",
"change_urinedrugscreen",
"view_urinedrugscreen",
"add_programavailability",
"change_programavailability",
"view_programavailability",
"add_behavioralhealthnote",
"change_behavioralhealthnote",
"view_behavioralhealthnote",
"view_site",
"add_sepdata",
"change_sepdata",
"view_sepdata",
]
FRONT_DESK_PERMISSIONS = [
"add_insurer",
"change_insurer",
"view_insurer",
"add_participant",
"change_participant",
"view_participant",
"add_program",
"change_program",
"view_program",
"add_service",
"change_service",
"view_service",
"add_visit",
"change_visit",
"add_appointment",
"change_appointment",
"view_appointment",
"add_frontdeskevent",
"change_frontdeskevent",
"view_frontdeskevent",
"add_programavailability",
"change_programavailability",
"view_programavailability",
"view_site",
"add_sepdata",
]
UDS_PROVIDER_PERMISSIONS = [
"view_insurer",
"add_participant",
"change_participant",
"view_participant",
"view_program",
"view_service",
"change_visit",
"view_visit",
"add_appointment",
"change_appointment",
"view_appointment",
"add_frontdeskevent",
"change_frontdeskevent",
"view_frontdeskevent",
"view_programavailability",
"add_urinedrugscreen",
"change_urinedrugscreen",
"view_urinedrugscreen",
"view_site",
]
DEFAULT_GROUPS = {
"front_desk": FRONT_DESK_PERMISSIONS,
"uds_provider": UDS_PROVIDER_PERMISSIONS,
"internal_provider": INTERNAL_PROVIDER_PERMISSIONS,
}
def add_users_to_groups(output=True):
"""
adds user to group of same name
"""
for group in DEFAULT_GROUPS:
user = User.objects.get(username=group)
role_title = Group.objects.get(name=group)
user.groups.add(role_title)
| [
6738,
42625,
14208,
13,
7295,
13,
27604,
1330,
869,
62,
21812,
198,
6738,
42625,
14208,
13,
7295,
13,
27604,
13,
8692,
1330,
7308,
21575,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
4912,
11,
11787,
11,
2448,
3... | 2.352301 | 1,195 |
from clustre.models._models import (
cifar10_cnn,
cifar10_wide_resnet34_10,
mnist_cnn,
mnist_resnet18,
)
| [
6738,
32966,
260,
13,
27530,
13557,
27530,
1330,
357,
198,
220,
220,
220,
269,
361,
283,
940,
62,
66,
20471,
11,
198,
220,
220,
220,
269,
361,
283,
940,
62,
4421,
62,
411,
3262,
2682,
62,
940,
11,
198,
220,
220,
220,
285,
77,
39... | 1.890625 | 64 |
import csv
import os
from pathlib import Path
from typing import Any, Dict, List
import numpy as np
from amp.config import KL_ANNEALRATE, MAX_KL, MAX_LENGTH, MAX_TEMPERATURE, MIN_KL, MIN_TEMPERATURE, TAU_ANNEALRATE
from amp.data_utils import sequence
from amp.models.model import Model
from amp.utils.basic_model_serializer import BasicModelSerializer
from keras import backend
from keras.callbacks import Callback
| [
11748,
269,
21370,
198,
11748,
28686,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
19720,
1330,
4377,
11,
360,
713,
11,
7343,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
20766,
13,
11250,
1330,
48253,
62,
1565,
12161,
1847,
49... | 3.142857 | 133 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-09 21:45
from __future__ import unicode_literals
from django.db import migrations, models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
2,
2980,
515,
416,
37770,
352,
13,
1157,
13,
20,
319,
2177,
12,
940,
12,
2931,
2310,
25,
2231,
201,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
... | 2.53125 | 64 |
#!/usr/bin/python3
import argparse
try:
from bin import banner
from bin import trim_url
banner.print_banner()
parser = argparse.ArgumentParser(prog='webvulsca.py', usage='%(prog)s [options]', add_help=False,
formatter_class=CapitalisedHelpFormatter)
parser._positionals.title = 'Positional arguments'
parser._optionals.title = 'Options'
parser.add_argument('-u', '--url', help='Target URL (e.g. "http://www.site.com/")', required=True, metavar='')
parser.add_argument('-o', '--outfile', help='Output Directory Path (e.g. "/root/example_dir/")', metavar='',
required=True)
parser.add_argument('-c','--crawl',help='This option crawls the complete website',action='store_true',default=False)
parser.add_argument('-h', '--help', action='help', default=argparse.SUPPRESS,
help='Show this help message and exit.')
args = parser.parse_args()
trim_url.url_splitter(args.url,args.crawl)
except ImportError:
print("[-] Unsupported for Python 2")
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
11748,
1822,
29572,
628,
198,
28311,
25,
198,
220,
220,
220,
422,
9874,
1330,
17625,
198,
220,
220,
220,
422,
9874,
1330,
15797,
62,
6371,
628,
198,
220,
220,
220,
17625,
13,
4798,... | 2.457014 | 442 |
# The following code has been modified from that provided at github.com/piborg/diablo
# by Garrisen Cizmich
# To run this program (or any other program with I2C control), you must perform the following:
# Open Terminal.
# Type "ps aux | grep ManualMotorControl.py" without the quotes and press Enter.
# Find the process owned by root with "sudo" in the command to the right. Note its PID (number in the second column).
# Type "sudo kill -TERM [pid]" without the quotes, replacing [pid] with the PID found above. Press Enter.
# You can now start this program.
# Runs through a simulation of the RockSat-X 2020 mission. The current delay between each step is 10 seconds. Each line in the below summary is a step.
# Initially waits.
# "Turns on" 360 camera and RFID experiment (not currently operational).
# Extends boom for specified length of time (dictated by the variable "motorOnTime").
# Retracts boom for specified length of time (dictated by the variable "motorOnTime").
# "Turns off" 360 camera and RFID experiment (not currently operational).
# To kill the motor, press Ctrl+C at any time. DO NOT USE THE STOP BUTTON!
# If all else fails, pull the jumper off the motor driver board. This action will immediately stop the motor. It cannot be started until the jumper is replaced, however.
# Import library functions we need
from __future__ import print_function
from diablo import *
from time import sleep
from sys import exit
import RPi.GPIO as GPIO # Fetch the GPIO library and initialize the object.
GPIO.setmode(GPIO.BCM)
# This value is the length of time (in seconds) for which the motor will be turned on for boom extension/retraction.
motorOnTime = 10 # CHANGE AS NECESSARY!!!
# Set up pins 9 and 10 for input.
GPIO.setup(24, GPIO.IN)
GPIO.setup(23, GPIO.IN)
# Set up the Diablo
DIABLO = Diablo() # Create a new Diablo object
DIABLO.Init() # Set the board up (checks the board is connected)
if not DIABLO.foundChip:
boards = ScanForDiablo()
if len(boards) == 0:
print('No Diablo found, check you are attached :)')
else:
print('No Diablo at address %02X, but we did find boards:' % (DIABLO.i2cAddress))
for board in boards:
print(' %02X (%d)' % (board, board))
print('If you need to change the I2C address change the set-up line so it is correct, e.g.')
print('DIABLO.i2cAddress = 0x%02X' % (boards[0]))
exit()
#DIABLO.SetEpoIgnore(True) # Uncomment to disable EPO latch, needed if you do not have a switch / jumper
DIABLO.ResetEpo() # Reset the stop switch (EPO) state
# if you do not have a switch across the two pin header then fit the jumper
# Loop over the sequence until the user presses CTRL+C
print ('Press CTRL+C to cancel manual motor control.')
try:
print('Simulation started. First action will occur in 10 seconds.')
sleep(10) # Sleep for 10 s, now at t = 10 s.
print('Turning on 360 camera and RFID experiment... (currently does nothing)')
# Add code to turn on 360 camera and RFID experiment.
sleep(10) # Sleep for 10 s, now at t = 20 s.
print('Extending boom...')
DIABLO.SetMotor1(-1.0) # Activate the motor in a ccw direction (from back of motor).
sleep(motorOnTime) # Sleep for the desired motor on time (please configure this delay above).
DIABLO.SetMotor1(0.0) # Turn off motor.
print('Boom extended. Holding position...')
sleep(10) # Sleep for 10 s, now at t = 30 s + motorOnTime/1000.
print('Retracting boom...')
DIABLO.SetMotor1(+1.0) # Activate the motor in a cw direction (from back of motor)
sleep(motorOnTime) # Sleep for the desired motor on time (again, please configure this delay above).
DIABLO.SetMotor1(0.0)
print('Boom retracted. Waiting to turn off camera and experiment...')
sleep(10) # Sleep for 10 s, now at t = 40 s + 2 * motorOntime/1000.
print('Turning off 360 camera and RFID experiment... (currently does nothing)')
# Add code to turn off 360 camera and RFID experiment.
DIABLO.MotorsOff() # Turn off motors just in case issues occur.
print('The program has finished running.')
except KeyboardInterrupt:
# User has pressed CTRL+C
DIABLO.MotorsOff() # Turn both motors off
print ('Done')
| [
2,
383,
1708,
2438,
468,
587,
9518,
422,
326,
2810,
379,
33084,
13,
785,
14,
79,
571,
2398,
14,
10989,
18817,
198,
2,
416,
7164,
2442,
268,
327,
528,
76,
488,
198,
198,
2,
1675,
1057,
428,
1430,
357,
273,
597,
584,
1430,
351,
31... | 3.014614 | 1,437 |
import numpy as np
| [
11748,
299,
32152,
355,
45941,
628
] | 3.333333 | 6 |
""" An object that wraps the Dephy ActPack """
import os, sys
import time
import csv
import traceback
import numpy as np
import h5py
import deprecated
from enum import Enum
from math import isfinite
from os.path import realpath
# Dephy library import
from flexsea import fxUtils as fxu # pylint: disable=no-name-in-module
from flexsea import fxEnums as fxe # pylint: disable=no-name-in-module
from flexsea import flexsea as flex
# Version of the ActPackMan library
__version__="1.0.0"
class FlexSEA(flex.FlexSEA):
""" A singleton class that prevents re-initialization of FlexSEA """
_instance = None
# See ActPackState for all available data
labels = [ # matches varsToStream
"State time",
"Motor angle", "Motor velocity", "Motor acceleration",
"Motor voltage", "Motor current",
"Battery voltage", "Battery current"
]
DEFAULT_VARIABLES = [ # struct fields defined in flexsea/dev_spec/ActPackState.py
"state_time",
"mot_ang", "mot_vel", "mot_acc",
"mot_volt", "mot_cur",
"batt_volt", "batt_curr"
]
MOTOR_CLICKS_PER_REVOLUTION = 16384
RAD_PER_SEC_PER_GYRO_LSB = np.pi/180/32.8
G_PER_ACCELEROMETER_LSB = 1./8192
NM_PER_AMP = 0.146
RAD_PER_CLICK = 2*np.pi/MOTOR_CLICKS_PER_REVOLUTION
RAD_PER_DEG = np.pi/180.
ticks_to_motor_radians = lambda x: x*(np.pi/180./45.5111)
motor_radians_to_ticks = lambda q: q*(180*45.5111/np.pi)
class ActPackMan(object):
""" (Dephy) Actuator Pack Manager
Keeps track of a single Dephy Actuator
"""
def __init__(self, devttyACMport, baudRate=230400, csv_file_name=None,
hdf5_file_name=None, vars_to_log=DEFAULT_VARIABLES, gear_ratio=1.0,
printingRate = 10, updateFreq = 100, shouldLog = False, logLevel=6):
""" Intializes variables, but does not open the stream. """
#init printer settings
self.updateFreq = updateFreq
self.shouldLog = shouldLog
self.logLevel = logLevel
self.prevReadTime = time.time()-1/self.updateFreq
self.gear_ratio = gear_ratio
# self.varsToStream = varsToStream
self.baudRate = baudRate
self.named_port = devttyACMport
self.devttyACMport = realpath(devttyACMport)
self.csv_file_name = csv_file_name
self.hdf5_file_name = hdf5_file_name
self.csv_file = None
self.csv_writer = None
self.vars_to_log = vars_to_log
self.entered = False
self._state = None
self.act_pack = None # code for never having updated
## 'With'-block interface for ensuring a safe shutdown.
def __enter__(self):
""" Runs when the object is used in a 'with' block. Initializes the comms."""
if self.csv_file_name is not None:
with open(self.csv_file_name,'w') as fd:
writer = csv.writer(fd)
writer.writerow(["pi_time"]+self.vars_to_log)
self.csv_file = open(self.csv_file_name,'a').__enter__()
self.csv_writer = csv.writer(self.csv_file)
if self.hdf5_file_name is not None:
self.hdf5_file = h5py.File(self.hdf5_file_name, 'w')
fxs = FlexSEA() # grab library singleton (see impl. in ActPackMan.py)
# dev_id = fxs.open(port, baud_rate, log_level=6)
self.dev_id = fxs.open(self.devttyACMport, self.baudRate, log_level=self.logLevel)
# fxs.start_streaming(dev_id, 100, log_en=False)
# Start stream
# fxs = FlexSEA() # grab library singleton (see impl. in ActPackMan.py)
fxs.start_streaming(self.dev_id, self.updateFreq, log_en=self.shouldLog)
print('devID %d streaming from %s (i.e. %s)'%(
self.dev_id, self.devttyACMport, self.named_port))
time.sleep(0.1)
# app_type = fxs.get_app_type(dev_id)
# self.app_type = fxs.get_app_type(self.dev_id)
# print(self.app_type)
self._state = _ActPackManStates.VOLTAGE
self.entered = True
return self
def __exit__(self, etype, value, tb):
""" Runs when leaving scope of the 'with' block. Properly terminates comms and file access."""
if not (self.dev_id is None):
print('Turning off control for device %s (i.e. %s)'%(self.devttyACMport, self.named_port))
t0=time.time()
fxs = FlexSEA() # singleton
# fxs.send_motor_command(self.dev_id, fxe.FX_NONE, 0) # 0 mV
self.v = 0.0
# fxs.stop_streaming(self.dev_id) # experimental
# sleep(0.1) # Works
self.update()
time.sleep(1.0/self.updateFreq) # Works
while(abs(self.i)>0.1):
self.update()
self.v = 0.0
time.sleep(1.0/self.updateFreq)
# fxs.send_motor_command(self.dev_id, fxe.FX_NONE, 0) # 0 mV
# sleep(0.0) # doesn't work in that it results in the following ridiculous warning:
# "Detected stream from a previous session, please power cycle the device before continuing"
fxs.close(self.dev_id)
time.sleep(1.0/self.updateFreq)
print('done.', time.time()-t0)
if self.csv_file_name is not None:
self.csv_file.__exit__(etype, value, tb)
if not (etype is None):
traceback.print_exception(etype, value, tb)
## Critical data reading function. Run update exactly once per loop.
## Gain Setting and Control Mode Switching (using hidden member self._state)
"""
The behavior of these gain-setting function is to require setting gains
before setting the corresponding set-point. Voltage mode requires no
gains, and therefore can be accessed at any time. Setting a voltage means
gains need to be re-specified before any other mode can be controlled.
"""
## Primary getters and setters
# electrical variables
# motor-side variables
# output variables
# other
## Greek letter math symbol property interface. This is the good
# interface, for those who like code that resembles math. It works best
# to use the UnicodeMath plugin for sublime-text, "Fast Unicode Math
# Characters" in VS Code, or the like to allow easy typing of ϕ, θ, and
# τ.
# electrical variables
v = property(get_voltage_qaxis_volts, set_voltage_qaxis_volts, doc="voltage_qaxis_volts")
i = property(get_current_qaxis_amps, set_current_qaxis_amps, doc="current_qaxis_amps")
# motor-side variables
ϕ = property(get_motor_angle_radians, set_motor_angle_radians, doc="motor_angle_radians")
ϕd = property (get_motor_velocity_radians_per_second,
set_motor_velocity_radians_per_second, doc="motor_velocity_radians_per_second")
ϕdd = property(get_motor_acceleration_radians_per_second_squared,
set_motor_acceleration_radians_per_second_squared,
doc="motor_acceleration_radians_per_second_squared")
τm = property(get_motor_torque_newton_meters, set_motor_torque_newton_meters,
doc="motor_torque_newton_meters")
# output-side variables
θ = property(get_output_angle_radians, set_output_angle_radians)
θd = property(get_output_velocity_radians_per_second,
set_output_velocity_radians_per_second, doc="output_velocity_radians_per_second")
θdd = property(get_output_acceleration_radians_per_second_squared,
set_output_acceleration_radians_per_second_squared,
doc="output_acceleration_radians_per_second_squared")
τ = property(get_output_torque_newton_meters, set_output_torque_newton_meters,
doc="output_torque_newton_meters")
# other
α = property(get_accelerometer_vector_gravity, doc="accelerometer vector, g")
ω = property(get_gyro_vector_radians_per_second, doc="gyro vector, rad/s")
## Weird-unit getters and setters
| [
37811,
1052,
2134,
326,
27521,
262,
2129,
12114,
2191,
11869,
37227,
198,
198,
11748,
28686,
11,
25064,
198,
11748,
640,
198,
11748,
269,
21370,
198,
11748,
12854,
1891,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
289,
20,
9078,
198,
... | 2.324629 | 3,370 |
import logging
import json
import boto3
from ask_amy.core.object_dictionary import ObjectDictionary
logger = logging.getLogger()
| [
11748,
18931,
198,
11748,
33918,
198,
11748,
275,
2069,
18,
198,
6738,
1265,
62,
14814,
13,
7295,
13,
15252,
62,
67,
14188,
1330,
9515,
35,
14188,
198,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
3419,
628
] | 3.447368 | 38 |
from sanic import Sanic
from sanic.response import text, json
import json as Json
from intuitlib.client import AuthClient
from quickbooks import QuickBooks
from quickbooks.objects.customer import Customer
from quickbooks.objects.invoice import Invoice
from quickbooks.objects.account import Account
from quickbooks.objects.purchase import Purchase
from quickbooks.objects.payment import Payment
from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession
from sqlalchemy.orm import sessionmaker
from sqlalchemy import select
from contextvars import ContextVar
from models import QBExport
from encoder import AlchemyEncoder
from sanic_cors import CORS, cross_origin
app = Sanic("qb_app")
CORS(app)
_base_model_session_ctx = ContextVar("session")
bind = create_async_engine("postgresql+asyncpg://postgres:postgres@localhost/qbexample", echo=True)
qb_auth_client = AuthClient(
client_id='<client ID>',
client_secret='<client secret>',
environment='sandbox',
redirect_uri='https://developer.intuit.com/v2/OAuth2Playground/RedirectUrl'
)
client = QuickBooks(
auth_client=qb_auth_client,
refresh_token='<refresh token>',
company_id='<company id>'
)
app.ctx.qb_client = client
@app.middleware("request")
@app.middleware("response")
@app.get("/")
@app.get("/qbdata")
| [
6738,
5336,
291,
1330,
2986,
291,
198,
6738,
5336,
291,
13,
26209,
1330,
2420,
11,
33918,
198,
11748,
33918,
355,
449,
1559,
198,
198,
6738,
493,
5013,
8019,
13,
16366,
1330,
26828,
11792,
198,
6738,
2068,
12106,
1330,
12029,
30650,
198... | 3.086854 | 426 |
from .Interpolation import Interpolation, linearSpline, cubicSpline | [
6738,
764,
9492,
16104,
341,
1330,
4225,
16104,
341,
11,
14174,
26568,
500,
11,
27216,
26568,
500
] | 3.941176 | 17 |
# coding=utf-8
# *** WARNING: this file was generated by crd2pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
SNAKE_TO_CAMEL_CASE_TABLE = {
"api_version": "apiVersion",
"ca_bundle": "caBundle",
"cluster_name": "clusterName",
"external_server_ur_ls": "externalServerURLs",
"last_generation": "lastGeneration",
"last_transition_time": "lastTransitionTime",
"observed_generation": "observedGeneration",
"registration_image_pull_spec": "registrationImagePullSpec",
"related_resources": "relatedResources",
"work_image_pull_spec": "workImagePullSpec",
}
CAMEL_TO_SNAKE_CASE_TABLE = {
"apiVersion": "api_version",
"caBundle": "ca_bundle",
"clusterName": "cluster_name",
"externalServerURLs": "external_server_ur_ls",
"lastGeneration": "last_generation",
"lastTransitionTime": "last_transition_time",
"observedGeneration": "observed_generation",
"registrationImagePullSpec": "registration_image_pull_spec",
"relatedResources": "related_resources",
"workImagePullSpec": "work_image_pull_spec",
}
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
17202,
39410,
25,
428,
2393,
373,
7560,
416,
1067,
67,
17,
79,
377,
12994,
13,
17202,
198,
2,
17202,
2141,
407,
4370,
416,
1021,
4556,
345,
821,
1728,
345,
760,
644,
345,
389,
1804,
0,
17202,
... | 2.710462 | 411 |
#!/usr/bin/env python3
"""
This document is created by magic at 2018/8/17
This is impletement of merge sort
归并排序
"""
def merge(left, right):
"""
合并两个已经排好序的列表
:param left:
:param right:
:return:
"""
tmp = []
i, j = 0, 0
while i < len(left) and j < len(right):
if left[i] < right[j]:
tmp.append(left[i])
i += 1
else:
tmp.append(right[j])
j += 1
tmp += left[i:]
tmp += right[j:]
return tmp
if __name__ == '__main__':
v = [5, 3, 2, 4, 7, 8, 1, 9]
print(merge_sort(v))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
37811,
198,
1212,
3188,
318,
2727,
416,
5536,
379,
2864,
14,
23,
14,
1558,
198,
198,
1212,
318,
848,
1616,
972,
286,
20121,
3297,
198,
37605,
240,
33176,
35050,
236,
240,
41... | 1.810976 | 328 |
''' Test pykarbon.i2c functions '''
import pykarbon.i2c as pki
| [
7061,
6,
6208,
12972,
21070,
4189,
13,
72,
17,
66,
5499,
705,
7061,
198,
11748,
12972,
21070,
4189,
13,
72,
17,
66,
355,
279,
4106,
628,
628
] | 2.444444 | 27 |
#! /usr/bin/python3
#import os.path
#import tornado.httpserver
#import tornado.websocket
#import tornado.ioloop
#import tornado.web
import RPi.GPIO as GPIO
import time
#Initialize Raspberry PI GPIO
GPIO.setmode(GPIO.BOARD)
# GPIO.setup(11, GPIO.OUT)
# GPIO.setup(13, GPIO.OUT)
# GPIO.setup(16, GPIO.OUT)
GPIO.setup(18, GPIO.OUT)
#Tonado server port
#PORT = 80
#class MainHandler(tornado.web.RequestHandler):
#def get(self):
#print ("[HTTP](MainHandler) User Connected.")
#self.render("index.html")
#class WSHandler(tornado.websocket.WebSocketHandler):
#def open(self):
#print ('[WS] Connection was opened.')
#def on_message(self, message):
#print ('[WS] Incoming message:'), message
GPIO.output(18, True)
time.sleep(5)
GPIO.output(18, False)
# if message == 'on_b':
# GPIO.output(11 , True)
# if message == 'off_b':
# GPIO.output(11 , False)
# if message == 'on_w':
# GPIO.output(13 , True)
# if message == 'off_w':
# GPIO.output(13 , False)
GPIO.cleanup()
#End of Program
| [
2,
0,
1220,
14629,
14,
8800,
14,
29412,
18,
198,
198,
2,
11748,
28686,
13,
6978,
198,
2,
11748,
33718,
13,
5450,
18497,
198,
2,
11748,
33718,
13,
732,
1443,
5459,
198,
2,
11748,
33718,
13,
1669,
11224,
198,
2,
11748,
33718,
13,
12... | 2.332623 | 469 |
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Shariq and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe import _
from frappe.utils import flt
from accounting.accounting.general_ledger import make_gl_entry, make_reverse_gl_entry
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
357,
66,
8,
12131,
11,
911,
2743,
80,
290,
20420,
198,
2,
1114,
5964,
1321,
11,
3387,
766,
5964,
13,
14116,
198,
198,
6738,
11593,
37443,
834,
1330,
28000,
... | 3.403846 | 104 |
import hashlib
import datetime
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Q
from django.shortcuts import render, HttpResponse
from django.shortcuts import redirect
from django.http import StreamingHttpResponse, JsonResponse
from django.urls import reverse
from login.models import User
from rent.models import rentOrder
from house.models import house
from . import forms
from repair.models import repairOrder,subscribe
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
| [
11748,
12234,
8019,
198,
11748,
4818,
8079,
198,
198,
6738,
42625,
14208,
13,
7295,
13,
1069,
11755,
1330,
9515,
13921,
3673,
3109,
396,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
1330,
1195,
198,
6738,
42625,
14208,
13,
19509,
23779,
... | 3.862319 | 138 |
import json
| [
11748,
33918,
198
] | 4 | 3 |
from django import template
register = template.Library()
@register.filter
@register.filter | [
6738,
42625,
14208,
1330,
11055,
198,
198,
30238,
796,
11055,
13,
23377,
3419,
628,
198,
31,
30238,
13,
24455,
198,
198,
31,
30238,
13,
24455
] | 3.8 | 25 |
"""
Module that cleans up temporary files.
Original source: https://github.com/dmyersturnbull/tyrannosaurus
Copyright 2020–2021 Douglas Myers-Turnbull
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0
"""
from __future__ import annotations
import logging
from pathlib import Path
from typing import Optional, Sequence
from typing import Tuple as Tup
from tyrannosaurus.context import Context
from tyrannosaurus.helpers import TrashList, scandir_fast
logger = logging.getLogger(__package__)
__all__ = ["Clean"]
| [
37811,
198,
26796,
326,
20658,
510,
8584,
3696,
13,
198,
198,
20556,
2723,
25,
3740,
1378,
12567,
13,
785,
14,
67,
1820,
263,
301,
700,
16308,
14,
774,
81,
1236,
47650,
198,
15269,
12131,
1906,
1238,
2481,
15796,
25434,
12,
17278,
163... | 3.641711 | 187 |
#!/usr/bin/env python3
import groupLine as groupLine
import csv
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
11748,
1448,
13949,
355,
1448,
13949,
198,
11748,
269,
21370,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1388,
3419,
198,
220,
220,
220,... | 2.122807 | 57 |
import numpy as np
from skimage.filters import frangi, hessian
from skimage.data import camera
from skimage.util import crop
from skimage._shared.testing import (assert_equal, assert_almost_equal,
assert_allclose)
| [
11748,
299,
32152,
355,
45941,
198,
6738,
1341,
9060,
13,
10379,
1010,
1330,
1216,
648,
72,
11,
339,
824,
666,
198,
6738,
1341,
9060,
13,
7890,
1330,
4676,
198,
6738,
1341,
9060,
13,
22602,
1330,
13833,
198,
198,
6738,
1341,
9060,
135... | 2.509804 | 102 |
#!/usr/bin/env python
import requests
from suds.client import Client
import base64
from cStringIO import StringIO
import xml.etree.ElementTree as ET
import datetime
import argparse
import urlparse
import logging
import settings
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
7007,
198,
6738,
424,
9310,
13,
16366,
1330,
20985,
198,
198,
11748,
2779,
2414,
198,
6738,
269,
10100,
9399,
1330,
10903,
9399,
198,
11748,
35555,
13,
316,
631,
13,
20180,
... | 3.304878 | 82 |
#!/usr/bin/python3
import brownie
def test_check_bounds(accounts, nft):
"""check bounds"""
with brownie.reverts("dev: index out of bounds"):
nft.transferRange(accounts[2], 0, 1000, {"from": accounts[0]})
with brownie.reverts("dev: index out of bounds"):
nft.transferRange(accounts[2], 1000000, 1000, {"from": accounts[0]})
with brownie.reverts("dev: index out of bounds"):
nft.transferRange(accounts[2], 1, 1, {"from": accounts[0]})
with brownie.reverts("dev: index out of bounds"):
nft.transferRange(accounts[2], 1, 1000000, {"from": accounts[0]})
def test_stop_start(accounts, nft):
"""stop below start"""
with brownie.reverts("dev: stop < start"):
nft.transferRange(accounts[2], 2000, 1000, {"from": accounts[1]})
def test_multiple_ranges(accounts, nft):
"""multiple ranges"""
with brownie.reverts("dev: multiple ranges"):
nft.transferRange(accounts[2], 1000, 15000, {"from": accounts[1]})
with brownie.reverts("dev: multiple ranges"):
nft.transferRange(accounts[2], 10000, 10002, {"from": accounts[1]})
def test_not_owner(accounts, nft):
"""sender does not own range"""
with brownie.reverts("dev: sender does not own"):
nft.transferRange(accounts[3], 11000, 12000, {"from": accounts[1]})
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
11748,
7586,
494,
628,
198,
4299,
1332,
62,
9122,
62,
65,
3733,
7,
23317,
82,
11,
299,
701,
2599,
198,
220,
220,
220,
37227,
9122,
22303,
37811,
198,
220,
220,
220,
351,
7586,
49... | 2.553398 | 515 |
import csv
filename="wikipedia.annotated.csv"
f = open(filename)
csv_f = csv.reader(f)
text_corpus=[]
csv_f.next()
for row in csv_f:
text_corpus.append((row[2],float(row[-1])))
text_corpus.sort(key=lambda l: l[1])
top=[]
for i in range(len(text_corpus)/4):
top.append(i)
| [
11748,
269,
21370,
198,
34345,
2625,
31266,
13,
34574,
515,
13,
40664,
1,
198,
198,
69,
796,
1280,
7,
34345,
8,
198,
40664,
62,
69,
796,
269,
21370,
13,
46862,
7,
69,
8,
198,
5239,
62,
10215,
79,
385,
28,
21737,
198,
40664,
62,
... | 2.161538 | 130 |
from __future__ import absolute_import, print_function, division
from six.moves import map, range
import itertools
import operator
from copy import deepcopy
from collections import OrderedDict, defaultdict
import numpy as np
from coffee.visitor import Visitor
from coffee.base import Sum, Sub, Prod, Div, ArrayInit, SparseArrayInit
__all__ = ["ReplaceSymbols", "CheckUniqueness", "Uniquify", "Evaluate",
"EstimateFlops", "ProjectExpansion", "Reconstructor"]
class ReplaceSymbols(Visitor):
"""Replace named symbols in a tree, returning a new tree.
:arg syms: A dict mapping symbol names to new Symbol objects.
:arg key: a callable to generate a key from a Symbol, defaults to
the string representation.
:arg copy_result: optionally copy the new Symbol whenever it is
used (guaranteeing that it will be unique)"""
visit_Node = Visitor.maybe_reconstruct
class CheckUniqueness(Visitor):
"""
Check if all nodes in a tree are unique instances.
"""
# Some lists appear in operands()
class Uniquify(Visitor):
"""
Uniquify all nodes in a tree by recursively calling reconstruct
"""
visit_Node = Visitor.always_reconstruct
class Evaluate(Visitor):
@classmethod
"""
Symbolically evaluate an expression enclosed in a loop nest, provided that
all of the symbols involved are constants and their value is known.
Return a dictionary mapping symbol names to (newly created) Decl nodes, each
declaration being initialized with a proper (newly computed and created)
ArrayInit object.
:arg decls: dictionary mapping symbol names to known Decl nodes.
:arg track_zeros: True if the evaluated arrays are expected to be block-sparse
and the pattern of zeros should be tracked.
"""
default_args = dict(loop_nest=[])
class ProjectExpansion(Visitor):
@classmethod
"""
Project the output of expression expansion.
The caller should provid a collection of symbols C. The expression tree (nodes
that are not of type :class:`~.Expr` are not allowed) is visited and a set of
tuples returned, one tuple for each symbol in C. Each tuple represents the subset
of symbols in C that will appear in at least one term after expansion.
For example, be C = [a, b], and consider the following input expression: ::
(a*c + d*e)*(b*c + b*f)
After expansion, the expression becomes: ::
a*c*b*c + a*c*b*f + d*e*b*c + d*e*b*f
In which there are four product terms. In these terms, there are two in which
both 'a' and 'b' appear, and there are two in which only 'b' appears. So the
visit will return [(a, b), (b,)].
:arg symbols: the collection of symbols searched for
"""
class EstimateFlops(Visitor):
"""
Estimate the number of floating point operations a tree performs.
Does not look inside flat blocks, and all function calls are
assumed flop free, so this probably underestimates the number of
flops performed.
Also, these are "effective" flops, since the compiler may do fancy
things.
"""
class Reconstructor(Visitor):
"""
Recursively reconstruct abstract syntax trees.
"""
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
3601,
62,
8818,
11,
7297,
198,
6738,
2237,
13,
76,
5241,
1330,
3975,
11,
2837,
198,
198,
11748,
340,
861,
10141,
198,
11748,
10088,
198,
6738,
4866,
1330,
2769,
30073,
198,
6738,
172... | 3.20398 | 1,005 |
import copy
| [
11748,
4866,
628,
198
] | 3.5 | 4 |
"""
Created on Sat Feb 8 2021 - 13:28
Mastan Abdulkhaligli (CONTACT) 21403007
Okan Sen 21202377
Asadullah Farooqi
Nurlan Farzaliyev 21503756
Utku Baris Yuksek 21602412
This program solves the river crossing problem of the cannibals and missionaries using NDS.
The homework consists of 6 missionaries, 6 cannibals, and a boat of 5 seats.
"""
import random
class Node(object):
"our node class. used for nds search and to show the parent"
"by using the new_states function, yields new child nodes with parent as self"
"used to obtain the solution. Adds nodes starting from the goal state and reverses the list at the end for the solution."
"checks for the state in the state_list"
class State(object):
"""
Left side of the river is set to state '0' and right side of the river is set to state '1'
"""
"constructor"
"prints the state"
"checks if the state is valid"
"returns true if goal state is met, i.e. 0 0 1"
"yields new states by iterating on possible m and c values. moves the boat to the other side and checks for validity too"
"checks if two states are equal"
"starts checking for a solution for the current problem "
"which consists of 6 cannbals and 6 missionaries with a boat of 5 seats"
print()
begin_state = State(6, 6, 5, 6, 6, 0, "Starting State")
root_node = Node(parent=None, state=begin_state, depth=0)
solution_arr = []
# This loop keeps calling NDS until it finds the most optimal solution, where the total number of crossings is 7
while True:
solution_arr = NDS(root_node)
if len(solution_arr) == 8:
break
for item in solution_arr:
print(str(item.state))
print("The number of total crossings: 7")
"""Output can vary depending on the random node selection
due to the behaviour of Non Determinist search. However,
our program can solve the problem in 7 steps. The solution
never uses the same states more than once, thus making it
loop-free.
Sample Result:
Starting State, 6 6 0
Moved 0 missionaries and 5 cannibals from the LEFT side to the RIGHT, 6 1 1\n
Moved 0 missionaries and 2 cannibals from the RIGHT side to the LEFT, 6 3 0
Moved 4 missionaries and 1 cannibals from the LEFT side to the RIGHT, 2 2 1
Moved 1 missionaries and 1 cannibals from the RIGHT side to the LEFT, 3 3 0
Moved 3 missionaries and 0 cannibals from the LEFT side to the RIGHT, 0 3 1
Moved 0 missionaries and 2 cannibals from the RIGHT side to the LEFT, 0 5 0
Moved 0 missionaries and 5 cannibals from the LEFT side to the RIGHT, 0 0 1
"""
| [
37811,
201,
198,
41972,
319,
7031,
3158,
807,
33448,
532,
1511,
25,
2078,
201,
198,
201,
198,
44,
459,
272,
17870,
12171,
14201,
328,
4528,
357,
37815,
10659,
8,
28277,
3070,
25816,
201,
198,
18690,
272,
2311,
220,
362,
10232,
1954,
3... | 3.042007 | 857 |
#!/usr/bin/env python
'''
Copyright (C) 2006 Georg Wiora, xorx@quarkbox.de
Copyright (C) 2006 Johan Engelen, johan@shouraizou.nl
Copyright (C) 2005 Aaron Spike, aaron@ekips.org
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Changes:
* This program is a modified version of wavy.py by Aaron Spike.
* 22-Dec-2006: Wiora : Added axis and isotropic scaling
'''
import inkex, simplepath, simplestyle
from math import *
from random import *
e = FuncPlot()
e.affect()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
220,
198,
7061,
6,
198,
15269,
357,
34,
8,
4793,
6850,
370,
1504,
64,
11,
2124,
273,
87,
31,
421,
668,
3524,
13,
2934,
198,
15269,
357,
34,
8,
4793,
16053,
272,
46073,
268,
11,
474,
2... | 3.372727 | 330 |
import numpy as np
from copy import deepcopy
from scipy.spatial import distance_matrix
from autode.log.methods import methods
from autode.conformers.conformers import get_unique_confs
from autode.solvent.solvents import ExplicitSolvent
from autode.solvent.solvents import get_solvent
from autode.calculation import Calculation
from autode.config import Config
from autode.input_output import atoms_to_xyz_file
from autode.mol_graphs import is_isomorphic
from autode.log import logger
from autode.methods import get_lmethod, get_hmethod
from autode.mol_graphs import make_graph
from autode.utils import requires_atoms
from autode.utils import work_in
from autode.utils import requires_conformers
| [
11748,
299,
32152,
355,
45941,
198,
6738,
4866,
1330,
2769,
30073,
198,
6738,
629,
541,
88,
13,
2777,
34961,
1330,
220,
5253,
62,
6759,
8609,
198,
6738,
1960,
1098,
13,
6404,
13,
24396,
82,
1330,
5050,
198,
6738,
1960,
1098,
13,
1102,... | 3.36715 | 207 |
"""
This gets the meta-data containing filenames, location in MB on where to start
and the filesize
This is embedded in an iterative loop over process rank to decide which files
and or chunks of files to send to each process
Initial plan was to include the MPI send/recv here.
Kept this part wrapped to make more modular/easier to read
"""
import os
from mpi4py import MPI
| [
198,
37811,
198,
1212,
3011,
262,
13634,
12,
7890,
7268,
1226,
268,
1047,
11,
4067,
287,
10771,
319,
810,
284,
923,
198,
392,
262,
3696,
1096,
198,
198,
1212,
318,
14553,
287,
281,
11629,
876,
9052,
625,
1429,
4279,
284,
5409,
543,
... | 3.732673 | 101 |
# -*- coding: utf-8 -*-
""" KDTree class."""
import numpy as np
from utils import squared_euclidean_distance
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
509,
24544,
631,
1398,
526,
15931,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
3384,
4487,
1330,
44345,
62,
12496,
565,
485,
272,
62,
30246,
628,
628... | 2.555556 | 45 |
from digital_land.issues import Issues
from digital_land.datatype.point import PointDataType
| [
6738,
4875,
62,
1044,
13,
37165,
1330,
22852,
198,
6738,
4875,
62,
1044,
13,
19608,
265,
2981,
13,
4122,
1330,
6252,
6601,
6030,
628,
628,
198
] | 3.730769 | 26 |
# -*- coding: utf-8 -*-
# Author: XuMing <xuming624@qq.com>
# Brief:
from __future__ import print_function
from setuptools import setup, find_packages
from parrots import __version__
long_description = '''
## Usage
### install
* pip3 install parrots
* Or
```
git clone https://github.com/shibing624/parrots.git
cd pyrrots
python3 setup.py install
```
### speech recognition
input:
```
import parrots
text = parrots.speech_recognition_from_file('./16k.wav')
print(text)
```
output:
```
北京图书馆
```
### tts
input:
```
import parrots
audio_file_path = parrots.synthesize('北京图书馆')
print(audio_file_path)
```
output:
```
北京图书馆 语音文件路径
```
'''
setup(
name='parrots',
version=__version__,
description='Chinese Text To Speech and Speech Recognition',
long_description=long_description,
author='XuMing',
author_email='xuming624@qq.com',
url='https://github.com/shibing624/parrots',
license="Apache 2.0",
classifiers=[
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Natural Language :: Chinese (Simplified)',
'Natural Language :: Chinese (Traditional)',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Internet :: WWW/HTTP'
],
keywords='TTS, chinese text to speech, speech',
install_requires=[
'pypinyin',
'pydub',
'pyaudio',
'jieba'
],
packages=find_packages(exclude=['tests']),
package_dir={'parrots': 'parrots'},
package_data={
'parrots': ['*.*', 'LICENSE', 'README.*', 'data/*', 'utils/*', 'data/pinyin2hanzi/*', 'data/speech_model/*']}
)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
6434,
25,
33591,
44,
278,
1279,
87,
12595,
21,
1731,
31,
38227,
13,
785,
29,
198,
2,
22821,
25,
220,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738... | 2.418136 | 794 |
from deap import tools, base, creator, gp
import pandas as pd
import operator
import math
import random
import numpy as np
import benchmarks
import algo
import argparse
import json
# decription of the benchmarks
benchmark_description = [
{'name': 'keijzer-6',
'variables': 1,
'pset': benchmarks.get_primitive_set_for_benchmark('keijzer-6', 1)},
{'name': 'korns-12',
'variables': 5,
'pset': benchmarks.get_primitive_set_for_benchmark('korns-12', 5)},
{'name': 'pagie-1',
'variables': 2,
'pset': benchmarks.get_primitive_set_for_benchmark('pagie-1', 2)},
{'name': 'nguyen-7',
'variables': 1,
'pset': benchmarks.get_primitive_set_for_benchmark('nguyen-7', 1)},
{'name': 'vladislavleva-4',
'variables': 5,
'pset': benchmarks.get_primitive_set_for_benchmark('vladislavleva-4', 5)}
]
version_info = json.load(open('version.json', 'r'))
version = version_info['version']
# parse command-line arguments
parser = argparse.ArgumentParser(description='Run GP with surrogate model')
parser.add_argument('--problem_number', '-P', type=int, help='The number of problem to start', default=0)
parser.add_argument('--use_surrogate', '-S', help='Whether to use surrogate', action='store_true')
args = parser.parse_args()
bench_number = args.problem_number
# get the primitive set for the selected benchmark
pset = benchmark_description[bench_number]['pset']
# create the types for fitness and individuals
creator.create('FitnessMin', base.Fitness, weights=(-1.0,))
creator.create('Individual', gp.PrimitiveTree, fitness=creator.FitnessMin, pset=pset)
# create the toolbox
toolbox = base.Toolbox()
toolbox.register("expr", gp.genHalfAndHalf, pset=pset, min_=1, max_=5)
toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.expr)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("compile", gp.compile, pset=pset)
# define the fitness function (log10 of the rmse or 1000 if overflow occurs)
# register the selection and genetic operators - tournament selection and, one point crossover and sub-tree mutation
toolbox.register("select", tools.selTournament, tournsize=3)
toolbox.register("mate", gp.cxOnePoint)
toolbox.register("expr_mut", gp.genFull, min_=1, max_=4)
toolbox.register("mutate", gp.mutUniform, expr=toolbox.expr_mut, pset=pset)
# set height limits for the trees
toolbox.decorate("mate", gp.staticLimit(key=operator.attrgetter("height"), max_value=17))
toolbox.decorate("mutate", gp.staticLimit(key=operator.attrgetter("height"), max_value=17))
def run_baseline(i, x, y):
""" Executes one run of the baseline algorithm
:param i: number of the run
:param x: the values for the training instances
:param y: the targets for the training instances
:return: population in the last generation, log of the run, and the hall-of-fame,
"""
# set seed to the number of the run
random.seed(i)
np.random.seed(i)
# register fitness function with the right x and y
toolbox.register("evaluate", eval_symb_reg, points=x, values=y)
# create population
pop = toolbox.population(n=200)
hof = tools.HallOfFame(1)
# create the stats object
stats_fit = tools.Statistics(lambda ind: ind.fitness.values)
stats_size = tools.Statistics(len)
mstats = tools.MultiStatistics(fitness=stats_fit, size=stats_size)
mstats.register("avg", np.mean)
mstats.register("std", np.std)
mstats.register("min", np.min)
mstats.register("max", np.max)
# run the baseline algorithm
pop, log = algo.ea_baseline_simple(pop, toolbox, 0.2, 0.7, 200,
stats=mstats, halloffame=hof, verbose=True, n_jobs=-1)
return pop, log, hof
def run_model_test(i, x, y):
    """Execute a single run of the model-quality tests.

    :param i: number of the run (also used as the RNG seed)
    :param x: the values for the training instances
    :param y: the targets for the training instances
    :return: tuple of (final population, logbook, hall-of-fame, feature importances)
    """
    # seed both RNGs with the run number so the run is reproducible
    random.seed(i)
    np.random.seed(i)
    # bind the fitness evaluation to this run's training data
    toolbox.register("evaluate", eval_symb_reg, points=x, values=y)
    population = toolbox.population(n=200)
    hall_of_fame = tools.HallOfFame(1)
    # track avg/std/min/max of both the fitness values and the tree sizes
    fitness_stats = tools.Statistics(lambda ind: ind.fitness.values)
    size_stats = tools.Statistics(len)
    multi_stats = tools.MultiStatistics(fitness=fitness_stats, size=size_stats)
    for label, fn in (("avg", np.mean), ("std", np.std), ("min", np.min), ("max", np.max)):
        multi_stats.register(label, fn)
    # run the model-testing variant; it additionally reports feature importances
    population, logbook, feat_imp = algo.ea_baseline_model(
        population, toolbox, 0.2, 0.7, 110,
        stats=multi_stats, halloffame=hall_of_fame, verbose=True, n_jobs=-1, pset=pset)
    return population, logbook, hall_of_fame, feat_imp
def run_surrogate(i, x, y):
    """Execute a single run of the surrogate-assisted GP algorithm.

    :param i: number of the run (also used as the RNG seed)
    :param x: the values for the training instances
    :param y: the targets for the training instances
    :return: tuple of (final population, logbook of the run, hall-of-fame)
    """
    # seed both RNGs with the run number so the run is reproducible
    random.seed(i)
    np.random.seed(i)
    # bind the fitness evaluation to this run's training data
    toolbox.register("evaluate", eval_symb_reg, points=x, values=y)
    population = toolbox.population(n=200)
    hall_of_fame = tools.HallOfFame(1)
    # track avg/std/min/max of both the fitness values and the tree sizes
    fitness_stats = tools.Statistics(lambda ind: ind.fitness.values)
    size_stats = tools.Statistics(len)
    multi_stats = tools.MultiStatistics(fitness=fitness_stats, size=size_stats)
    for label, fn in (("avg", np.mean), ("std", np.std), ("min", np.min), ("max", np.max)):
        multi_stats.register(label, fn)
    # run the surrogate algorithm (note the much larger budget: 15000)
    population, logbook = algo.ea_surrogate_simple(population, toolbox, 0.2, 0.7, 15000,
                                                   pset=pset, stats=multi_stats,
                                                   halloffame=hall_of_fame, verbose=True, n_jobs=-1)
    return population, logbook, hall_of_fame
def run_all_baseline():
    """ Wrapper to start 25 runs of the baseline algorithm and store the results
    """
    pdlogs = pd.DataFrame()
    # get the name of the benchmark
    b_name = benchmark_description[bench_number]['name']
    # perform the 25 runs (the old comment said 15; the loop clearly runs 25)
    for i in range(25):
        # read the training data for this run (semicolon-separated CSV)
        data = pd.read_csv('benchmarks/{bname}-train.{num}.csv'.format(bname=b_name, num=i+1), sep=';')
        y = data['y'].values
        data = data.drop('y', axis=1)
        x = data.values
        # start the baseline algorithm
        pop, log, hof = run_baseline(i, x, y)
        # append the min fitness from this run to the log, indexed by cumulative evaluations
        pdlog = pd.Series(log.chapters['fitness'].select('min'), index=np.cumsum(log.select('nevals')),
                          name='run_' + str(i))
        pdlogs = pd.concat([pdlogs, pdlog], axis=1)
    # store the logs
    pdlogs.to_csv('output/baseline.{bname}.v{version}.csv'.format(bname=b_name, version=version))
def run_all_surrogate():
    """ Wrapper to start 25 runs of the surrogate algorithm and store the results
    """
    pdlogs = pd.DataFrame()
    # get the name of the benchmark
    b_name = benchmark_description[bench_number]['name']
    # perform the 25 runs (the old comment said 15; the loop clearly runs 25)
    for i in range(25):
        # read training data for this run (semicolon-separated CSV)
        data = pd.read_csv('benchmarks/{bname}-train.{num}.csv'.format(bname=b_name, num=i+1), sep=';')
        y = data['y'].values
        data = data.drop('y', axis=1)
        x = data.values
        # start the surrogate algorithm
        pop, log, hof = run_surrogate(i, x, y)
        # concat the min fitness from this run to the logs, indexed by cumulative evaluations
        pdlog = pd.Series(log.chapters['fitness'].select('min'), index=np.cumsum(log.select('nevals')),
                          name='run_' + str(i))
        pdlogs = pd.concat([pdlogs, pdlog], axis=1)
    # store the logs
    pdlogs.to_csv('output/surrogate.{bname}.v{version}.csv'.format(bname=b_name, version=version))
def run_all_model_tests():
    """ Wrapper to start 25 runs of the model testing (Spearman correlation and feature importances)
    """
    pdlogs = pd.DataFrame()
    # get the name of the benchmark
    b_name = benchmark_description[bench_number]['name']
    # perform the 25 runs (the old comment said 15; the loop clearly runs 25)
    for i in range(25):
        # read the training data for this run (semicolon-separated CSV)
        data = pd.read_csv('benchmarks/{bname}-train.{num}.csv'.format(bname=b_name, num=i+1), sep=';')
        y = data['y'].values
        data = data.drop('y', axis=1)
        x = data.values
        # start the model-testing run
        pop, log, hof, feat_imp = run_model_test(i, x, y)
        # append the Spearman correlation from this run, indexed by cumulative evaluations
        pdlog = pd.Series(log.select('spear'), index=np.cumsum(log.select('nevals')),
                          name='run_' + str(i))
        pdlogs = pd.concat([pdlogs, pdlog], axis=1)
        # feat_imp.to_csv('output/feats_rf_def.{name}.{run}.v{version}.csv'.format(name=b_name, run=i, version=version))
    # store the logs
    pdlogs.to_csv('output/spear.rf_inf.{bname}.v{version}.csv'.format(bname=b_name, version=version))
# script entry point: main() is presumably the experiment driver defined earlier in this file
if __name__ == "__main__":
    main()
| [
6738,
390,
499,
1330,
4899,
11,
2779,
11,
13172,
11,
27809,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
10088,
198,
11748,
10688,
198,
11748,
4738,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
31747,
198,
11748,
435,
2188,
198,... | 2.524844 | 3,683 |
import unittest
from core.internal_repr import parameter
from core.util_classes.matrix import Vector2d
from core.util_classes import common_predicates
from core.util_classes.openrave_body import OpenRAVEBody
from core.util_classes import items
from errors_exceptions import PredicateException
from sco import expr
import numpy as np
from openravepy import Environment
from collections import OrderedDict
# number of samples/iterations used by the test cases in this module
N = 10

## exprs for testing
e1 = expr.Expr(lambda x: np.array([x]))
e2 = expr.Expr(lambda x: np.power(x, 2))

# BUG FIX: the original used `__name__ is "__main__"`. `is` tests object
# identity, not equality; it only works by accident of CPython string
# interning and emits a SyntaxWarning on Python 3.8+. Use `==` instead.
if __name__ == "__main__":
    unittest.main()
| [
11748,
555,
715,
395,
198,
6738,
4755,
13,
32538,
62,
260,
1050,
1330,
11507,
198,
6738,
4755,
13,
22602,
62,
37724,
13,
6759,
8609,
1330,
20650,
17,
67,
198,
6738,
4755,
13,
22602,
62,
37724,
1330,
2219,
62,
28764,
16856,
198,
6738,
... | 3.179775 | 178 |
from flask import Blueprint, redirect, url_for, flash, render_template
from flask_login import login_user, current_user, logout_user, login_required
from blueblog.models import Admin
from blueblog.forms import LoginForm
from blueblog.utils import redirect_back
auth_bp = Blueprint('auth', __name__)
@auth_bp.route('/login', methods=['GET', 'POST'])
@auth_bp.route('/logout')
@login_required | [
6738,
42903,
1330,
39932,
11,
18941,
11,
19016,
62,
1640,
11,
7644,
11,
8543,
62,
28243,
198,
6738,
42903,
62,
38235,
1330,
17594,
62,
7220,
11,
1459,
62,
7220,
11,
2604,
448,
62,
7220,
11,
17594,
62,
35827,
198,
6738,
4171,
14036,
... | 3.338983 | 118 |
from menuClass import MenuClass
# ask the user for a language, then display the menu in that language
# NOTE(review): two separate MenuClass instances are created here — confirm intended
MenuClass().menu(MenuClass().chooseLanguage())
| [
6738,
6859,
9487,
1330,
21860,
9487,
198,
23381,
9487,
22446,
26272,
7,
23381,
9487,
22446,
6679,
577,
32065,
28955,
198
] | 3.95 | 20 |
import abc
| [
11748,
450,
66,
201
] | 2.75 | 4 |
from package_name.source import hi
import pytest
@pytest.mark.parametrize("tuple1,tuple2", [
("2", "10"),
("1", "-4"),
])
@pytest.mark.skip(reason="I haven't implemented this in the module yet.")
@pytest.mark.xfail
| [
6738,
5301,
62,
3672,
13,
10459,
1330,
23105,
198,
11748,
12972,
9288,
628,
198,
198,
31,
9078,
9288,
13,
4102,
13,
17143,
316,
380,
2736,
7203,
83,
29291,
16,
11,
83,
29291,
17,
1600,
685,
198,
220,
220,
220,
5855,
17,
1600,
366,
... | 2.549451 | 91 |
import pytest
from os import urandom
from decimal import Decimal
import time
from btcpy.structs.transaction import Locktime
from btcpy.structs.sig import P2pkhSolver
from pypeerassets.kutil import Kutil
from pypeerassets.provider import Explorer
from pypeerassets.networks import net_query
from pypeerassets.transactions import (
MutableTransaction,
Transaction,
find_parent_outputs,
p2pkh_script,
tx_output,
)
def test_key_generation():
    '''test privkey/pubkey generation.'''
    key = Kutil(network="ppc")
    # both keys are exposed as plain strings
    for attribute in (key.privkey, key.pubkey):
        assert isinstance(attribute, str)
def test_key_generation_from_seed():
    '''check if key generation is what is expected from seed.'''
    seed = "Hello PeerAssets."
    # "tppc" is presumably the peercoin testnet — confirm against pypeerassets.networks
    mykey = Kutil(from_string=seed, network="tppc")
    # the keypair derived from a fixed seed must be deterministic
    assert mykey.privkey == '680510f7f5e622347bc8d9e54e109a9192353693ef61d82d2d5bdf4bc9fd638b'
    assert mykey.pubkey == '037cf9e7664b5d10ce209cf9e2c7f68baa06f1950114f25677531b959edd7e670c'
def test_address_generation():
    '''test if addresses are properly made'''
    # a random 32-byte private key must yield a peercoin-style "P…" address
    random_privkey = bytearray(urandom(32))
    address = Kutil(network="ppc", privkey=random_privkey).address
    assert address.startswith("P")
    # freshly generated keys also produce string addresses of length 34
    assert isinstance(Kutil(network='ppc').address, str)
    assert len(Kutil(network='ppc').address) == 34
def test_mainnet_wif_import():
    '''test importing WIF privkey'''
    mykey = Kutil(network='ppc', from_wif="U624wXL6iT7XZ9qeHsrtPGEiU78V1YxDfwq75Mymd61Ch56w47KE")
    # importing a fixed WIF string must reproduce the known address and keypair
    assert mykey.address == 'PAprodbYvZqf4vjhef49aThB9rSZRxXsM6'
    assert mykey.pubkey == '023aaca6c4f022543f4a2920f66544a6ce89746f7fce4da35d63b5886fdac06634'
    assert mykey.privkey == '1b19749afd007bf6db0029e0273a46409bc160b9349031752bbc3cd913bbbdd3'
def test_wif_export():
    '''test Kutil WIF export'''
    # build the key from the same raw private key used by the import test above
    raw_privkey = bytearray.fromhex('1b19749afd007bf6db0029e0273a46409bc160b9349031752bbc3cd913bbbdd3')
    key = Kutil(network='ppc', privkey=raw_privkey)
    exported = key.wif
    # exporting must round-trip to the known WIF string
    assert isinstance(exported, str)
    assert exported == 'U624wXL6iT7XZ9qeHsrtPGEiU78V1YxDfwq75Mymd61Ch56w47KE'
| [
11748,
12972,
9288,
198,
6738,
28686,
1330,
2956,
3749,
198,
6738,
32465,
1330,
4280,
4402,
198,
11748,
640,
198,
198,
6738,
275,
23047,
9078,
13,
7249,
82,
13,
7645,
2673,
1330,
13656,
2435,
198,
6738,
275,
23047,
9078,
13,
7249,
82,
... | 2.261202 | 915 |
# Generated by Django 2.0.1 on 2018-01-08 23:05
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
13,
16,
319,
2864,
12,
486,
12,
2919,
2242,
25,
2713,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
6... | 2.818182 | 44 |
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectAI import DistributedObjectAI
from toontown.ai.DistributedPhaseEventMgrAI import DistributedPhaseEventMgrAI
from otp.ai.MagicWordGlobal import *
@magicWord(category=CATEGORY_PROGRAMMER, types=[int])
def setSillyMeterPhase(phase):
    '''
    Sets the Silly Meters Phase!
    '''
    # reject out-of-range phases before touching the manager
    if phase > 15:
        return("Phase is too high! (-1 to 15) can be used!")
    if phase < -1:
        return("Phase is too low! (-1 to 15) can be used!")
    # -1 is the "off" sentinel; every other valid phase keeps the meter running
    mgr = simbase.air.sillyMeterMgr
    mgr.b_setCurPhase(phase)
    mgr.b_setIsRunning(phase != -1)
    messenger.send('SillyMeterPhase', [phase])
    if phase == -1:
        return("Turned Off The Silly Meter!")
    return "Set Silly Meters Phase!"
6738,
1277,
13,
12942,
1662,
1958,
1330,
4128,
3673,
1958,
22289,
198,
6738,
1277,
13,
17080,
6169,
13,
20344,
6169,
10267,
20185,
1330,
4307,
6169,
10267,
20185,
198,
6738,
284,
756,
593,
13,
1872,
13,
20344,
6169,
35645,
9237,
44,
216... | 2.481865 | 386 |
# OAuth client credentials — fill these in before running (presumably for a
# web API requiring OAuth; confirm against the code that consumes them)
CLIENT_ID = ""
CLIENT_SECRET = ""
# API key and target playlist identifier/name
API_KEY = ""
PLAYLIST_ID = ""
PLAYLIST_NAME = ""
# local callback address used during the OAuth authorization flow
REDIRECT_URI = "http://localhost:8080"
| [
5097,
28495,
62,
2389,
796,
13538,
198,
5097,
28495,
62,
23683,
26087,
796,
13538,
198,
198,
17614,
62,
20373,
796,
13538,
198,
31519,
45849,
62,
2389,
796,
13538,
198,
198,
31519,
45849,
62,
20608,
796,
13538,
198,
22083,
40,
23988,
62... | 2.384615 | 52 |
# raki2: if + while loop — keep prompting until the literal string 'your name' is entered
while True :
    print('please type your name ')
    name = input()
    # exit the loop only on the exact expected input
    if name == 'your name':
        break
print ('thank you')
| [
2,
430,
4106,
17,
611,
1343,
329,
9052,
201,
198,
4514,
6407,
1058,
201,
198,
220,
220,
220,
3601,
10786,
29688,
2099,
534,
1438,
705,
8,
201,
198,
220,
220,
220,
1438,
796,
5128,
3419,
201,
198,
220,
220,
220,
611,
1438,
6624,
70... | 2.347826 | 69 |
# read three segment lengths and check the triangle inequality:
# a triangle exists iff each side is shorter than the sum of the other two
seg1 = float(input('Segmento 1: '))
seg2 = float(input('Segmento 2: '))
seg3 = float(input('Segmento 3: '))
if seg1 < seg2 + seg3 and seg2 < seg1 + seg3 and seg3 < seg1 + seg2:
    print('Os segmentos informados PODEM forma um triângulo')
else:
    print('Os segmentos informados NÃO PODEM forma um triângulo.')
| [
325,
70,
16,
796,
12178,
7,
15414,
10786,
41030,
434,
78,
352,
25,
705,
4008,
198,
325,
70,
17,
796,
12178,
7,
15414,
10786,
41030,
434,
78,
362,
25,
705,
4008,
198,
325,
70,
18,
796,
12178,
7,
15414,
10786,
41030,
434,
78,
513,
... | 2.268116 | 138 |
import numpy as np
from numpy import inf
import deerlab as dl
from deerlab import dipolarkernel, whitegaussnoise, fitmodel
from deerlab.dd_models import dd_gauss
from deerlab.bg_models import bg_exp, bg_hom3d
from deerlab.ex_models import ex_4pdeer, ex_5pdeer, ex_7pdeer, ex_ovl4pdeer
import deerlab as dl
from deerlab.utils import ovl
# --------------------------------------------------------------------
def test_4pdeer():
# ======================================================================
"Check that the fit of a 4-pulse DEER signal is correct"
assert_experiment_model(ex_4pdeer)
# ======================================================================
def test_5pdeer():
# ======================================================================
    "Check that the fit of a 5-pulse DEER signal is correct"
    assert_experiment_model(ex_5pdeer)
# ======================================================================
def test_7pdeer():
# ======================================================================
    "Check that the fit of a 7-pulse DEER signal is correct"
    assert_experiment_model(ex_7pdeer)
# ======================================================================
def test_ovl4pdeer():
# ======================================================================
    "Check that the fit of a 4-pulse DEER signal is correct"
    # uses the ex_ovl4pdeer experiment model — presumably 4-pulse DEER with
    # overlapping pathways; confirm against deerlab.ex_models
    assert_experiment_model(ex_ovl4pdeer)
# ======================================================================
def test_ridme1():
# ======================================================================
"Check that the fit of a S=1/2 RIDME signal is correct"
assert_experiment_model(dl.ex_ridme1)
# ======================================================================
def test_ridme3():
# ======================================================================
"Check that the fit of a S=3/2 RIDME signal is correct"
assert_experiment_model(dl.ex_ridme3)
# ======================================================================
def test_ridme5():
# ======================================================================
"Check that the fit of a S=5/2 RIDME signal is correct"
assert_experiment_model(dl.ex_ridme5)
# ======================================================================
def test_dipevo_function():
# ======================================================================
    "Check that the fit of a dipolar evolution function is correct"
    t = np.linspace(0,5,100)
    r = np.linspace(2,7,150)
    P = dd_gauss(r,[4.5, 0.25])
    # kernel without modulation depth or background: pure dipolar evolution
    K = dipolarkernel(t,r)
    V = K@P
    # non-parametric fit, no background/experiment model, no uncertainty
    fit = fitmodel(V,t,r,'P',None,None,uq=None)
    # require at least 90% overlap between fitted and true distributions
    assert ovl(P,fit.P) > 0.90
# ======================================================================
def test_form_factor():
# ======================================================================
"Check that the fit of a simple form factor is correct"
t = np.linspace(0,5,100)
r = np.linspace(2,6,150)
P = dd_gauss(r,[4.5, 0.25])
K = dipolarkernel(t,r,mod=0.3)
V = K@P
fit = fitmodel(V,t,r,'P',None,ex_4pdeer,uq=None)
assert ovl(P,fit.P) > 0.90
# ======================================================================
def test_full_parametric():
# ======================================================================
"Check that the fit of full parametric model"
t = np.linspace(0,5,100)
r = np.linspace(2,6,200)
P = dd_gauss(r,[4.5, 0.25])
lam = 0.3
kappa = 0.2
B = bg_exp(t,lam*kappa)
V = dipolarkernel(t,r,mod=lam,bg=B)@P
fit = fitmodel(V,t,r,dd_gauss,bg_exp,ex_4pdeer,uq=None)
assert ovl(P,fit.P) > 0.99
# ======================================================================
def test_no_foreground():
# ======================================================================
"Check that the fit of a pure background works"
t = np.linspace(0,5,500)
r = np.linspace(2,6,200)
k = 0.2
B = bg_exp(t,k)
fit = fitmodel(B,t,r,None,bg_exp,None,uq=None)
assert abs(k-fit.bgparam) < 1
# ======================================================================
def test_start_values():
# ======================================================================
    "Check that start values can be correctly specified"
    t = np.linspace(0,5,100)
    r = np.linspace(2,6,150)
    P = dd_gauss(r,[4.5, 0.25])
    lam = 0.4
    Bmodel = lambda t: bg_exp(t,0.4)
    K = dipolarkernel(t,r,mod=lam,bg=Bmodel)
    V = K@P
    # explicit start values for the background and experiment parameters
    fit = fitmodel(V,t,r,'P',bg_exp,ex_4pdeer,bg_par0=0.5,ex_par0=0.5,uq=None)
    assert ovl(P,fit.P) > 0.95
# ======================================================================
def test_boundaries():
# ======================================================================
"Check that boundaries can be correctly specified"
t = np.linspace(0,5,100)
r = np.linspace(2,6,150)
P = dd_gauss(r,[4.5, 0.25])
lam = 0.4
Bmodel = lambda t: bg_exp(t,0.4)
K = dipolarkernel(t,r,mod=lam,bg=Bmodel)
V = K@P
fit = fitmodel(V,t,r,'P',bg_exp,ex_4pdeer, uq=None,
bg_par0=0.4, bg_lb=0.2, bg_ub=0.5,
ex_par0=0.4, ex_lb=0.2, ex_ub=0.5)
assert ovl(P,fit.P) > 0.95
# ======================================================================
def test_boundaries_adjust_bg():
# ======================================================================
"Check that start values are adjusted when defining bounds leaving default par0 out"
t = np.linspace(0,5,100)
r = np.linspace(2,6,150)
P = dd_gauss(r,[4.5, 0.25])
lam,conc = 0.4,80
Bmodel = lambda t,lam: bg_hom3d(t,conc,lam)
K = dipolarkernel(t,r,mod=lam,bg=Bmodel)
V = K@P
fit = fitmodel(V,t,r,'P',bg_hom3d,ex_4pdeer, uq=None, bg_lb=70, bg_ub=90)
assert ovl(P,fit.P) > 0.95
# ======================================================================
def test_boundaries_adjust_ex():
# ======================================================================
"Check that start values are adjusted when defining bounds leaving default par0 out"
t = np.linspace(0,5,100)
r = np.linspace(2,6,150)
P = dd_gauss(r,[4.5, 0.25])
lam,conc = 0.6,80
Bmodel = lambda t,lam: bg_hom3d(t,conc,lam)
K = dipolarkernel(t,r,mod=lam,bg=Bmodel)
V = K@P
fit = fitmodel(V,t,r,'P',bg_hom3d,ex_4pdeer, uq=None, ex_lb=0.55, ex_ub=0.65)
assert ovl(P,fit.P) > 0.95
# ======================================================================
def test_boundaries_adjust_dd():
# ======================================================================
"Check that start values are adjusted when defining bounds leaving default par0 out"
t = np.linspace(0,5,100)
r = np.linspace(2,6,150)
P = dd_gauss(r,[4.5, 0.35])
lam,conc = 0.4,80
Bmodel = lambda t,lam: bg_hom3d(t,conc,lam)
K = dipolarkernel(t,r,mod=lam,bg=Bmodel)
V = K@P
fit = fitmodel(V,t,r,dd_gauss,bg_hom3d,ex_4pdeer, uq=None, dd_lb=[4,0.3],dd_ub=[6,0.5])
assert ovl(P,fit.P) > 0.95
# ======================================================================
def test_global_4pdeer():
# ======================================================================
"Check the correct fit of two 4-DEER signals"
r = np.linspace(2,6,90)
P = dd_gauss(r,[4.5, 0.3])
parIn = ex_4pdeer.start
pathways = ex_4pdeer(parIn)
kappa = 0.4
Bmodel = lambda t: bg_exp(t,kappa)
t1 = np.linspace(0,5,100)
V1 = dipolarkernel(t1,r,pathways=pathways,bg=Bmodel)@P
t2 = np.linspace(0,5,250)
V2 = dipolarkernel(t2,r,pathways=pathways,bg=Bmodel)@P
fit = fitmodel([V1,V2],[t1,t2],r,'P',bg_exp,ex_4pdeer,uq=None)
assert ovl(P,fit.P) > 0.90
# ======================================================================
def test_global_full_parametric():
# ======================================================================
"Check global fitting with fully parametric models"
r = np.linspace(2,6,200)
P = dd_gauss(r,[4.5, 0.25])
lam = 0.3
kappa = 0.4
B = lambda t: bg_exp(t,kappa)
t1 = np.linspace(0,5,100)
t2 = np.linspace(0,3,200)
V1 = dipolarkernel(t1,r,mod=lam,bg=B)@P
V2 = dipolarkernel(t2,r,mod=lam,bg=B)@P
fit = fitmodel([V1,V2],[t1,t2],r,dd_gauss,bg_exp,ex_4pdeer,uq=None)
assert ovl(P,fit.P) > 0.99
# ======================================================================
def test_global_mixed_backgrounds():
# ======================================================================
"Check global fitting with different background models"
r = np.linspace(2,6,200)
P = dd_gauss(r,[4.5, 0.25])
lam = 0.3
kappa = 0.4
B = lambda t: bg_exp(t,kappa)
t1 = np.linspace(0,5,100)
t2 = np.linspace(0,3,200)
V1 = dipolarkernel(t1,r,mod=lam,bg=B)@P
V2 = dipolarkernel(t2,r,mod=lam)@P
fit = fitmodel([V1,V2],[t1,t2],r,dd_gauss,[bg_exp,None],ex_4pdeer,uq=None)
assert ovl(P,fit.P) > 0.90
# ======================================================================
def test_global_mixed_experiments():
# ======================================================================
"Check global fitting with different experiment models"
r = np.linspace(2,6,200)
P = dd_gauss(r,[4.5, 0.25])
lam = 0.3
t1 = np.linspace(0,5,100)
t2 = np.linspace(0,3,200)
V1 = dipolarkernel(t1,r,mod=lam)@P
V2 = dipolarkernel(t2,r)@P
fit = fitmodel([V1,V2],[t1,t2],r,dd_gauss,None,[ex_4pdeer,None],uq=None)
assert ovl(P,fit.P) > 0.9
# ======================================================================
#----------------------------------------------------------------------
#----------------------------------------------------------------------
# module-level fixture: one noisy 4-pulse DEER dataset fitted with
# covariance uncertainty, shared by the confinter parameter tests below
exmodel = ex_4pdeer
ddmodel = dd_gauss
bgmodel = bg_exp
r = np.linspace(2,6,40)
P = ddmodel(r,[4.5, 0.25])
parIn = exmodel.start
pathways = exmodel(parIn)
kappa = 0.4
Bmodel = lambda t,lam: bgmodel(t,kappa)
t = np.linspace(0,5,100)
# fixed seed so the synthetic noise (and hence the fit) is deterministic
np.random.seed(0)
V = dipolarkernel(t,r,pathways=pathways,bg=Bmodel)@P + whitegaussnoise(t,0.01)
fit = fitmodel(V,t,r,ddmodel,bgmodel,exmodel,uq='covariance')
#----------------------------------------------------------------------
#----------------------------------------------------------------------
def test_confinter_exparam():
# ======================================================================
"Check that the confidence inervals for the experiment parameter are correct"
assert_confinter_param('ex')
# ======================================================================
def test_confinter_bgparam():
# ======================================================================
"Check that the confidence inervals for the experiment parameter are correct"
assert_confinter_param('bg')
# ======================================================================
def test_confinter_ddparam():
# ======================================================================
"Check that the confidence inervals for the experiment parameter are correct"
assert_confinter_param('dd')
# ======================================================================
#----------------------------------------------------------------------
exmodel = ex_4pdeer
bgmodel = bg_exp
r = np.linspace(2,6,40)
P = dd_gauss(r,[4.5, 0.25])
parIn = exmodel.start
pathways = exmodel(parIn)
kappa = 0.4
Bmodel = lambda t: bgmodel(t,kappa)
t = np.linspace(0,5,100)
np.random.seed(0)
V = dipolarkernel(t,r,pathways=pathways,bg=Bmodel)@P + whitegaussnoise(t,0.03)
fit_Pparam = fitmodel(V,t,r,ddmodel,bgmodel,exmodel,uq='covariance')
fit_Pfree = fitmodel(V,t,r,'P',bgmodel,exmodel,uq='covariance')
#----------------------------------------------------------------------
#----------------------------------------------------------------------
def test_confinter_Pfit():
# ======================================================================
"Check that the confidence inervals for fitted parametric distribution are correct"
assert_confinter_models('Pfit')
# ======================================================================
def test_confinter_Pfitfree():
# ======================================================================
"Check that the confidence inervals for fitted distribution are correct"
assert_confinter_models('Pfitfree')
# ======================================================================
def test_confinter_Vfit():
# ======================================================================
"Check that the confidence inervals for fitted distribution are correct"
assert_confinter_models('Vfit')
# ======================================================================
def test_confinter_Bfit():
# ======================================================================
"Check that the confidence inervals for fitted distribution are correct"
assert_confinter_models('Bfit')
# ======================================================================
def assert_confinter_noforeground():
# ======================================================================
"Check that the confidence inervals for a pure background fit are correct"
bgmodel = bg_exp
t = np.linspace(0,5,100)
r = np.linspace(2,6,90)
P = dd_gauss(r,[4.5, 0.25])
kappa = 0.4
lam = 0.3
Bmodel = bgmodel(t,kappa)
np.random.seed(0)
V = dipolarkernel(t,r,mod=lam,bg=Bmodel)@P + whitegaussnoise(t,0.01)
fit = fitmodel(V,t,r,None,bgmodel,ex_4pdeer,uq='covariance')
Bfit = fit.B
Buq = fit.Buncert
Bci50 = Buq.ci(50)
Bci95 = Buq.ci(95)
lb = np.full_like(t,-inf)
ub = np.full_like(t,inf)
assert_confidence_intervals(Bci50,Bci95,Bfit,lb,ub)
# ======================================================================
def assert_confinter_dipevofun():
# ======================================================================
    "Check that the confidence intervals for a dipolar evolution function fit are correct"
    r = np.linspace(2,6,90)
    P = dd_gauss(r,[4.5, 0.25])
    t = np.linspace(0,5,100)
    np.random.seed(0)
    # noisy dipolar evolution function (no background, no modulation depth)
    V = dipolarkernel(t,r)@P + whitegaussnoise(t,0.01)
    fit = fitmodel(V,t,r,'P',None,None,uq='covariance')
    Pfit = fit.P
    Puq = fit.Puncert
    Pci50 = Puq.ci(50)
    Pci95 = Puq.ci(95)
    # BUG FIX: np.zeros_like(r, 0) passed 0 positionally as the dtype argument,
    # which is not a valid dtype and raises a TypeError. The distance
    # distribution is non-negative, so the lower bound is simply zeros.
    lb = np.zeros_like(r)
    ub = np.full_like(r,inf)
    assert_confidence_intervals(Pci50,Pci95,Pfit,lb,ub)
# ======================================================================
def test_global_scale_4pdeer():
# ======================================================================
"Check the correct fit of two 4-DEER signals"
r = np.linspace(2,6,90)
P = dd_gauss(r,[4.5, 0.25])
parIn = ex_4pdeer.start
pathways = ex_4pdeer(parIn)
kappa = 0.4
Bmodel = lambda t: bg_exp(t,kappa)
scales = [1e3,1e9]
t1 = np.linspace(0,5,100)
V1 = scales[0]*dipolarkernel(t1,r,pathways=pathways,bg=Bmodel)@P
t2 = np.linspace(0,5,250)
V2 = scales[1]*dipolarkernel(t2,r,pathways=pathways,bg=Bmodel)@P
fit = fitmodel([V1,V2],[t1,t2],r,'P',bg_exp,ex_4pdeer,uq=None)
assert max(abs(np.asarray(scales)/np.asarray(fit.scale) - 1)) < 5e-2
# ======================================================================
# NOTE(review): module-level invocation of a test function — pytest collects
# and runs tests itself, so this executes at import time and looks like a
# leftover debugging call; confirm and remove.
test_global_scale_4pdeer()
def test_V_scale_parametric():
# ======================================================================
"Check that the signal is properly scaled in fully parametric mode"
t = np.linspace(0,5,100)
r = np.linspace(2,6,150)
P = dd_gauss(r,[4.5, 0.2])
Bmodel = lambda t: bg_exp(t,0.4)
K = dipolarkernel(t,r,mod=0.3,bg=Bmodel)
scale = 1.54e6
V = scale*(K@P)
fit = fitmodel(V,t,r,dd_gauss,bg_exp,ex_4pdeer,uq=None)
assert isinstance(fit.scale,float) and abs(1 - scale/fit.scale) < 1e-2
# ======================================================================
def test_V_scale():
# ======================================================================
"Check that the signal is properly scaled in SNLLS mode"
t = np.linspace(0,5,100)
r = np.linspace(2,6,150)
P = dd_gauss(r,[4.5, 0.2])
Bmodel = lambda t: bg_exp(t,0.4)
K = dipolarkernel(t,r,mod=0.3,bg=Bmodel)
scale =1.54e6
V = scale*(K@P)
fit = fitmodel(V,t,r,'P',bg_exp,ex_4pdeer,uq=None)
assert isinstance(fit.scale,float) and abs(1 - scale/fit.scale) < 1e-2
# ======================================================================
def test_V_scale_regularized():
# ======================================================================
"Check that the signal is properly scaled in regularization mode"
t = np.linspace(0,5,200)
r = np.linspace(2,6,200)
P = dd_gauss(r,[4.5, 0.2])
K = dipolarkernel(t,r)
scale = 1.54e6
V = scale*(K@P)
fit = fitmodel(V,t,r,'P',None,None,uq=None)
assert isinstance(fit.scale,float) and abs(1 - scale/fit.scale) < 1e-2
# ======================================================================
def test_plot():
# ======================================================================
"Check that the plot method works"
t = np.linspace(0,5,100)
r = np.linspace(2,6,150)
P = dd_gauss(r,[4.5, 0.25])
Bmodel = lambda t: bg_exp(t,0.4)
K = dipolarkernel(t,r,mod=0.4,bg=Bmodel)
V = K@P
fit = fitmodel(V,t,r,'P',bg_exp,ex_4pdeer,uq=None)
fig = fit.plot(show=False)
assert str(fig.__class__)=="<class 'matplotlib.figure.Figure'>"
# ======================================================================
def test_physical_bg_model():
# ======================================================================
"Check that the background parameters of a physical model are fitted correctly"
t = np.linspace(-0.1,7,200)
r = np.linspace(3,5,50)
P = dl.dd_gauss(r,[4,0.2])
V0 = 3000
K = dl.dipolarkernel(t,r,mod=0.4,bg=lambda t,lam:dl.bg_hom3d(t,50,lam))
V = K@P
V = V0*V
fit = dl.fitmodel(V,t,r,'P',dl.bg_hom3d,dl.ex_4pdeer,uq=None)
assert abs(fit.bgparam - 50)<1e-1 and abs(fit.exparam - 0.4)<1e-1
# ======================================================================
def test_phenomenological_bg_model():
# ======================================================================
"Check that the background parameters of a phenomenological model are fitted correctly"
t = np.linspace(-0.1,7,200)
r = np.linspace(3,5,50)
P = dl.dd_gauss(r,[4,0.2])
V0 = 3000
K = dl.dipolarkernel(t,r,pathways=dl.ex_4pdeer(0.4),bg=lambda t: dl.bg_exp(t,0.3))
V = K@P
V = V0*V
fit = dl.fitmodel(V,t,r,'P',dl.bg_exp,dl.ex_4pdeer,uq=None)
assert abs(fit.bgparam - 0.3)<1e-1 and abs(fit.exparam - 0.4)<1e-1
# ======================================================================
def test_Vunmod():
# ======================================================================
"Check that the background scaling is correct if requested"
t = np.linspace(-0.1,7,200)
r = np.linspace(3,5,50)
P = dl.dd_gauss(r,[4,0.2])
lam = 0.4
K = dl.dipolarkernel(t,r,pathways=dl.ex_4pdeer(lam),bg=lambda t,lam: dl.bg_hom3d(t,50,lam))
Bscaled = (1-lam)*dl.bg_hom3d(t,50,lam)
V = K@P
fit = dl.fitmodel(V,t,r,'P',dl.bg_exp,dl.ex_4pdeer,uq=None)
assert max(abs(Bscaled - fit.Vunmod))<1e-4
# ======================================================================
def test_cost_value():
# ======================================================================
    "Check that the reported cost value equals the sum of squared residuals"
    t = np.linspace(0,5,100)
    r = np.linspace(2,6,50)
    P = dd_gauss(r,[4.5, 0.25])
    Bmodel = lambda t: bg_exp(t,0.4)
    K = dipolarkernel(t,r,mod=0.4,bg=Bmodel)
    V = K@P
    fit = fitmodel(V,t,r,'P',bg_exp,ex_4pdeer,uq=None)
    # cost must be a float and (up to 5 decimals) equal to sum(residuals**2)
    assert isinstance(fit.cost,float) and np.round(fit.cost/np.sum(fit.residuals**2),5)==1
# ======================================================================
# ----------------------------------------------------------------------
exmodel = ex_4pdeer
bgmodel = bg_exp
r = np.linspace(2,6,40)
P = dd_gauss(r,[4.5, 0.25])
parIn = exmodel.start
pathways = exmodel(parIn)
kappa = 0.4
Bmodel = lambda t: bgmodel(t,kappa)
t = np.linspace(0,5,100)
np.random.seed(0)
V = dipolarkernel(t,r,pathways=pathways,bg=Bmodel)@P + whitegaussnoise(t,0.03)
fit = fitmodel(V,t,r,ddmodel,bgmodel,exmodel,uq=['bootstrap',2])
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
def test_bootci_P():
# ======================================================================
"Check that the bootstrapped confidence intervals work"
assert_boot_ci('P')
# ======================================================================
def test_bootci_V():
# ======================================================================
"Check that the bootstrapped confidence intervals work"
assert_boot_ci('V')
# ======================================================================
def test_bootci_B():
# ======================================================================
"Check that the bootstrapped confidence intervals work"
assert_boot_ci('B')
# ======================================================================
def test_bootci_Vmod():
# ======================================================================
    """Bootstrapped confidence intervals must be returned for the modulated signal component."""
    assert_boot_ci('Vmod')
# ======================================================================
def test_bootci_Vunmod():
# ======================================================================
    """Bootstrapped confidence intervals must be returned for the unmodulated signal component."""
    assert_boot_ci('Vunmod')
# ======================================================================
def test_bootci_ddparam():
# ======================================================================
    """Bootstrapped confidence intervals must be returned for the distribution parameters."""
    assert_boot_ci('ddparam')
# ======================================================================
def test_bootci_exparam():
# ======================================================================
    """Bootstrapped confidence intervals must be returned for the experiment parameters."""
    assert_boot_ci('exparam')
# ======================================================================
def test_bootci_bparam():
# ======================================================================
    """Bootstrapped confidence intervals must be returned for the background parameters."""
    assert_boot_ci('bgparam')
# ======================================================================
def test_convergence_criteria():
# ======================================================================
    """Convergence criteria (tol, maxiter) can be passed without crashing the fit."""
    t = np.linspace(0, 5, 100)
    r = np.linspace(2, 6, 80)
    Pref = dd_gauss(r, [4.5, 0.25])
    # Exponential background with a fixed decay rate of 0.4
    Bmodel = lambda t: bg_exp(t, 0.4)
    V = dipolarkernel(t, r, mod=0.4, bg=Bmodel)@Pref
    # Loose tolerance and a small iteration budget must still give a usable fit
    fit = fitmodel(V, t, r, 'P', bg_exp, ex_4pdeer, uq=None, tol=1e-3, maxiter=1e2)
    assert ovl(Pref, fit.P) > 0.90
# ======================================================================
| [
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
6738,
299,
32152,
1330,
1167,
201,
198,
11748,
20096,
23912,
355,
288,
75,
201,
198,
6738,
20096,
23912,
1330,
19550,
349,
668,
7948,
11,
2330,
4908,
1046,
3919,
786,
11,
4197,
19849,
... | 2.707814 | 8,830 |
from common.carla_snapshot import CarlaSnapshot
import Ice
import adapter_ice
from common.strings import *
import scenario_describer.ScenarioGeneratror as sg
import common.objects as obj
| [
6738,
2219,
13,
7718,
5031,
62,
45380,
9442,
1330,
1879,
5031,
43826,
9442,
198,
11748,
6663,
198,
11748,
21302,
62,
501,
198,
6738,
2219,
13,
37336,
1330,
1635,
198,
11748,
8883,
62,
20147,
24735,
13,
3351,
39055,
8645,
265,
1472,
355,... | 3.603774 | 53 |
# Lint as: python3
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Miscellaneous utility functions."""
import logging
from datetime import datetime
from typing import Dict, Tuple
import csv
import os
import pandas as pd
import tensorflow as tf
from tfrecorder import beam_image
from tfrecorder import constants
from tfrecorder import dataset_loader
from tfrecorder import input_schema
from tfrecorder import types
_OUT_IMAGE_TEMPLATE = 'image_{:0>3d}.png'
def _stringify(scalar: tf.Tensor) -> str:
  """Converts scalar tensor into a Python string."""
  value = scalar.numpy()
  # Byte strings come back from TF records; decode them, stringify the rest.
  if isinstance(value, bytes):
    return value.decode('utf-8')
  return str(value)
def _save_image_from_record(record: Dict[str, tf.Tensor], outfile: str):
  """Extracts image data from parsed TFRecord and saves it to a file."""
  raw_bytes = record['image'].numpy()
  # Width/height/channels are stored alongside the encoded bytes in the record.
  decoded = beam_image.decode(
      raw_bytes,
      record['image_width'],
      record['image_height'],
      record['image_channels'])
  decoded.save(outfile)
def inspect(
    tfrecord_dir: str,
    split: str = 'train',
    num_records: int = 1,
    output_dir: str = 'output'):
  """Prints contents of TFRecord files generated by TFRecorder.

  Writes a `data.csv` with the non-image fields of the first `num_records`
  records, extracts any embedded images to files, and returns the directory
  all of it was written to.

  Args:
    tfrecord_dir: TFRecord directory.
    split: Dataset split (see `schema.allowed_split_values`).
    num_records: Number of records to output.
    output_dir: Directory to dump read records.

  Raises:
    `ValueError` when data for a given `split` could not be loaded.
  """
  # `get` returns None when the requested split is absent.
  dataset = dataset_loader.load(tfrecord_dir).get(split)
  if not dataset:
    raise ValueError(f'Could not load data for {split}')
  # All output (CSV + extracted images) goes into a fresh timestamped folder.
  data_dir = os.path.join(
      output_dir, 'check-tfrecords-' + get_timestamp())
  os.makedirs(data_dir)
  with open(os.path.join(data_dir, 'data.csv'), 'wt') as f:
    writer = csv.writer(f)
    # Write CSV header
    for data in dataset.take(1).as_numpy_iterator():
      # .as_numpy_iterator() converts from Tuple of Tensors to a dict.
      # list() yields the keys of that dict.
      #TODO(mikebernico): Check the schema type instead of image key name.
      header = [k for k in list(data) if k != 'image']
      writer.writerow(header)
    # NOTE(review): `header` is only bound inside the loop above; if the
    # dataset yields zero records the loop below raises NameError -- confirm
    # an empty-but-present split cannot reach this point.
    for r in dataset.take(num_records):
      # Save non-image bytes data to CSV.
      # This will save image metadata as well.
      row = [_stringify(r[k]) for k in header]
      writer.writerow(row)
      # Save image data to a file
      if 'image_name' in r:
        # Keep only the basename so images land directly in data_dir.
        _, image_filename = os.path.split(_stringify(r['image_name']))
        image_path = os.path.join(data_dir, image_filename)
        _save_image_from_record(r, image_path)
  print('Output written to {}'.format(data_dir))
  return data_dir
def get_timestamp() -> str:
  """Returns current date and time as a 'YYYYmmdd-HHMMSS' string."""
  return f'{datetime.now():%Y%m%d-%H%M%S}'
def copy_logfile_to_gcs(logfile: str, output_dir: str):
  """Copies a logfile from local to gcs storage."""
  destination = os.path.join(output_dir, constants.LOGFILE)
  try:
    with open(logfile, 'r') as log_reader:
      contents = log_reader.read()
      with tf.io.gfile.GFile(destination, 'w') as gcs_logfile:
        gcs_logfile.write(contents)
  except FileNotFoundError as e:
    # Re-raise with a GCS-specific message, keeping the original as cause.
    raise FileNotFoundError("Unable to copy log file {} to gcs.".format(
        e.filename)) from e
def _path_split(filepath: str) -> Tuple[str, str]:
  """Splits `filepath` into (head, tail) where `tail` part after last '/'.

  e.g.
  filepath = '/path/to/image/file.jpg'
  head, tail = _path_split(filepath)
  # head -> '/path/to/image'
  # tail -> 'file.jpg'

  Similar to `os.path.split` but supports GCS paths (prefix: gs://).
  """
  if filepath.startswith(constants.GCS_PREFIX):
    # Slice the prefix off instead of `str.split(prefix)`: splitting would
    # raise a ValueError on unpack if the prefix substring occurred again
    # later in the path, and silently relied on exactly one occurrence.
    path = filepath[len(constants.GCS_PREFIX):]
    head, tail = os.path.split(os.path.normpath(path))
    return constants.GCS_PREFIX + head, tail
  return os.path.split(filepath)
def read_image_directory(image_dir: str) -> pd.DataFrame:
  """Reads image data from a directory into a Pandas DataFrame.

  Expected directory structure:
  image_dir/
    <dataset split>/
      <label>/
        <image file>

  Example expected directory structure:
  image_dir/
    TRAIN/
      label0/
        image_000.jpg
        image_001.jpg
        ...
      label1/
        image_100.jpg
        ...
    VALIDATION/
      ...

  Output will be based on `schema.image_csv_schema`.
  The subdirectories should only contain image files.
  See `beam_image.load` for supported image formats.
  """
  rows = []
  split_values = types.SplitKey.allowed_values
  for root, _, files in tf.io.gfile.walk(image_dir):
    if not files:
      continue
    # Directory layout is <image_dir>/<split>/<label>/<file>, so the label is
    # the last path component and the split the one before it.
    root_, label = _path_split(root)
    _, split = _path_split(root_)
    if split not in split_values:
      logging.warning('Unexpected split value: %s. Skipping %s',
                      split, root)
      # BUGFIX: the warning promised to skip this directory, but the rows
      # were previously still appended; actually skip unrecognized splits.
      continue
    # TODO(cezequiel): Add guard for non image files (e.g. .DS_Store)
    for f in files:
      image_uri = os.path.join(root, f)
      rows.append([split, image_uri, label])
  return pd.DataFrame(
      rows, columns=input_schema.IMAGE_CSV_SCHEMA.get_input_keys())
def create_image_csv(image_dir: str, output_filename: str):
  """Generates an Image CSV file from an image directory."""
  # Build the DataFrame first, then dump it without header or index columns.
  frame = read_image_directory(image_dir)
  return frame.to_csv(output_filename, header=False, index=False)
| [
2,
406,
600,
355,
25,
21015,
18,
198,
198,
2,
15069,
12131,
3012,
11419,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
... | 2.64167 | 2,227 |
from distutils.core import setup
from distutils.command.install import INSTALL_SCHEMES
import os
# Run the packaging from the directory containing this setup.py so relative
# paths in the metadata resolve predictably.
root = os.path.dirname(os.path.abspath(__file__))
os.chdir(root)
# Package version, duplicated here for distutils.
VERSION = '0.1.1'
# Make data go to the right place.
# http://groups.google.com/group/comp.lang.python/browse_thread/thread/35ec7b2fed36eaec/2105ee4d9e8042cb
# NOTE(review): mutating INSTALL_SCHEMES redirects data files into the
# pure-lib directory for every scheme; distutils is deprecated since
# Python 3.10 -- consider migrating to setuptools.
for scheme in INSTALL_SCHEMES.values():
    scheme['data'] = scheme['purelib']
# Packaging metadata for the django-sqlcipher distribution.
setup(
    name='django-sqlcipher',
    version=VERSION,
    description="SQLCipher support for Django",
    long_description="This module allows your Django project to work with SQLCipher.",
    author="Codasus Technologies",
    author_email="contact@codasus.com",
    url="http://github.com/codasus/django-sqlcipher",
    license="Creative Commons Attribution-ShareAlike 3.0 Unported License",
    platforms=["any"],
    packages=['sqlcipher'],
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Framework :: Django",
        "Intended Audience :: Developers",
        "Natural Language :: English",
        "Operating System :: OS Independent",
        "Topic :: Database",
        "Topic :: Security :: Cryptography",
    ],
    include_package_data=True,
)
| [
6738,
1233,
26791,
13,
7295,
1330,
9058,
198,
6738,
1233,
26791,
13,
21812,
13,
17350,
1330,
40589,
7036,
62,
50,
3398,
3620,
1546,
198,
11748,
28686,
198,
198,
15763,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
397,
... | 2.73903 | 433 |
from setuptools import setup
# Packaging metadata for the 'ifc' (informed finance canary) distribution.
# NOTE(review): zip_safe=False forces installation as a real directory rather
# than a zipped egg.
setup(name='ifc',
      version='0.1',
      description='informed finance canary',
      url='https://github.com/Darthone/bug-free-octo-parakeet',
      author='Dario Marasco',
      author_email='',
      license='MIT',
      packages=['ifc'],
      zip_safe=False)
| [
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
40406,
7,
3672,
11639,
361,
66,
3256,
198,
220,
220,
220,
220,
220,
2196,
11639,
15,
13,
16,
3256,
198,
220,
220,
220,
220,
220,
6764,
11639,
35698,
9604,
460,
560,
3256,
198,
220,
220,... | 2.292308 | 130 |
import speech_recognition as sr
import os
from gtts import gTTS
from playsound import playsound
import sys
import configparser
import random
# Ensure PyAudio is importable; if the plain import fails, attempt to install
# it at import time (first via pipwin, which ships prebuilt Windows wheels,
# then via plain pip) and retry the import.
# NOTE(review): shelling out to `pip` via os.system at import time is a heavy
# side effect and its exit status is ignored; consider declaring pyaudio as a
# regular install dependency instead.
try:
    import pyaudio
except Exception as e:
    try:
        os.system("pipwin install pyaudio")
    except Exception as e:
        try:
            os.system("pip install pyaudio")
        except Exception as e:
            print("Exception occur ", e)
            print("Install pyaudio manually")
    # Retry now that an installer was attempted; raises if both installs failed.
    import pyaudio
# import custom features
try:
import features.weather.weather as wea
import features.website_open.website_open
import features.send_mail.send_mail
import features.date_time.date_time
import features.launch_app.launch_app
import features.news.news as nw
import features.tell_me_about.tell_me_about as tma
import features.face_recognition.dataset_create as dc
import features.face_recognition.train as train
import features.face_recognition.predict as predict
import features.face_recognition.face_reco as fr
import features.show_me_my_images.show_me_my_images as my_photos
import features.setup.setup as setup_assistant
import features.google_photos.google_photos as gp
import features.joke.joke
import features.hot_word_detection.hot_word_detection as wake_word
except Exception as e:
from JarvisAI.features.weather import weather as wea
from JarvisAI.features.website_open import website_open
from JarvisAI.features.send_mail import send_mail
from JarvisAI.features.date_time import date_time
from JarvisAI.features.launch_app import launch_app
from JarvisAI.features.news import news as nw
from JarvisAI.features.tell_me_about import tell_me_about as tma
from JarvisAI.features.face_recognition import dataset_create as dc
from JarvisAI.features.face_recognition import train as train
from JarvisAI.features.face_recognition import predict as predict
from JarvisAI.features.face_recognition import face_reco as fr
from JarvisAI.features.show_me_my_images import show_me_my_images as my_photos
from JarvisAI.features.setup import setup as setup_assistant
from JarvisAI.features.google_photos import google_photos as gp
from JarvisAI.features.joke import joke
from JarvisAI.features.hot_word_detection import hot_word_detection as wake_word
if __name__ == '__main__':
    # Ad-hoc smoke test: build the assistant and exercise one feature.
    # NOTE(review): `JarvisAssistant` is not defined in this module's visible
    # imports -- presumably defined elsewhere in this file; verify.
    obj = JarvisAssistant()
    # print(obj.text2speech_male())
    res = obj.tell_me_joke()
    print(res)
    # Other features kept here (commented out) for manual testing:
    # obj.text2speech("hello")
    # res = obj.website_opener("facebook.com")
    # res = obj.send_mail()
    # res = obj.launch_app("edge")
    # res = obj.weather("mumbai")
    # res = obj.news()
    # res = obj.tell_me()
    # res = obj.tell_me_time()
    # res = obj.tell_me_date()
    # res = obj.shutdown()
    # obj.datasetcreate()
| [
11748,
4046,
62,
26243,
653,
355,
19677,
198,
11748,
28686,
198,
6738,
308,
83,
912,
1330,
308,
51,
4694,
198,
6738,
5341,
633,
1330,
5341,
633,
198,
11748,
25064,
198,
11748,
4566,
48610,
198,
11748,
4738,
198,
198,
28311,
25,
198,
2... | 2.860772 | 984 |
from .colors import Colors
from .helper import Helper
from .template import Template
from os.path import exists, join
from os import listdir
from threading import Thread
from time import sleep
# Directory of this package: this file's path with the trailing
# "<module>.py" removed (keeps the trailing path separator).
# BUGFIX: str.strip() removes any of the given *characters* from both ends,
# not the suffix, so __file__.strip(__name__ + '.py') could eat arbitrary
# leading/trailing path characters. Remove the suffix explicitly; fall back
# to the old behavior only when the expected suffix is absent.
_MODULE_SUFFIX = __name__ + '.py'
PKG_ROOT = (__file__[:-len(_MODULE_SUFFIX)]
            if __file__.endswith(_MODULE_SUFFIX)
            else __file__.strip(_MODULE_SUFFIX))
| [
6738,
764,
4033,
669,
1330,
29792,
201,
198,
6738,
764,
2978,
525,
1330,
5053,
525,
201,
198,
6738,
764,
28243,
1330,
37350,
201,
198,
6738,
28686,
13,
6978,
1330,
7160,
11,
4654,
201,
198,
6738,
28686,
1330,
1351,
15908,
201,
198,
67... | 3.207792 | 77 |