content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
#!/usr/bin/env python3
##########################################
######## DO NOT MODIFY THIS FILE! ########
## CONFIGURE API INFO THROUGH NZBGET UI ##
##########################################
#####################################
### NZBGET POST-PROCESSING SCRIPT ###
# Script to send post-processing info
# to Watcher.
#####################################
### OPTIONS ###
# Watcher API key.
#Apikey=
# Watcher address.
#Host=http://localhost:9090/
# Verify origin of Watcher's SSL certificate (enabled, disabled).
# enabled - Certificates must be valid (self-signed certs may fail)
# disabled - All certificates will be accepted
#VerifySSL=enabled
### NZBGET POST-PROCESSING SCRIPT ###
#####################################
import json
import os
import sys
import ssl
# Pick urllib names that work on both Python 2 and Python 3.
if sys.version_info.major < 3:
    import urllib
    import urllib2
    urlencode = urllib.urlencode
    request = urllib2.Request
    urlopen = urllib2.urlopen
else:
    import urllib.parse
    import urllib.request
    request = urllib.request.Request
    urlencode = urllib.parse.urlencode
    urlopen = urllib.request.urlopen
# Optionally relax certificate checks when the VerifySSL option is not 'enabled'.
ctx = ssl.create_default_context()
if os.environ['NZBPO_VERIFYSSL'] != 'enabled':
    ctx.check_hostname = False
    ctx.verify_mode = ssl.CERT_NONE
# NZBGet post-processing exit codes (fixed by the NZBGet API contract).
POSTPROCESS_SUCCESS = 93
POSTPROCESS_ERROR = 94
POSTPROCESS_NONE = 95
# Script options (NZBPO_*) and job fields (NZBPP_*) arrive via environment variables.
watcheraddress = os.environ['NZBPO_HOST']
watcherapi = os.environ['NZBPO_APIKEY']
name = os.environ['NZBPP_NZBNAME']
data = {'apikey': watcherapi, 'guid': ''}
# Gather info
if os.environ['NZBPP_URL']:
    data['guid'] = os.environ['NZBPP_URL']
data['downloadid'] = os.environ['NZBPP_NZBID']
data['path'] = os.environ['NZBPP_DIRECTORY']
if os.environ['NZBPP_TOTALSTATUS'] == 'SUCCESS':
    print(u'Sending {} to Watcher as Complete.'.format(name))
    data['mode'] = 'complete'
else:
    print(u'Sending {} to Watcher as Failed.'.format(name))
    data['mode'] = 'failed'
# Send info
url = u'{}/postprocessing/'.format(watcheraddress)
post_data = urlencode(data).encode('ascii')
# NOTE(review): this rebinding shadows the `request` factory selected above.
request = request(url, post_data, headers={'User-Agent': 'Mozilla/5.0'})
response = json.loads(urlopen(request, timeout=600, context=ctx).read().decode('utf-8'))
# Map Watcher's JSON reply onto NZBGet's expected exit codes.
if response.get('status') == 'finished':
    sys.exit(POSTPROCESS_SUCCESS)
elif response.get('status') == 'incomplete':
    sys.exit(POSTPROCESS_ERROR)
else:
    sys.exit(POSTPROCESS_NONE)
# pylama:ignore=E266,E265
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
29113,
7804,
2235,
198,
7804,
8410,
5626,
19164,
5064,
56,
12680,
45811,
0,
46424,
21017,
198,
2235,
25626,
11335,
7824,
24890,
35383,
32632,
26905,
33,
18851,
12454,
22492,
198,
... | 2.67806 | 907 |
import pickle
import numpy as np
| [
11748,
2298,
293,
198,
11748,
299,
32152,
355,
45941,
198
] | 3.3 | 10 |
import time
import pytest
import sys
from unittest import mock
from aioredis import ReplyError
from _testutils import redis_version
@pytest.mark.run_loop
@pytest.mark.run_loop
@pytest.mark.skipif(sys.platform == 'win32',
reason="No unixsocket on Windows")
@pytest.mark.run_loop
@redis_version(
2, 9, 50, reason='CLIENT PAUSE is available since redis >= 2.9.50')
@pytest.mark.run_loop
@redis_version(2, 8, 13, reason="available since Redis 2.8.13")
@pytest.mark.run_loop
@redis_version(2, 8, 13, reason="available since Redis 2.8.13")
@pytest.mark.run_loop
@redis_version(3, 0, 0, reason="available since Redis 3.0.0")
@pytest.mark.run_loop
@redis_version(2, 8, 13, reason="available since Redis 2.8.13")
@pytest.mark.run_loop
@pytest.mark.run_loop
@pytest.mark.run_loop
@pytest.mark.run_loop
# @pytest.mark.run_loop
# @pytest.mark.skip("Not implemented")
# def test_config_resetstat():
# pass
@pytest.mark.run_loop
@pytest.mark.run_loop
@pytest.mark.run_loop
@pytest.mark.run_loop
@pytest.mark.run_loop
@pytest.mark.run_loop
@redis_version(2, 8, 12, reason='ROLE is available since redis>=2.8.12')
@pytest.mark.run_loop
@pytest.mark.parametrize('encoding', [
pytest.param(None, id='no decoding'),
pytest.param('utf-8', id='with decoding'),
])
@pytest.mark.run_loop
@pytest.mark.run_loop
@pytest.mark.run_loop
@pytest.mark.run_loop
| [
11748,
640,
198,
11748,
12972,
9288,
198,
11748,
25064,
198,
198,
6738,
555,
715,
395,
1330,
15290,
198,
198,
6738,
257,
72,
1850,
271,
1330,
14883,
12331,
198,
6738,
4808,
9288,
26791,
1330,
2266,
271,
62,
9641,
628,
198,
31,
9078,
9... | 2.412969 | 586 |
# coding: utf-8
from __future__ import unicode_literals
from ..util import get_doc
import pytest
@pytest.mark.skip
@pytest.mark.models('en')
def test_issue514(EN):
    """Test serializing after adding entity"""
    words = ["This", "is", "a", "sentence", "about", "pasta", "."]
    vocab = EN.entity.vocab
    doc = get_doc(vocab, words)
    EN.entity.add_label("Food")
    EN.entity(doc)
    food_id = vocab.strings[u'Food']
    doc.ents = [(food_id, 5, 6)]
    expected = [("Food", "pasta")]
    assert [(ent.label_, ent.text) for ent in doc.ents] == expected
    # Round-trip through the binary serialization and re-check the entity.
    doc2 = get_doc(EN.entity.vocab).from_bytes(doc.to_bytes())
    assert [(ent.label_, ent.text) for ent in doc2.ents] == expected
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
11485,
22602,
1330,
651,
62,
15390,
198,
198,
11748,
12972,
9288,
628,
198,
31,
9078,
9288,
13,
4102,
13,
48267,
198,
3... | 2.425532 | 282 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
python modules for data processing utilities:
dataworkshop: main GUI framework for data post-processing
Author: Tong Zhang
Created: Sep. 23rd, 2015
"""
from __future__ import print_function
import wx
import time
import threading
import numpy as np
import matplotlib.pyplot as plt
import h5py
import os
import shutil
from . import funutils
from . import pltutils
from . import imageutils
from . import resutils
#class PlotPanel(pltutils.ImagePanelxy):
# def __init__(self, parent, figsize, dpi, bgcolor, **kwargs):
# pltutils.ImagePanelxy.__init__(self, parent, figsize, dpi, bgcolor, **kwargs)
## self.axes.set_aspect('equal')
# ImageGrid: do not use this now
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
29412,
13103,
329,
1366,
7587,
20081,
25,
198,
220,
220,
220,
4818,
707,
3647,
8548,
25,
1388,
25757,
9355,... | 2.853846 | 260 |
from django.conf import settings
from django.urls import include
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, re_path
from django.views.generic.base import TemplateView
from core import __version__
from core.apiv1 import apiv1_gone
from homepage.views import homepage, error_404_view, error_500_view, health_check_view, OpenView, SitemapView
from package.views import category, python3_list
from profiles.views import LogoutView
# Version-stamped header/title shown in the Django admin.
admin_header = f"Django Packages v{__version__}"
admin.site.enable_nav_sidebar = False  # disabled until Django 3.x
admin.site.site_header = admin_header
admin.site.site_title = admin_header
urlpatterns = [
    # url(r'^login/\{\{item\.absolute_url\}\}/', RedirectView.as_view(url="/login/github/")),
    path('auth/', include('social_django.urls', namespace='social')),
    # url('', include('social_auth.urls')),
    path('', homepage, name="home"),
    path('health_check/', health_check_view, name="health_check"),
    # Explicit routes so the error pages can be previewed directly.
    path('404', error_404_view, name="404"),
    path('500', error_500_view, name="500"),
    # Admin prefix is configurable via settings (ADMIN_URL_BASE).
    re_path(settings.ADMIN_URL_BASE, admin.site.urls),
    path('profiles/', include("profiles.urls")),
    path('packages/', include("package.urls")),
    path('grids/', include("grid.urls")),
    path('feeds/', include("feeds.urls")),
    path('categories/<slug:slug>/', category, name="category"),
    path('categories/', homepage, name="categories"),
    path('python3/', python3_list, name="py3_compat"),
    # url(regex=r'^login/$', view=TemplateView.as_view(template_name='pages/login.html'), name='login',),
    path('logout/', LogoutView.as_view(), name='logout'),
    # static pages
    path('about/', TemplateView.as_view(template_name='pages/faq.html'), name="about"),
    path('terms/', TemplateView.as_view(template_name='pages/terms.html'), name="terms"),
    path('faq/', TemplateView.as_view(template_name='pages/faq.html'), name="faq"),
    path('open/', OpenView.as_view(), name="open"),
    path('syndication/', TemplateView.as_view(template_name='pages/syndication.html'), name="syndication"),
    path('help/', TemplateView.as_view(template_name='pages/help.html'), name="help"),
    path("funding/", TemplateView.as_view(template_name='pages/funding.html'), name="funding"),
    path("sitemap.xml", SitemapView.as_view(), name="sitemap"),
    # new apps
    path('search/', include("searchv2.urls")),
    # apiv2
    # url(r'^api/v2/', include('core.apiv2', namespace="apiv2")),
    # apiv3
    path('api/v3/', include('apiv3.urls', namespace="apiv3")),
    # apiv4
    path('api/v4/', include("apiv4.urls", namespace='apiv4')),
    path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),
    # Retired v1 API: every v1 route answers with a "gone" view.
    re_path(
        r"^api/v1/.*$", view=apiv1_gone,
        name="apiv1_gone",
    ),
    # url(r'^api/v1/', include('core.apiv1', namespace="apitest")),
    # reports
    # url(r'^reports/', include('reports.urls', namespace='reports')),
]
# Development-only extras: debug toolbar and locally-served media files.
if settings.DEBUG:
    import debug_toolbar
    urlpatterns.append(path("__debug__/", include(debug_toolbar.urls)))
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
2291,
198,
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
13,
12708,
1330,
9037,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
42625... | 2.680404 | 1,189 |
"""Utilities for unit testing."""
from importlib import __import__
import pytest
import sys
class Mocktoday(object):
    """A context manager that fakes out a module's ``today`` function.

    Monkeypatches the given module so its imported ``today`` reports any
    date we want. This assumes the code under test uses
    emol.utility.date.today and not date.today() (the former is a proxy
    for the latter to facilitate exactly this kind of patching).

    Usage:

        from utils import Mocktoday

        def test_party_like_its_1999():
            with Mocktoday('emol.this.that', date(1999, 01, 01))
                # do something that will call today()
    """

    def __init__(self, module, desired_date):
        """Constructor.

        Args:
            module: The module name to operate on
            desired_date: The date to report
        """
        self.module = sys.modules[module]
        self.desired_date = desired_date
        # Swap out the module's imported today function for our fake,
        # remembering the original so __exit__ can restore it.
        self.orig = self.module.today
        self.module.today = self.mock_today

    def mock_today(self):
        """Replacement for today function for unit tests."""
        return self.desired_date

    def __enter__(self, *args, **kwargs):
        """Nothing to do here; the patch was applied in __init__."""
        pass

    def __exit__(self, *args, **kwargs):
        """Restore the module's real today function."""
        self.module.today = self.orig
class Mockmail(object):
    """A context manager for mail.Emailer unit testing.

    Monkeypatches the given module to swap out Emailer with a testing class
    so that we can test whether or not any of the email sending methods were
    called by an invocation.

    Usage (from a model unit test):

        from emol.utility.testing import Mockmail

        def test_something_that_sends_mail():
            with Mockmail('emol.this.that', True)
                # do something that should cause send_email to be called

        def test_something_that_does_not_mail():
            with Mockmail('emol.this.that', False)
                # do something that should not cause send_email to be called
    """

    # Assertion messages keyed by the expected_result flag.
    messages = {
        True: 'Expected send_email to be called but it was not',
        False: 'Expected send_email to not be called but it was'
    }

    def __init__(self, module, expected_result):
        """Constructor.

        Args:
            module: The module name to operate on
            expected_result: True if a call that sends email is expected,
                False if not. None for no test (fixtures)
        """
        class TestEmailer(object):
            """Unit testing replacement for mail.Emailer.

            Override all the methods below to just set self.mocked on
            the enclosing Mockmail.
            """
            # NOTE(review): the bodies of the decorated methods are missing
            # from this view — bare stacked @classmethod decorators are not
            # valid Python. Confirm against the full original source.
            @classmethod
            @classmethod
            @classmethod
            @classmethod
            @classmethod
            @classmethod
            @classmethod

        self.expected_result = expected_result
        self.module = sys.modules[module]
        # Swap out the module's Emailer loaded from emol.mail
        self.orig = self.module.Emailer
        self.module.Emailer = TestEmailer

    def __enter__(self):
        """Reset self.mocked."""
        self.mocked = False

    def __exit__(self, *args, **kwargs):
        """Reset the proper send_email method and assess the result."""
        self.module.Emailer = self.orig
        if self.expected_result is None:
            return
        if self.mocked != self.expected_result:
            pytest.fail(self.messages[self.expected_result])
| [
37811,
18274,
2410,
329,
4326,
4856,
526,
15931,
198,
198,
6738,
1330,
8019,
1330,
11593,
11748,
834,
198,
11748,
12972,
9288,
198,
11748,
25064,
628,
198,
4871,
44123,
40838,
7,
15252,
2599,
198,
220,
220,
220,
37227,
32,
4732,
4706,
3... | 2.501038 | 1,445 |
#!/usr/bin/env python3
# Invoked by: Cloudformation custom actions
# Returns: Error or status message
#
# deletes the resources assocated with lambda like eni
import boto3
import http.client
import urllib
import json
import uuid
import threading
from time import sleep
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
10001,
6545,
416,
25,
10130,
1161,
2183,
4028,
198,
2,
16409,
25,
13047,
393,
3722,
3275,
198,
2,
198,
2,
28128,
274,
262,
4133,
840,
10533,
351,
37456,
588,
551,
72,
198,
1... | 3.635135 | 74 |
from copy import copy
from ..device.lasercommandconstants import (
COMMAND_BEEP,
COMMAND_FUNCTION,
COMMAND_HOME,
COMMAND_MODE_RAPID,
COMMAND_MOVE,
COMMAND_SET_ABSOLUTE,
COMMAND_SET_POSITION,
COMMAND_UNLOCK,
COMMAND_WAIT,
COMMAND_WAIT_FINISH,
)
from ..kernel import Modifier
from ..svgelements import Length
from .cutcode import CutCode, CutGroup, RasterCut
from .cutplan import CutPlan
from .elements import LaserOperation
MILS_IN_MM = 39.3701
class Planner(Modifier):
    """
    Planner is a modifier that adds 'plan' commands to the kernel. These are text based versions of the job preview and
    should be permitted to control the job creation process.
    """

    def get_or_make_plan(self, plan_name):
        """
        Plans are a tuple of 3 lists and the name. Plan, Original, Commands, and Plan-Name

        Returns the cached plan for ``plan_name``, registering a fresh
        CutPlan under that name first if none exists yet.
        """
        if plan_name not in self._plan:
            self._plan[plan_name] = CutPlan(plan_name, self.context)
        return self._plan[plan_name]

    def reify_matrix(self):
        """Apply the matrix to the path and reset matrix."""
        self.element = abs(self.element)
        self.scene_bounds = None
def correct_empty(context: CutGroup):
    """
    Iterate backwards deleting any entries that are empty.
    """
    # NOTE(review): as written this unlinks and deletes *every* entry, not
    # only empty ones — an emptiness check appears to have been lost in
    # extraction. Verify against the upstream source before relying on it.
    for index in range(len(context) - 1, -1, -1):
        c = context[index]
        if isinstance(c, CutGroup):
            # Clean nested groups first so their links are gone before
            # the parent entry is removed.
            correct_empty(c)
        if c.inside:
            # Remove back-references to c from the cuts it sits inside.
            for o in c.inside:
                if o.contains and c in o.contains:
                    o.contains.remove(c)
        del context[index]
| [
6738,
4866,
1330,
4866,
198,
198,
6738,
11485,
25202,
13,
75,
6005,
21812,
9979,
1187,
1330,
357,
198,
220,
220,
220,
22240,
6981,
62,
33,
35238,
11,
198,
220,
220,
220,
22240,
6981,
62,
42296,
4177,
2849,
11,
198,
220,
220,
220,
22... | 2.404971 | 684 |
# Example taken from:
# https://stackoverflow.com/questions/39561560/getting-gradient-of-model-output-w-r-t-weights-using-keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras import backend as K
import tensorflow as tf
import numpy as np
# Build a tiny fully-connected binary classifier.
model = Sequential()
model.add(Dense(8, input_dim=8, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# Symbolic gradients of the model output w.r.t. every trainable weight.
outputTensor = model.output
listOfVariableTensors = model.trainable_weights
gradients = K.gradients(outputTensor, listOfVariableTensors)
# Evaluate the gradients for one random input example.
# NOTE(review): K.gradients, InteractiveSession and global_variables_initializer
# are TF1 graph-mode APIs; under TensorFlow 2 this needs tf.compat.v1 with
# eager execution disabled — confirm the intended TF version.
trainingExample = np.random.random((1, 8))
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
evaluated_gradients = sess.run(gradients, feed_dict={model.input: trainingExample})
| [
2,
17934,
2077,
422,
25,
198,
2,
3740,
1378,
25558,
2502,
11125,
13,
785,
14,
6138,
507,
14,
2670,
3980,
1314,
1899,
14,
37210,
12,
49607,
12,
1659,
12,
19849,
12,
22915,
12,
86,
12,
81,
12,
83,
12,
43775,
12,
3500,
12,
6122,
29... | 2.982332 | 283 |
#!/usr/bin/python
import os
import sys
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from Engine.engine import VenueBaseCmdProcessor, ClientBaseCmdProcessor, ClientBaseCmdTraits, VenueBaseCmdTraits
from ouch.ouchprotocol import OuchProtocol
import datetime
from collections import deque
import pdb
#######################################################
# Currently there is no specific command back and forth
# ouch venue/ ouch client
#######################################################
if __name__ == "__main__":
    # NOTE(review): test() is not defined in this view; it is presumably
    # defined elsewhere in the original file.
    test()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
10104,
198,
198,
14421,
15908,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
397,
2777,
776,
7,
1040,
806,
13,
1136,
7753,
7,
104... | 3.564516 | 186 |
#!/usr/bin/python
import json
import re
import requests
try:
from BeautifulSoup import BeautifulSoup
except ImportError:
from bs4 import BeautifulSoup
try:
inputFile = open("input.json")
except Exception, e:
print e
exit()
try:
urls = json.load(inputFile)['urls']
except Exception, e:
print e
exit()
## Object to store the stocks list
## key: fund name
## value: stocks list
detailed_portfolio = {}
keys = []
## Function to add some additional data to output
## currently adding asset allocation, i.e. cash, stock and number of stocks held
for url in urls:
    print url
    # Fund identifier: second-to-last path segment of the URL.
    key = url.rpartition('/')[0].rpartition('/')[2]
    keys.append(key)
    detailed_portfolio.setdefault(key, {})
    detailed_portfolio[key].setdefault("stocks-data", [])
    ## Fetch detailed portfolio data
    response = requests.get(url)
    print response.status_code
    if (response.status_code != 200) or (response.content == ""):
        print "No content to parse. Please try again!"
        exit()
    page = response.content
    soup = BeautifulSoup(page, "lxml")
    # Each equity holding row has a <td> whose text contains "Equity".
    text = soup.body.find_all("td", string=re.compile("Equity"))
    count = 0
    for td in text:
        # Walk sibling cells: name cell precedes, sector/weighting follow.
        stock_td = td.previous_sibling.previous_sibling.a
        if not stock_td:
            stock_td = td.previous_sibling.previous_sibling.span
        sector_td = td.next_sibling.next_sibling
        weighting_td = sector_td.next_sibling.next_sibling.next_sibling.next_sibling
        current = {}
        try:
            # Normalize common company-name abbreviations.
            stock_name = stock_td.string
            stock_name = stock_name.replace(' Corp ', ' Corporation ')
            if not re.match(r"(.*?)Ltd(?=.)", stock_name):
                stock_name = stock_name.replace("Ltd", "Ltd.")
            stock_name = stock_name.replace(" Limited", " Ltd.")
            stock_name = stock_name.replace(" Co ", " Company ")
            current["stock"] = stock_name
            current["sector"] = sector_td.string
            current["weighting"] = weighting_td.string
            detailed_portfolio[key]["stocks-data"].append(current)
            count += 1
        except:
            # Rows with unexpected markup are skipped silently.
            pass
    # NOTE(review): add_additional_data is not defined in this view; it is
    # presumably defined elsewhere in the original file.
    add_additional_data(soup, count, key, detailed_portfolio)
outfile = open('stocks-list.json', 'w')
json.dump(detailed_portfolio, outfile)
outfile.close()
exit()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
11748,
33918,
198,
11748,
302,
198,
11748,
7007,
198,
28311,
25,
198,
220,
220,
220,
422,
23762,
50,
10486,
1330,
23762,
50,
10486,
198,
16341,
17267,
12331,
25,
198,
220,
220,
220,
422,... | 2.403292 | 972 |
"""
Basic functions for working with images.
"""
from __future__ import division, print_function, absolute_import
import itertools as itr
import numpy as np
def _import_skimage():
    """Import scikit-image, with slightly modified `ImportError` message.

    Returns the ``skimage`` module; raises ImportError with a clearer
    message if scikit-image is not installed.
    """
    try:
        import skimage
    except ImportError:
        raise ImportError("scikit-image is required to use this function.")
    return skimage
def _import_pil():
    """Import PIL/Pillow, with slightly modified `ImportError` message.

    Returns the ``PIL`` module; raises ImportError with a clearer message
    if PIL/Pillow is not installed. (The original docstring said
    "scikit-image" — a copy-paste slip; this helper imports PIL.)
    """
    try:
        import PIL
    except ImportError:
        raise ImportError("PIL/Pillow is required to use this function.")
    return PIL
def resize_by_factor(im, factor):
    """
    Resize the image according to a factor. The image is pre-filtered
    with a Gaussian and then resampled with bilinear interpolation, by
    combining scikit-image's `pyramid_reduce` and `pyramid_expand`.

    Returns the same object (not a copy) if factor is 1.

    Parameters
    ----------
    im : ndarray, ndim=2 or 3
        Image. Either 2D or 3D with 3 or 4 channels.
    factor : float
        Resize factor, e.g. a factor of 0.5 will halve both sides.
    """
    _import_skimage()
    from skimage.transform.pyramids import pyramid_expand, pyramid_reduce

    if factor == 1:
        return im
    if factor > 1:
        return pyramid_expand(im, upscale=factor)
    return pyramid_reduce(im, downscale=1 / factor)
def asgray(im):
    """
    Return a grayscale version of an image by averaging its color channels.

    An alpha channel, if present, is simply ignored. A grayscale input is
    returned unchanged (the same object, not a copy).

    Parameters
    ----------
    image : ndarray, ndim 2 or 3
        RGB or grayscale image.

    Returns
    -------
    gray_image : ndarray, ndim 2
        Grayscale version of image.
    """
    if im.ndim == 2:
        return im
    if im.ndim == 3 and im.shape[2] in (3, 4):
        # Average only the first three (color) channels; channel 4 is alpha.
        return im[..., :3].mean(axis=-1)
    raise ValueError('Invalid image format')
def crop(im, size):
    """
    Crop the central region of the given spatial size out of an image.

    Parameters
    ----------
    size : tuple, (height, width)
        Final size after cropping.
    """
    top = (im.shape[0] - size[0]) // 2
    left = (im.shape[1] - size[1]) // 2
    return im[top:top + size[0], left:left + size[1]]
def crop_or_pad(im, size, value=0):
    """
    Center-crop an image where it is larger than ``size`` and center-pad it
    with ``value`` where it is smaller; the two axes may mix both.

    (The previous implementation was a verbatim copy of `crop`: it never
    padded and silently ignored ``value``.)

    Parameters
    ----------
    size : tuple, (height, width)
        Final spatial size after cropping/padding.
    value : scalar
        Fill value for padded regions, cast to ``im.dtype``. Defaults to 0.
    """
    diff = [im.shape[index] - size[index] for index in (0, 1)]
    if diff[0] >= 0 and diff[1] >= 0:
        # Pure crop: identical behavior (a view, like `crop`).
        return im[diff[0]//2:diff[0]//2 + size[0],
                  diff[1]//2:diff[1]//2 + size[1]]
    # At least one axis needs padding: build a filled output and copy the
    # overlapping region into its center. Trailing (channel) axes are kept.
    out = np.full((size[0], size[1]) + im.shape[2:], value, dtype=im.dtype)
    src = []
    dst = []
    for axis in (0, 1):
        d = diff[axis]
        if d >= 0:
            # Crop this axis.
            src.append(slice(d // 2, d // 2 + size[axis]))
            dst.append(slice(0, size[axis]))
        else:
            # Pad this axis.
            src.append(slice(0, im.shape[axis]))
            dst.append(slice(-d // 2, -d // 2 + im.shape[axis]))
    out[dst[0], dst[1]] = im[src[0], src[1]]
    return out
def crop_to_bounding_box(im, bb):
    """
    Return the region of an image described by a bounding box.

    Parameters
    ----------
    bounding_box : tuple, (top, left, bottom, right)
        Crops inclusively for top/left and exclusively for bottom/right.
    """
    top, left, bottom, right = bb
    return im[top:bottom, left:right]
def load(path, dtype=np.float64):
    """
    Load an image from file via scikit-image.

    Parameters
    ----------
    path : str
        Path to image file.
    dtype : np.dtype
        Defaults to ``np.float64``, which means the image will be returned
        as a float with values between 0 and 1. If ``np.uint8`` is
        specified, the values will be between 0 and 255 and no conversion
        cost will be incurred.
    """
    _import_skimage()
    import skimage.io
    raw = skimage.io.imread(path)
    if dtype == np.uint8:
        return raw
    if dtype in {np.float16, np.float32, np.float64}:
        # Rescale 0-255 integer data into [0, 1] floats.
        return raw.astype(dtype) / 255
    raise ValueError('Unsupported dtype')
def load_raw(path):
    """
    Load an image using PIL/Pillow without any processing. This is
    particularly useful for palette images, which keep their palette index
    values, as opposed to `load` which converts them to RGB.

    Parameters
    ----------
    path : str
        Path to image file.
    """
    _import_pil()
    from PIL import Image
    pil_image = Image.open(path)
    return np.array(pil_image)
def save(path, im):
    """
    Save an image to file.

    A float image is assumed to have values in [0, 1] and is rescaled to
    0-255; a uint8 image is written as-is.

    Parameters
    ----------
    path : str
        Path to which the image will be saved.
    im : ndarray (image)
        Image.
    """
    from PIL import Image
    if im.dtype == np.uint8:
        out = Image.fromarray(im)
    else:
        out = Image.fromarray((im * 255).astype(np.uint8))
    out.save(path)
def integrate(ii, r0, c0, r1, c1):
    """
    Use an integral image to integrate over a given window.

    Parameters
    ----------
    ii : ndarray
        Integral image, with channels on the last axis.
    r0, c0 : int
        Top-left corner of block to be summed (inclusive).
    r1, c1 : int
        Bottom-right corner of block to be summed (inclusive).

    Returns
    -------
    S : ndarray
        Integral (sum) over the given window, one value per channel.
    """
    # Standard four-corner integral-image formula; the corner terms at
    # r0-1 / c0-1 only apply when the window does not touch the border.
    total = np.zeros(ii.shape[-1])
    total += ii[r1, c1]
    if r0 > 0 and c0 > 0:
        total += ii[r0 - 1, c0 - 1]
    if r0 > 0:
        total -= ii[r0 - 1, c1]
    if c0 > 0:
        total -= ii[r1, c0 - 1]
    return total
def offset(img, offset, fill_value=0):
    """
    Move the contents of an image without changing the image size. The
    uncovered region is given the specified fill value.

    Parameters
    ----------
    img : array
        Image.
    offset : (vertical_offset, horizontal_offset)
        Tuple of length 2, specifying the offset along the two axes.
    fill_value : dtype of img
        Fill value. Defaults to 0.
    """
    sh = img.shape
    if sh == (0, 0):
        return img
    dy, dx = offset[0], offset[1]
    # Output keeps the input's shape (float dtype, like np.empty default).
    out = np.empty(sh)
    out[:] = fill_value
    dst_rows = slice(max(dy, 0), min(sh[0] + dy, sh[0]))
    dst_cols = slice(max(dx, 0), min(sh[1] + dx, sh[1]))
    src_rows = slice(max(-dy, 0), min(sh[0] - dy, sh[0]))
    src_cols = slice(max(-dx, 0), min(sh[1] - dx, sh[1]))
    out[dst_rows, dst_cols] = img[src_rows, src_cols]
    return out
def bounding_box(alpha, threshold=0.1):
    """
    Return a bounding box of the support.

    Parameters
    ----------
    alpha : ndarray, ndim=2
        Any one-channel image where the background has zero or low intensity.
    threshold : float
        The threshold that divides background from foreground.

    Returns
    -------
    bounding_box : (top, left, bottom, right)
        The bounding box describing the smallest rectangle containing the
        foreground object, as defined by the threshold; indices are
        inclusive on all four sides.
    """
    assert alpha.ndim == 2
    # Project the support onto each axis, then take the first and last
    # index where the projection exceeds the threshold.
    rows = np.where(alpha.max(axis=1) > threshold)[0]
    cols = np.where(alpha.max(axis=0) > threshold)[0]
    return (rows[0], cols[0], rows[-1], cols[-1])
def bounding_box_as_binary_map(alpha, threshold=0.1):
    """
    Similar to `bounding_box`, except returns the bounding box as a
    binary map the same size as the input.

    Same parameters as `bounding_box`.

    Returns
    -------
    binary_map : ndarray, ndim=2, dtype=np.bool_
        Binary map with True if object and False if background.
    """
    # Fix: forward the caller's threshold; previously it was dropped and
    # the default 0.1 was always used regardless of the argument.
    bb = bounding_box(alpha, threshold=threshold)
    binary_map = np.zeros(alpha.shape, dtype=np.bool_)
    binary_map[bb[0]:bb[2], bb[1]:bb[3]] = 1
    return binary_map
def extract_patches(images, patch_shape, samples_per_image=40, seed=0,
                    cycle=True):
    """
    Take a set of images and yield randomly chosen patches of specified size.

    Parameters
    ----------
    images : iterable
        The images have to be iterable, and each element must be a Numpy
        array with at least two spatial dimensions as the first and second
        axis.
    patch_shape : tuple, length 2
        The spatial shape of the patches that should be extracted. If the
        images have further dimensions beyond the spatial, the patches will
        copy these too.
    samples_per_image : int
        Samples to extract before moving on to the next image.
    seed : int
        Seed with which to select the patches.
    cycle : bool
        If True, the generator produces patches indefinitely, by going back
        to the first image when all are done. If False, the iteration stops
        after one pass over `images`.

    Returns
    -------
    patch_generator
        A generator producing one patch per iteration.
    """
    rs = np.random.RandomState(seed)
    # Fix: honor `cycle` — previously the images were always cycled
    # indefinitely even when cycle=False was passed.
    image_iter = itr.cycle(images) if cycle else iter(images)
    for Xi in image_iter:
        # Number of valid top-left positions along each spatial axis.
        w, h = [Xi.shape[i] - patch_shape[i] for i in range(2)]
        assert w > 0 and h > 0
        # Shuffle all positions and take the first samples_per_image.
        indices = np.asarray(list(itr.product(range(w), range(h))))
        rs.shuffle(indices)
        for x, y in indices[:samples_per_image]:
            yield Xi[x:x + patch_shape[0], y:y + patch_shape[1]]
| [
37811,
198,
26416,
5499,
329,
1762,
351,
4263,
13,
198,
37811,
198,
6738,
11593,
37443,
834,
1330,
7297,
11,
3601,
62,
8818,
11,
4112,
62,
11748,
198,
11748,
340,
861,
10141,
355,
340,
81,
198,
11748,
299,
32152,
355,
45941,
198,
198,... | 2.510616 | 3,815 |
from SeleniumLibrary.base import LibraryComponent, keyword
| [
6738,
15300,
47477,
23377,
13,
8692,
1330,
10074,
21950,
11,
21179,
628
] | 5 | 12 |
# Copyright 2018 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from config_tempest import constants as C
from config_tempest.services.base import VersionedService
from tempest.lib import exceptions
| [
2,
15069,
2864,
2297,
10983,
11,
3457,
13,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
407,
779,
428,
2393,
2845,
287,
11846... | 3.836735 | 196 |
# Generated by Django 3.2.5 on 2021-08-04 11:38
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
20,
319,
33448,
12,
2919,
12,
3023,
1367,
25,
2548,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
from concurrent.futures.thread import ThreadPoolExecutor
from src.common.log import log
from src.core.path_settings import TEST_CASES
def run_thread(suits, thread_num: int = 8):
    """
    Run all test cases concurrently in a thread pool.

    :param suits: iterable of test cases (e.g. a unittest suite)
    :param thread_num: number of worker threads
    :return: the shared unittest.TestResult collecting every outcome
    """
    result = unittest.TestResult()
    # The `with` block waits for all submitted cases to finish on exit.
    with ThreadPoolExecutor(max_workers=thread_num) as pool:
        for case in suits:
            pool.submit(case.run, result=result)
    return result
if __name__ == '__main__':
    try:
        # Discover every module matching test*.py under TEST_CASES and run
        # the resulting suite across the thread pool.
        test_suite = unittest.defaultTestLoader.discover(TEST_CASES, 'test*.py')
        runner = run_thread(test_suite)
    except Exception as e:
        # Log message (Chinese): "Error while running!!! Please check!!!"
        log.error('运行出错!!!请检查!!!')
        raise e
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
555,
715,
395,
198,
6738,
24580,
13,
69,
315,
942,
13,
16663,
1330,
14122,
27201,
23002,
38409,
198,
... | 1.963446 | 383 |
import multiprocessing as mp
import time
import random
random.seed()
# Queues for bidirectional communication with the worker process.
send_queue = mp.Queue()
receive_queue = mp.Queue()
# NOTE(review): send_fast and send_slow are not defined in this view; they
# are presumably defined elsewhere in the original file.
proc = mp.Process(target=send_fast, args=(send_queue, receive_queue,))
proc.start()
send_slow(send_queue, receive_queue)
| [
11748,
18540,
305,
919,
278,
355,
29034,
198,
11748,
640,
198,
11748,
4738,
198,
198,
25120,
13,
28826,
3419,
628,
628,
198,
198,
21280,
62,
36560,
796,
29034,
13,
34991,
3419,
198,
260,
15164,
62,
36560,
796,
29034,
13,
34991,
3419,
... | 2.917647 | 85 |
import json
from datetime import datetime
from pathlib import Path
import numpy as np
import requests
def download_file(url: str, path: Path):
    """Download file from the given url and store it to path."""
    response = requests.get(url, stream=True)
    # Stream the body chunk-by-chunk straight into the target file.
    with path.open('wb') as out_file:
        for chunk in response.iter_content():
            out_file.write(chunk)
def parse_params(params: str) -> dict:
    """Parse the given parameters to dictionary; empty input yields {}."""
    if not params:
        return dict()
    # Parameters arrive single-quoted; JSON requires double quotes.
    return json.loads(params.replace('\'', '\"'))
def eval_params(params: dict) -> dict:
    """Evaluate string values in ``params``; non-string values pass through.

    WARNING: this uses ``eval`` — only call it on trusted configuration,
    never on external/untrusted input.
    """
    result = {}
    for key, value in params.items():
        result[key] = eval(value) if isinstance(value, str) else value
    return result
def get_write_method(logger):
    """Generate write method for logger."""
    # NOTE(review): `write` is not defined in this view — the nested
    # function body appears to be missing from the extraction; confirm
    # against the full original source.
    return write
| [
11748,
33918,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
7007,
628,
198,
4299,
4321,
62,
7753,
7,
6371,
25,
965,
11,
3108,
25,
10644,
2599,
198,
22... | 3.025926 | 270 |
# -*- coding: UTF-8 -*-
# Parity demo: print 0 for an even integer, the value itself for an odd one.
v = 14
if type(v) != int:
    print("Error: Only Support Integer!")
elif v % 2 == 0:
    print(0)
elif v % 2 == 1:
    print(v)
v = 12
print(type(v))
v = 10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
print(type(v))
v = 12.5
print(type(v))
v = 12 + 5j
print(type(v))
v = True
print(type(v))
| [
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
198,
85,
796,
1478,
198,
198,
361,
2099,
7,
85,
8,
14512,
493,
25,
198,
220,
220,
220,
3601,
7203,
12331,
25,
5514,
7929,
34142,
2474,
8,
198,
417,
361,
410,
4064,
362,... | 2.888158 | 152 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 5 13:17:22 2021
@author: trduong
"""
# import os, sys;
# sys.path.append(os.path.dirname(os.path.realpath(__file__)))
import pandas as pd
import numpy as np
import logging
import yaml
import pyro
import torch
import pyro.distributions as dist
import argparse
import sys
import pprint
import gc
from utils.evaluate_func import evaluate_pred, evaluate_distribution, evaluate_fairness
from utils.helpers import load_config
if __name__ == "__main__":
"""Load configuration"""
config_path = "/home/trduong/Data/counterfactual_fairness_game_theoric/configuration.yml"
conf = load_config(config_path)
"""Parsing argument"""
parser = argparse.ArgumentParser()
parser.add_argument('--data_path', type=str, default=conf["ivr_law"])
parser.add_argument('--result_path', type=str, default=conf["evaluate_law"])
args = parser.parse_args()
data_path = args.data_path
result_path = args.result_path
"""Set up logging"""
logger = logging.getLogger('genetic')
file_handler = logging.FileHandler(filename=conf['evaluate_law_log'])
stdout_handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
file_handler.setFormatter(formatter)
stdout_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.addHandler(stdout_handler)
logger.setLevel(logging.DEBUG)
"""Load data"""
col = ["full_linear", "full_net",
"unaware_linear", "unaware_net",
"level2_lin_True", "level2_lin_False",
"level3_lin_True", "level3_lin_False",
"AL_prediction", "GL_prediction", "GD_prediction"]
df2 = pd.read_csv(data_path)
df1 = pd.read_csv(conf['law_baseline'])
df2 = df2.drop(columns = ['LSAT','UGPA','ZFYA', 'race','sex'])
df = pd.concat([df1, df2], axis=1)
df_result = pd.DataFrame()
df_result['method'] = ''
df_result['RMSE'] = ''
df_result['MAE'] = ''
df_result['sinkhorn'] = ''
df_result['energy'] = ''
df_result['gaussian'] = ''
df_result['laplacian'] = ''
"""Evaluate performance"""
df_result = evaluate_law(df, df_result, col)
df_result['RMSE'] = df_result['RMSE'].round(decimals=4)
df_result['MAE'] = df_result['MAE'].round(decimals=4)
df_result['sinkhorn'] = df_result['sinkhorn'].round(decimals=4)
df_result['energy'] = df_result['energy'].round(decimals=4)
df_result['gaussian'] = df_result['gaussian'].round(decimals=4)
df_result['laplacian'] = df_result['laplacian'].round(decimals=4)
df_result.to_csv(result_path, index = False)
print(df_result)
print('Collecting...')
n = gc.collect()
print('Unreachable objects:', n)
print('Remaining Garbage:',)
pprint.pprint(gc.garbage)
sys.modules[__name__].__dict__.clear()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
3300,
1737,
220,
642,
1511,
25,
1558,
25,
1828,
33448,
198,
198,
31,
9800,
25,
491,
646,
... | 2.399675 | 1,231 |
"""
[2015-09-09] Challenge #231 [Intermediate] Set Game Solver
https://www.reddit.com/r/dailyprogrammer/comments/3ke4l6/20150909_challenge_231_intermediate_set_game/
Our apologies for the delay in getting this posted, there was some technical difficulties behind the scenes.
# Description
Set is a card game where each card is defined by a combination of four attributes: shape (diamond, oval, or squiggle),
color (red, purple, green), number (one, two, or three elements), and shading (open, hatched, or filled). The object of
the game is to find sets in the 12 cards drawn at a time that are distinct in every way or identical in just one way
(e.g. all of the same color). From Wikipedia: A set consists of three cards which satisfy all of these conditions:
* They all have the same number, or they have three different numbers.
* They all have the same symbol, or they have three different symbols.
* They all have the same shading, or they have three different shadings.
* They all have the same color, or they have three different colors.
The rules of Set are summarized by: If you can sort a group of three cards into "Two of ____ and one of _____," then it
is not a set.
See the [Wikipedia page for the Set game](http://en.wikipedia.org/wiki/Set_(game\))
for for more background.
# Input Description
A game will present 12 cards described with four characters for shape, color, number, and shading: (D)iamond, (O)val,
(S)quiggle; (R)ed, (P)urple, (G)reen; (1), (2), or (3); and (O)pen, (H)atched, (F)illed.
# Output Description
Your program should list all of the possible sets in the game of 12 cards in sets of triplets.
# Example Input
SP3F
DP3O
DR2F
SP3H
DG3O
SR1H
SG2O
SP1F
SP3O
OR3O
OR3H
OR2H
# Example Output
SP3F SR1H SG2O
SP3F DG3O OR3H
SP3F SP3H SP3O
DR2F SR1H OR3O
DG3O SP1F OR2H
DG3O SP3O OR3O
# Challenge Input
DP2H
DP1F
SR2F
SP1O
OG3F
SP3H
OR2O
SG3O
DG2H
DR2H
DR1O
DR3O
# Challenge Output
DP1F SR2F OG3F
DP2H DG2H DR2H
DP1F DG2H DR3O
SR2F OR2O DR2H
SP1O OG3F DR2H
OG3F SP3H DR3O
"""
if __name__ == "__main__":
main()
| [
37811,
198,
58,
4626,
12,
2931,
12,
2931,
60,
13879,
1303,
25667,
685,
9492,
13857,
60,
5345,
3776,
4294,
332,
198,
198,
5450,
1378,
2503,
13,
10748,
13,
785,
14,
81,
14,
29468,
23065,
647,
14,
15944,
14,
18,
365,
19,
75,
21,
14,
... | 2.700246 | 814 |
from torch.nn import Module
from torch import nn
##### Part forward without last classification layer for the purpose of FID computing
| [
6738,
28034,
13,
20471,
1330,
19937,
198,
6738,
28034,
1330,
299,
77,
628,
198,
220,
220,
220,
46424,
2142,
2651,
1231,
938,
17923,
7679,
329,
262,
4007,
286,
376,
2389,
14492,
198
] | 4.40625 | 32 |
import simplejson as json
from decimal import Decimal
import datetime
from unittest import TestCase
from unittest.mock import patch, MagicMock
from requests import HTTPError
from web_payments_paypal import PaypalProvider, PaypalCardProvider
from web_payments import RedirectNeeded, PaymentError, PaymentStatus
from web_payments.testcommon import create_test_payment
CLIENT_ID = 'abc123'
PAYMENT_TOKEN = '5a4dae68-2715-4b1e-8bb2-2c2dbe9255f6'
SECRET = '123abc'
VARIANT = 'paypal'
PROCESS_DATA = {
'name': 'John Doe',
'number': '371449635398431',
'expiration': (datetime.datetime.now()+datetime.timedelta(weeks=3*52)).strftime("%m/%Y"),
'cvv2': '1234'}
Payment = create_test_payment(variant=VARIANT, token=PAYMENT_TOKEN)
Payment.extra_data = json.dumps({'links': {
'approval_url': None,
'capture': {'href': 'http://capture.com'},
'refund': {'href': 'http://refund.com'},
'execute': {'href': 'http://execute.com'}
}})
| [
11748,
2829,
17752,
355,
33918,
198,
6738,
32465,
1330,
4280,
4402,
198,
11748,
4818,
8079,
198,
6738,
555,
715,
395,
1330,
6208,
20448,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
8529,
11,
6139,
44,
735,
198,
6738,
7007,
1330,
1462... | 2.482143 | 392 |
import nltk
import numpy as np
import pandas as pd
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
nltk.download('vader_lexicon')
sid = SentimentIntensityAnalyzer()
a = 'This was a good movie'
score = sid.polarity_scores(a)
print(score)
a = 'This was the best, most awesome movie EVER MADE!!!'
score = sid.polarity_scores(a)
print(score)
a = 'This was the worst film to ever disgrace the screen'
score = sid.polarity_scores(a)
print(score)
# Anhand von echten Daten
df = pd.read_csv('./TextFiles/amazonreviews.tsv', sep='\t')
print(df.head())
print(df['label'].value_counts())
df.dropna(inplace=True)
blanks = []
for i,lb,rv in df.itertuples(): # Iteriere über den DataFrame
if type(rv)==str: # Vermeide NaN-Werte
if rv.isspace(): # Teste 'review' auf Leerzeichen
blanks.append(i) # Füge der Liste passende Indizes hinzu
df.drop(blanks, inplace=True)
print(df['label'].value_counts())
a = df.loc[0]['review'] # erste Zeile aus dem DataFrame
score = sid.polarity_scores(a)
print(score)
# Scores für das ganze DataFrame berechnen und hinzufügen
df['scores'] = df['review'].apply(lambda review: sid.polarity_scores(review))
print(df.head())
# Value von compound holen
df['compound'] = df['scores'].apply(lambda score_dict: score_dict['compound'])
print(df.head())
# compound score berechnen (Anzeige ob positiv oder negativ)
df['comp_score'] = df['compound'].apply(lambda c: 'pos' if c >= 0 else 'neg')
print(df.head())
# Genauigkeit ausgeben
print(accuracy_score(df['label'], df['comp_score']))
# Übersicht ausgeben
print(classification_report(df['label'], df['comp_score']))
# Konfusionsmatrix ausgeben
print(confusion_matrix(df['label'], df['comp_score'])) | [
11748,
299,
2528,
74,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
299,
2528,
74,
13,
34086,
3681,
13,
85,
5067,
1330,
11352,
3681,
5317,
6377,
37702,
9107,
198,
6738,
1341,
35720,
13,
4164,
104... | 2.549157 | 712 |
import numpy as np
import torch
import torch.nn as nn
| [
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
628,
628,
628
] | 3.105263 | 19 |
# ref: https://tutorials-raspberrypi.com/raspberry-pi-servo-motor-control/
import RPi.GPIO as GPIO
import time
servoPIN = 17
GPIO.setmode(GPIO.BCM)
GPIO.setup(servoPIN, GPIO.OUT)
| [
2,
1006,
25,
3740,
1378,
83,
44917,
82,
12,
81,
17653,
14415,
13,
785,
14,
81,
17653,
12,
14415,
12,
3168,
78,
12,
76,
20965,
12,
13716,
14,
198,
198,
11748,
25812,
72,
13,
16960,
9399,
355,
50143,
198,
11748,
640,
198,
198,
3168,... | 2.394737 | 76 |
#!/usr/bin/env python
from distutils.core import setup
setup(name='Project.Euler',
version='0.0.1',
description='Python3 implementation of Project Euler problems.',
author='Jonathan Madden',
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
6738,
1233,
26791,
13,
7295,
1330,
9058,
198,
198,
40406,
7,
3672,
11639,
16775,
13,
36,
18173,
3256,
198,
220,
220,
220,
220,
220,
2196,
11639,
15,
13,
15,
13,
16,
3256,
198,
... | 2.772152 | 79 |
from rest_framework.views import APIView, Response
from .models import Transaction
from rest_framework.permissions import AllowAny
from .click_authorization import click_authorization
from .serializer import ClickUzSerializer
from .status import *
| [
6738,
1334,
62,
30604,
13,
33571,
1330,
3486,
3824,
769,
11,
18261,
198,
6738,
764,
27530,
1330,
45389,
198,
6738,
1334,
62,
30604,
13,
525,
8481,
1330,
22507,
7149,
198,
6738,
764,
12976,
62,
9800,
1634,
1330,
3904,
62,
9800,
1634,
1... | 4.220339 | 59 |
from collections import defaultdict
instruction_list = lambda:defaultdict(instruction_list)
instruction_table = instruction_list()
# RV32I
## Branch
instruction_table['0x18']['0'] = 'beq'
instruction_table['0x18']['1'] = 'bne'
instruction_table['0x18']['4'] = 'blt'
instruction_table['0x18']['5'] = 'bge'
instruction_table['0x18']['6'] = 'bltu'
instruction_table['0x18']['7'] = 'bgeu'
## Jump
instruction_table['0x19'] = 'jalr'
instruction_table['0x1b'] = 'jal'
## Upper Immediate
instruction_table['0x0d'] = 'lui'
instruction_table['0x05'] = 'auipc'
## Arithmetic & Computation Immediate
instruction_table['0x04']['0'] = 'addi'
instruction_table['0x04']['1'] = 'slli'
instruction_table['0x04']['2'] = 'slti'
instruction_table['0x04']['3'] = 'sltiu'
instruction_table['0x04']['4'] = 'xori'
instruction_table['0x04']['5']['0'] = 'srli'
instruction_table['0x04']['5']['16'] = 'srai'
instruction_table['0x04']['6'] = 'ori'
instruction_table['0x04']['7'] = 'andi'
## Arithmetic & Computation Register to Register
instruction_table['0x0c']['0']['0'] = 'add'
instruction_table['0x0c']['0']['32'] = 'sub'
instruction_table['0x0c']['1']['0'] = 'sll'
instruction_table['0x0c']['2']['0'] = 'slt'
instruction_table['0x0c']['3']['0'] = 'sltu'
instruction_table['0x0c']['4']['0'] = 'xor'
instruction_table['0x0c']['5']['0'] = 'srl'
instruction_table['0x0c']['5']['32'] = 'sra'
instruction_table['0x0c']['6']['0'] = 'or'
instruction_table['0x0c']['7']['0'] = 'and'
## Extended Arithmetic & Computation Immediate
instruction_table['0x06']['0'] = 'addiw'
instruction_table['0x06']['1'] = 'slliw'
instruction_table['0x06']['5']['0'] = 'srliw'
instruction_table['0x06']['5']['16'] = 'sraiw'
## Extended Arithmetic & Computation Register to Register
instruction_table['0x0e']['0']['0'] = 'addw'
instruction_table['0x0e']['0']['32'] = 'subw'
instruction_table['0x0e']['1']['0'] = 'sllw'
instruction_table['0x0e']['5']['0'] = 'srlw'
instruction_table['0x0e']['5']['32'] = 'sraw'
## Load
instruction_table['0x00']['0'] = 'lb'
instruction_table['0x00']['1'] = 'lh'
instruction_table['0x00']['2'] = 'lw'
instruction_table['0x00']['3'] = 'ld'
instruction_table['0x00']['4'] = 'lbu'
instruction_table['0x00']['5'] = 'lhu'
instruction_table['0x00']['6'] = 'lwu'
## Store
instruction_table['0x08']['0'] = 'sb'
instruction_table['0x08']['1'] = 'sh'
instruction_table['0x08']['2'] = 'sw'
instruction_table['0x08']['3'] = 'sd'
## Fence
instruction_table['0x03']['0'] = 'fence'
instruction_table['0x03']['1'] = 'fence.i'
# RV32M
instruction_table['0x0c']['0']['1'] = 'mul'
instruction_table['0x0c']['1']['1'] = 'mulh'
instruction_table['0x0c']['2']['1'] = 'mulhsu'
instruction_table['0x0c']['3']['1'] = 'mulhu'
instruction_table['0x0c']['4']['1'] = 'div'
instruction_table['0x0c']['5']['1'] = 'divu'
instruction_table['0x0c']['6']['1'] = 'rem'
instruction_table['0x0c']['7']['1'] = 'remu'
# RV64M
instruction_table['0x0e']['0']['1'] = 'mulw'
instruction_table['0x0e']['4']['1'] = 'divw'
instruction_table['0x0e']['5']['1'] = 'divuw'
instruction_table['0x0e']['6']['1'] = 'remw'
instruction_table['0x0e']['7']['1'] = 'remuw'
# RV32A
instruction_table['0x0b']['2']['0']['0'] = 'amoadd.w'
instruction_table['0x0b']['2']['0']['1'] = 'amoxor.w'
instruction_table['0x0b']['2']['0']['2'] = 'amoor.w'
instruction_table['0x0b']['2']['0']['3'] = 'amoand.w'
instruction_table['0x0b']['2']['0']['4'] = 'amomin.w'
instruction_table['0x0b']['2']['0']['5'] = 'amomax.w'
instruction_table['0x0b']['2']['0']['6'] = 'amominu.w'
instruction_table['0x0b']['2']['0']['7'] = 'amomaxu.w'
instruction_table['0x0b']['2']['1']['0'] = 'amoswap.w'
instruction_table['0x0b']['2']['2']['0'] = 'lr.w'
instruction_table['0x0b']['2']['3']['0'] = 'sc.w'
# RV64A
instruction_table['0x0b']['3']['0']['0'] = 'amoadd.d'
instruction_table['0x0b']['3']['0']['1'] = 'amoxor.d'
instruction_table['0x0b']['3']['0']['2'] = 'amoor.d'
instruction_table['0x0b']['3']['0']['3'] = 'amoand.d'
instruction_table['0x0b']['3']['0']['4'] = 'amomin.d'
instruction_table['0x0b']['3']['0']['5'] = 'amomax.d'
instruction_table['0x0b']['3']['0']['6'] = 'amominu.d'
instruction_table['0x0b']['3']['0']['7'] = 'amomaxu.d'
instruction_table['0x0b']['3']['1']['0'] = 'amoswap.d'
instruction_table['0x0b']['3']['2']['0'] = 'lr.d'
instruction_table['0x0b']['3']['3']['0'] = 'sc.d'
# F/D EXTENSIONS
instruction_table['0x14']['0']['0'] = 'fadd.s'
instruction_table['0x14']['1']['0'] = 'fsub.s'
instruction_table['0x14']['2']['0'] = 'fmul.s'
instruction_table['0x14']['3']['0'] = 'fdiv.s'
instruction_table['0x14']['11']['0'] = 'fsqrt.s'
instruction_table['0x14']['4']['0']['0'] = 'fsgnj.s'
instruction_table['0x14']['4']['0']['1'] = 'fsgnjn.s'
instruction_table['0x14']['4']['0']['2'] = 'fsgnjx.s'
instruction_table['0x14']['5']['0']['0'] = 'fmin.s'
instruction_table['0x14']['5']['0']['1'] = 'fmax.s'
instruction_table['0x14']['0']['1'] = 'fadd.d'
instruction_table['0x14']['1']['1'] = 'fsub.d'
instruction_table['0x14']['2']['1'] = 'fmul.d'
instruction_table['0x14']['3']['1'] = 'fdiv.d'
instruction_table['0x14']['8']['0'] = 'fcvt.s.d'
instruction_table['0x14']['8']['1'] = 'fcvt.d.s'
instruction_table['0x14']['11']['1'] = 'fsqrt.d'
instruction_table['0x14']['4']['1']['0'] = 'fsgnj.d'
instruction_table['0x14']['4']['1']['1'] = 'fsgnjn.d'
instruction_table['0x14']['4']['1']['2'] = 'fsgnjx.d'
instruction_table['0x14']['5']['1']['0'] = 'fmin.d'
instruction_table['0x14']['5']['1']['1'] = 'fmax.s'
instruction_table['0x14']['20']['0']['0'] = 'fle.s'
instruction_table['0x14']['20']['0']['1'] = 'flt.s'
instruction_table['0x14']['20']['0']['2'] = 'feq.s'
instruction_table['0x14']['20']['1']['0'] = 'fle.d'
instruction_table['0x14']['20']['1']['1'] = 'flt.d'
instruction_table['0x14']['20']['1']['2'] = 'feq.d'
instruction_table['0x14']['24']['0']['0'] = 'fcvt.w.s'
instruction_table['0x14']['24']['0']['1'] = 'fcvt.wu.s'
instruction_table['0x14']['24']['0']['2'] = 'fcvt.l.s'
instruction_table['0x14']['24']['0']['3'] = 'fcvt.lu.s'
instruction_table['0x14']['28']['0']['0']['0'] = 'fmv.x.s'
instruction_table['0x14']['28']['0']['0']['1'] = 'fclass.s'
instruction_table['0x14']['24']['1']['0'] = 'fcvt.w.d'
instruction_table['0x14']['24']['1']['1'] = 'fcvt.wu.d'
instruction_table['0x14']['24']['1']['2'] = 'fcvt.l.d'
instruction_table['0x14']['24']['1']['3'] = 'fcvt.lu.d'
instruction_table['0x14']['28']['1']['0']['0'] = 'fmv.x.d'
instruction_table['0x14']['28']['1']['0']['1'] = 'fclass.d'
instruction_table['0x14']['26']['0']['0'] = 'fcvt.s.w'
instruction_table['0x14']['26']['0']['1'] = 'fcvt.s.wu'
instruction_table['0x14']['26']['0']['2'] = 'fcvt.s.l'
instruction_table['0x14']['26']['0']['3'] = 'fcvt.s.lu'
instruction_table['0x14']['26']['1']['0'] = 'fcvt.d.w'
instruction_table['0x14']['26']['1']['1'] = 'fcvt.d.wu'
instruction_table['0x14']['26']['1']['2'] = 'fcvt.d.l'
instruction_table['0x14']['26']['1']['3'] = 'fcvt.d.lu'
instruction_table['0x14']['30']['0']['0'] = 'fmv.s.x'
instruction_table['0x14']['30']['1']['0'] = 'fmv.d.x'
instruction_table['0x01']['2'] = 'flw'
instruction_table['0x01']['3'] = 'fld'
instruction_table['0x09']['2'] = 'fsw'
instruction_table['0x09']['3'] = 'fsd'
instruction_table['0x10']['0'] = 'fmadd.s'
instruction_table['0x11']['0'] = 'fmsub.s'
instruction_table['0x12']['0'] = 'fnmsub.s'
instruction_table['0x13']['0'] = 'fnmadd.s'
instruction_table['0x10']['1'] = 'fmadd.d'
instruction_table['0x11']['1'] = 'fmsub.d'
instruction_table['0x12']['1'] = 'fnmsub.d'
instruction_table['0x13']['1'] = 'fnmadd.d'
# SYSTEM
instruction_table['0x1c']['0']['0'] = 'ecall'
instruction_table['0x1c']['0']['1'] = 'ebreak'
instruction_table['0x1c']['0']['2'] = 'uret'
instruction_table['0x1c']['0']['258'] = 'sret'
instruction_table['0x1c']['0']['514'] = 'hret'
instruction_table['0x1c']['0']['770'] = 'mret'
instruction_table['0x1c']['0']['260'] = 'sfence.vm'
instruction_table['0x1c']['0']['261'] = 'wfi'
instruction_table['0x1c']['1'] = 'csrrw'
instruction_table['0x1c']['2'] = 'csrrs'
instruction_table['0x1c']['3'] = 'csrrc'
instruction_table['0x1c']['5'] = 'csrrwi'
instruction_table['0x1c']['6'] = 'csrrsi'
instruction_table['0x1c']['7'] = 'csrrci'
| [
6738,
17268,
1330,
4277,
11600,
198,
198,
8625,
2762,
62,
4868,
796,
37456,
25,
12286,
11600,
7,
8625,
2762,
62,
4868,
8,
198,
8625,
2762,
62,
11487,
796,
12064,
62,
4868,
3419,
198,
198,
2,
31367,
2624,
40,
198,
2235,
20551,
198,
8... | 1.918107 | 4,225 |
# Podemos modificar a saídas dos números sem modificar os valores reais
# Ex: Número 2 => Valor é 2. Saída => 2.00 => Valor ainda é 2
# Número 2.5 => Valor é 2.5 Saída => 2 => Valor ainda é 2.5
numero = 1.61
print(numero)
# Para inteiros uso o 'd' e para floats uso o 'f'
# para numeros quebrados ele arredonda
print('{:.2f}'.format(numero))
print('{:.1f}'.format(numero))
print('{:.0f}'.format(numero))
print(f'{numero:.2f}')
print(f'{numero:.1f}')
print(f'{numero:.0f}')
| [
2,
17437,
368,
418,
953,
811,
283,
257,
473,
8836,
67,
292,
23430,
299,
21356,
647,
418,
5026,
953,
811,
283,
28686,
1188,
2850,
302,
15152,
198,
2,
1475,
25,
399,
21356,
647,
78,
362,
5218,
3254,
273,
38251,
362,
13,
10318,
8836,
... | 2.129464 | 224 |
from rest_framework import viewsets
from rest_framework.response import Response
from . serializers import *
from . models import *
from . api_renderers import GeoJsonRenderer
from rest_framework.settings import api_settings
| [
6738,
1334,
62,
30604,
1330,
5009,
1039,
198,
6738,
1334,
62,
30604,
13,
26209,
1330,
18261,
198,
6738,
764,
11389,
11341,
1330,
1635,
198,
6738,
764,
4981,
1330,
1635,
198,
6738,
764,
40391,
62,
10920,
19288,
1330,
32960,
41,
1559,
49,... | 4.017544 | 57 |
import os
import tornado.ioloop
import tornado.web
port = os.environ.get('PORT', 8080)
if __name__ == "__main__":
app = make_app()
app.listen(port)
tornado.ioloop.IOLoop.current().start()
| [
11748,
28686,
198,
198,
11748,
33718,
13,
1669,
11224,
198,
11748,
33718,
13,
12384,
198,
198,
634,
796,
28686,
13,
268,
2268,
13,
1136,
10786,
15490,
3256,
4019,
1795,
8,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1... | 2.506173 | 81 |
"""
backend URL Configuration
See the voter_validation folder for more details.
"""
import backend.settings as settings
from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic import RedirectView
urlpatterns = [
url(r'^private/admin/', include(admin.site.urls)),
url(r'^', include('voter_validation.urls')),
url(r'^favicon.ico$', RedirectView.as_view(
url=settings.STATIC_URL + "favicon.ico",
permanent=False),
name="favicon"),
# Keep robots away from herokuapp website.
url(r'^robots\.txt', include('hide_herokuapp.urls')),
]
if settings.DEBUG:
import debug_toolbar
urlpatterns = [
url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
| [
37811,
198,
1891,
437,
10289,
28373,
198,
6214,
262,
10765,
62,
12102,
341,
9483,
329,
517,
3307,
13,
198,
37811,
198,
11748,
30203,
13,
33692,
355,
6460,
198,
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
2291,
11,
19016,
198,
6... | 2.57 | 300 |
import requests
from bs4 import BeautifulSoup | [
11748,
7007,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486
] | 4.090909 | 11 |
from .models import WALL, EMPTY
from .a_star_search import reconstruct_path, end_reached
START = 100
GOAL = 200
HORIZONTAL_PATH = 300
VERTICAL_PATH = 400
CHAR_MAPPING = {
WALL: '#',
EMPTY: ' ',
GOAL: 'E',
HORIZONTAL_PATH: '-',
VERTICAL_PATH: '|',
START: 'S'
}
| [
6738,
764,
27530,
1330,
370,
7036,
11,
38144,
9936,
198,
6738,
764,
64,
62,
7364,
62,
12947,
1330,
31081,
62,
6978,
11,
886,
62,
260,
2317,
628,
198,
2257,
7227,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
796,
1802,
198,
11... | 1.882022 | 178 |
if __name__ == "__main__":
generator = parrot()
generator.send(None)
generator.send("Hello")
generator.send("World")
| [
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
17301,
796,
1582,
10599,
3419,
198,
220,
220,
220,
17301,
13,
21280,
7,
14202,
8,
198,
220,
220,
220,
17301,
13,
21280,
7203,
15496,
4943,
198,
2... | 2.596154 | 52 |
from typing import Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.checkpoint import checkpoint
from .mlp_layer import MLP
from cogdl.utils import get_activation, mul_edge_softmax, get_norm_layer
class ResGNNLayer(nn.Module):
"""
Implementation of DeeperGCN in paper `"DeeperGCN: All You Need to Train Deeper GCNs"` <https://arxiv.org/abs/2006.07739>
Parameters
-----------
conv : nn.Module
An instance of GNN Layer, recieving (graph, x) as inputs
n_channels : int
size of input features
activation : str
norm: str
type of normalization, ``batchnorm`` as default
dropout : float
checkpoint_grad : bool
"""
| [
6738,
19720,
1330,
32233,
198,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
6738,
28034,
13,
26791,
13,
9122,
4122,
1330,
26954,
198,
198,
6738,
764,
4029,
79,
... | 2.817829 | 258 |
import sys
import os
import threading
import matplotlib.pyplot as plt
sys.path.append(os.path.abspath("../../IoTPy/multiprocessing"))
sys.path.append(os.path.abspath("../../IoTPy/core"))
sys.path.append(os.path.abspath("../../IoTPy/agent_types"))
sys.path.append(os.path.abspath("../../IoTPy/helper_functions"))
sys.path.append(os.path.abspath("../signal_processing_examples"))
# multicore is in ../../IoTPy/multiprocessing
from multicore import shared_memory_process, Multiprocess
# op, sink, source, merge are in ../../IoTPy/agent_types
from op import map_element
from sink import stream_to_file
from source import source_list_to_stream
from merge import zip_map
# stream is in ../../IoTPy/core
from stream import Stream, StreamArray
# window_dot_product is in ../signal_processing_examples
from window_dot_product import window_dot_product
#-----------------------------------------------------------------------
# TESTS
#-----------------------------------------------------------------------
if __name__ == '__main__':
test()
| [
11748,
25064,
198,
11748,
28686,
198,
11748,
4704,
278,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
17597,
13,
6978,
13,
33295,
7,
418,
13,
6978,
13,
397,
2777,
776,
7203,
40720,
40720,
40,
78,
7250,
88,
14,
1... | 2.93733 | 367 |
import tensorflow as tf
from customParameters import *
import myMethod as myMethod
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sn
import argparse
# use method:
# python predict.py --model myModel --type whole_history_epoch
# python predict.py --model myVGG --type whole --load_path /Users/wyc/Downloads/cp-0560.ckpt
# python predict.py --model myModel --type whole_history_epoch --train_name withoutFirstBN --total_epoch 171
parser = argparse.ArgumentParser(description='predicted with confusion matrix')
parser.add_argument('--model', type=str, default='myModel')
parser.add_argument('--type', type=str, default='whole')
parser.add_argument('--load_path', type=str)
parser.add_argument('--train_name', type=str, default='newTrain')
parser.add_argument('--total_epoch', type=int, default=600)
# parser.add_argument('--gpus', type=int, default=1)
args = parser.parse_args()
max_epoch = args.total_epoch
test_private_path = "./data/FER2013/private_test.csv"
private_test_data = myMethod.get_dataset_test(test_private_path)
private_test_data = private_test_data.map(myMethod.preprocess_DAtestdata)
# get standard result
correct_answer = np.loadtxt(test_private_path, dtype=np.int, delimiter=',',
skiprows=1, usecols=(0), encoding='utf-8')
# correct_answer = correct_answer.repeat(10)
if args.model == 'myVGG':
model = myMethod.create_myVGG()
else:
model = myMethod.create_myModel()
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=["accuracy"])
if args.type == 'whole':
load_path = args.load_path
y = get_acc_predict(load_path)
myMethod.plot_heat_map(y, correct_answer)
if args.type == 'whole_history_epoch':
testname = args.train_name
get_history_acc(testname)
| [
11748,
11192,
273,
11125,
355,
48700,
198,
6738,
2183,
48944,
1330,
1635,
198,
11748,
616,
17410,
355,
616,
17410,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
... | 2.70649 | 678 |
class LinkConversionData(object,IDisposable):
"""
This class contains the information necessary to re-create a Revit document
from an external source.
"""
def Dispose(self):
""" Dispose(self: LinkConversionData) """
pass
def GetOptions(self):
"""
GetOptions(self: LinkConversionData) -> IDictionary[str,str]
Extra information used during the creation of the Revit document.
Returns: The extra information used during the creation of the Revit document.
"""
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: LinkConversionData,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Specifies whether the .NET object represents a valid Revit entity.
Get: IsValidObject(self: LinkConversionData) -> bool
"""
Path=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The path to the source data used to generate the model.
Get: Path(self: LinkConversionData) -> str
"""
ServerId=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The service responsible for converting the data into a Revit file.
Get: ServerId(self: LinkConversionData) -> Guid
"""
| [
4871,
7502,
3103,
9641,
6601,
7,
15252,
11,
2389,
271,
1930,
540,
2599,
201,
198,
37227,
201,
198,
770,
1398,
4909,
262,
1321,
3306,
284,
302,
12,
17953,
257,
5416,
270,
3188,
201,
201,
198,
220,
220,
220,
422,
281,
7097,
2723,
13,
... | 2.985318 | 613 |
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ply import lex
from ply import yacc
from solar import errors
tokens = (
"STRING",
"AND",
"OR",
"LPAREN",
"RPAREN")
t_STRING = r'[A-Za-z0-9-_/\\]+'
t_AND = '&|,'
t_OR = r'\|'
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_ignore = ' \t\r\n'
def p_expression_logical_op(p):
"""Parser
expression : expression AND expression
| expression OR expression
"""
result, arg1, op, arg2 = p
if op == '&' or op == ',':
result = lambda: arg1() and arg2()
elif op == '|':
result = lambda: arg1() or arg2()
p[0] = SubexpressionWrapper(result)
def p_expression_string(p):
"""Parser
expression : STRING
"""
p[0] = ScalarWrapper(p[1])
def p_expression_group(p):
"""Parser
expression : LPAREN expression RPAREN
"""
p[0] = p[2]
lexer = lex.lex()
parser = yacc.yacc(debug=False, write_tables=False)
expression = None
| [
2,
220,
220,
220,
15069,
1853,
7381,
20836,
11,
3457,
13,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
220,
407,
779,
428,
2393,
... | 2.508972 | 613 |
import os
#图像分类
from cv2 import cv2
from PIL import Image,ImageDraw
from datetime import datetime
import time
from pytesseract import image_to_string
#detectFaces()返回图像中所有人脸的矩形坐标(矩形左上、右下顶点)
#使用haar特征的级联分类器haarcascade_frontalface_default.xml,在haarcascades目录下还有其他的训练好的xml文件可供选择。
#注:haarcascades目录下训练好的分类器必须以灰度图作为输入。
#保存人脸图
#在原图像上画矩形,框出所有人脸。
#调用Image模块的draw方法,Image.open获取图像句柄,ImageDraw.Draw获取该图像的draw实例,然后调用该draw实例的rectangle方法画矩形(矩形的坐标即
#detectFaces返回的坐标),outline是矩形线条颜色(B,G,R)。
#注:原始图像如果是灰度图,则去掉outline,因为灰度图没有RGB可言。drawEyes、detectSmiles也一样。
#检测眼睛,返回坐标
#由于眼睛在人脸上,我们往往是先检测出人脸,再细入地检测眼睛。故detectEyes可在detectFaces基础上来进行,代码中需要注意“相对坐标”。
#当然也可以在整张图片上直接使用分类器,这种方法代码跟detectFaces一样,这里不多说。
#在原图像上框出眼睛.
#检测笑脸
#在原图像上框出笑脸
| [
11748,
28686,
198,
2,
32368,
122,
161,
225,
237,
26344,
228,
163,
109,
119,
198,
6738,
269,
85,
17,
1330,
269,
85,
17,
198,
6738,
350,
4146,
1330,
7412,
11,
5159,
25302,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
640,
198,... | 0.882289 | 926 |
import re
from utils.rules.base import Base
| [
11748,
302,
198,
198,
6738,
3384,
4487,
13,
38785,
13,
8692,
1330,
7308,
628
] | 3.285714 | 14 |
__version__ = "0.1.0"
import os
import discord
from dotenv import load_dotenv
load_dotenv()
bot = AutoDisconnect()
token = os.environ.get("TOKEN", None)
if token is None or len(token.strip()) == 0:
print("\nA bot token is necessary for the bot to function.\n")
raise RuntimeError
else:
bot.run(token)
| [
834,
9641,
834,
796,
366,
15,
13,
16,
13,
15,
1,
198,
198,
11748,
28686,
198,
198,
11748,
36446,
198,
6738,
16605,
24330,
1330,
3440,
62,
26518,
24330,
198,
198,
2220,
62,
26518,
24330,
3419,
628,
198,
198,
13645,
796,
11160,
7279,
... | 2.711864 | 118 |
from typing import Callable, Tuple
| [
6738,
19720,
1330,
4889,
540,
11,
309,
29291,
628
] | 4 | 9 |
"""Add titles to charts and tables
This is in preparation for storing the titles of charts and tables in
a separate database column, rather than as part of the JSON objects.
Revision ID: 2019_03_21_add_titles
Revises: 2019_03_20_tidy_up_dimension
Create Date: 2019-03-14 15:11:33.560576
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "2019_03_21_add_titles"
down_revision = "2019_03_20_tidy_up_dimension"
branch_labels = None
depends_on = None
| [
37811,
4550,
8714,
284,
15907,
290,
8893,
198,
198,
1212,
318,
287,
11824,
329,
23069,
262,
8714,
286,
15907,
290,
8893,
287,
198,
64,
4553,
6831,
5721,
11,
2138,
621,
355,
636,
286,
262,
19449,
5563,
13,
198,
198,
18009,
1166,
4522,
... | 3.017857 | 168 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Step2 : BLAST hits were parsed to retrieve associated taxonomic information
# using the NCBI's taxonomy database.
# Downloaded resource:
# wget ftp://ftp.ncbi.nih.gov/pub/taxonomy/taxdump.tar.gz
# tar -zxvf taxdump.tar.gz
# https://www.biostars.org/p/222183/
# This script is to obtain lineage from tax id
import re
import sys
import os
from subprocess import Popen, PIPE
PATH="/Users/leyu/Documents/coding/evolution_code/HGT_analysis/HGT_prediction_tool/"
# ncbi gi dump files location
# PATH="./"
LINNAEUS_FILTER = ["species","genus","family","order","class","phylum","kingdom","superkingdom"]
# print >> sys.stderr, 'taxa_library_cluster initializing :: Building Names database'
# g_names = buildNames()
# print >> sys.stderr, 'taxa_library_cluster initializing :: Building Nodes database'
# (g_nodes, g_levels) = buildNodes()
if __name__== "__main__":
query = 559292
queryName = "Saccharomyces cerevisiae"
names = buildNames()
nodes, levels = buildNodes()
taxonomy = buildLineage(query, names, nodes)
result = str(query)+"\t"+queryName+"|"
for level in LINNAEUS_FILTER:
if level in taxonomy:
result += "\t"+str(taxonomy[level][0])+"\t"+taxonomy[level][1]+"|"
else:
result += "\tnone\tnone"
print(result)
# Results:
# 4932 Saccharomyces cerevisiae| 4932 Saccharomyces cerevisiae| 4930 Saccharomyces| 4893 Saccharomycetaceae|
# 4892 Saccharomycetales| 4891 Saccharomycetes| 4890 Ascomycota| 4751 Fungi| 2759 Eukaryota|
# [Finished in 11.1s]
# result = giListLookup([6322])
# print(result)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
5012,
17,
1058,
9878,
11262,
7127,
547,
44267,
284,
19818,
3917,
1687,
40036,
1321,
220,
198,
2,
220,
197,
19... | 2.459064 | 684 |
from whylabs.logs.proto import InferredType
from whylabs.logs.core.statistics import SchemaTracker
Type = InferredType.Type
| [
6738,
1521,
75,
8937,
13,
6404,
82,
13,
1676,
1462,
1330,
554,
18186,
6030,
198,
6738,
1521,
75,
8937,
13,
6404,
82,
13,
7295,
13,
14269,
3969,
1330,
10011,
2611,
35694,
198,
6030,
796,
554,
18186,
6030,
13,
6030,
628,
628,
628,
628... | 3 | 45 |
import msgpack
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
import settings
app = FastAPI()
origins = [
'http://localhost:3000',
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=['*'],
allow_headers=['*']
)
@app.get("/sync/{metric}") | [
11748,
31456,
8002,
198,
6738,
3049,
15042,
1330,
12549,
17614,
198,
6738,
3049,
15042,
13,
27171,
1574,
13,
66,
669,
1330,
23929,
12310,
2509,
1574,
198,
11748,
6460,
198,
198,
1324,
796,
12549,
17614,
3419,
198,
198,
11612,
1040,
796,
... | 2.518248 | 137 |
import asyncio
import os
from threading import Timer
import aiofiles
import aiohttp
import pandas as pd
import untangle
from aiohttp import ClientSession
from progress.bar import Bar
username = os.getenv("HAMQTH_USERNAME")
password = os.getenv("HAMQTH_PASSWORD")
api_root = "https://www.hamqth.com/xml.php"
auth_uri = f"{api_root}?u={username}&p={password}"
data_dir = "data/hamqth"
# Get HamQTH Session I
session_id = SessionToken()
async def fetch_html(url: str, session: ClientSession, **kwargs) -> str:
"""GET request wrapper to fetch page HTML.
kwargs are passed to `session.request()`.
"""
resp = await session.request(method="GET", url=url, **kwargs)
resp.raise_for_status()
# logger.info("Got response [%s] for URL: %s", resp.status, url)
html = await resp.text()
return html
dir = "data/reverse_beacon_network"
for file in os.listdir(dir):
asyncio.run(retrieve_callsigns(directory=dir, file=file))
| [
11748,
30351,
952,
198,
11748,
28686,
198,
6738,
4704,
278,
1330,
5045,
263,
198,
198,
11748,
257,
952,
16624,
198,
11748,
257,
952,
4023,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
1418,
9248,
198,
6738,
257,
952,
4023,
1330,
... | 2.771676 | 346 |
"""
Question Source:Leetcode
Level: Easy
Topic: Math
Solver: Tayyrov
Date: 28.05.2022
"""
from typing import List
| [
37811,
201,
198,
24361,
8090,
25,
3123,
316,
8189,
201,
198,
4971,
25,
16789,
201,
198,
33221,
25,
16320,
201,
198,
50,
14375,
25,
25569,
88,
18657,
201,
198,
10430,
25,
2579,
13,
2713,
13,
1238,
1828,
201,
198,
37811,
201,
198,
673... | 2.574468 | 47 |
from django.conf.urls import url
from django.contrib import admin
from . import views
urlpatterns = [
url(r'^$', views.GeneralSettingsView.as_view(), name='general'),
url(r'^technical/$', views.TechnicalSettingsView.as_view(), name='technical'),
url(r'^teller/$', views.TellerSettingsView.as_view(), name='teller'),
url(r'^email/$', views.EmailSettingsView.as_view(), name='email'),
]
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
6738,
764,
1330,
5009,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
19016,
7,
81,
6,
61,
3,
3256,
... | 2.858156 | 141 |
# Copyright 2009 Yelp
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the TestResult class, each instance of which holds status information for a single test method."""
from __future__ import print_function
import datetime
import sys
import time
import traceback
import six
from testify.utils import inspection
__testify = 1
# vim: set ts=4 sts=4 sw=4 et:
| [
2,
15069,
3717,
44628,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
7330,
257,... | 3.83913 | 230 |
from Database import Database_connection as db
database = db()
qy = """
INSERT INTO `berita_detail` (`judul`, `waktu`, `tag`, `isi`, `sumber`) VALUES
('Mentan harap tatanan normal baru pulihkan permintaan produk pertanian', '2020-06-07', '[normal baru,new normal,petani]', ' Dengan kebijakan normal baru utamanya di sektor pariwisata diharapkan dapat memulihkan permintaan produk pertanian Jakarta (ANTARA) - Menteri Pertanian Syahrul Yasin Limpo berharap tatanan normal baru dapat mendongkrak kesejahteraan petani dan memulihkan permintaan produk pertanian dengan dimulainya aktivitas hotel, restoran, katering (Horeka) dan perkantoran. Dampak yang ditimbulkan akibat pandemi ini masih dirasakan masyarakat, termasuk para petani. Faktor yang mempengaruhi petani yakni harga produk pertanian mengalami tekanan diakibatkan oleh panen raya musim tanam pertama. \"Kondisi ini menyebabkan deflasi kelompok bahan makanan dimana jumlah bahan pangan di lapangan banyak namun permintaan berkurang berakibat langsung dengan pendapatan petani,\" kata Syahrul dalam keterangan di Jakarta, Minggu. Selain itu, petani juga dihadapkan pada gangguan distribusi akibat Pembatasan Sosial Berskala Besar (PSBB), penurunan daya beli masyarakat, melemahnya sektor ekonomi yang terkait dengan sektor pertanian seperti Horeka dan perkantoran. Menurut Mentan, selama pandemi deflasi kelompok bahan makanan masih berimplikasi positif terhadap stabilitas sosial dan politik. Untuk mengurangi dampak ke pendapatan yang diterima petani, pemerintah memberikan bantuan sosial yang dapat mengkompensasi penurunan daya beli petani yang diakibatkan oleh penurunan harga produk pertanian. \"Dengan kebijakan normal baru utamanya di sektor pariwisata diharapkan dapat memulihkan permintaan produk pertanian sehingga dapat memperbaiki harga di tingkat petani,\" kata Syahrul. Kementerian Pertanian (Kementan) mencatat bahwa panen raya musim pertama sukses mengamankan stok pangan sehingga tidak terjadi gejolak kenaikan harga dan tersendatnya distribusi 11 bahan pokok khususnya dalam menghadapi Ramadhan dan Hari Raya Idul Fitri. 
Eksport komoditas pertanian juga masih tumbuh sebesar 12,6 persen. Namun demikian, Nilai Tukar Petani (NTP) diakui memang turun akibat pandemi. Syahrul menilai kondisi ini hanya sesaat. Menurut Mentan, kunci meningkatkan NTP adalah menyeimbangkan penawaran dan permintaan. Kebijakan pemerintah untuk membuka sektor pariwisata dan aktivitas perkantoran harus dipersiapkan dengan baik karena dengan keberhasilan kebijakan ini dapat berkontribusi terhadap perbaikan harga di tingkat petani. Menghadapi fenomena yang terjadi di kalangan petani, Mentan Syahrul mengatakan bahwa pihaknya sedang melakukan berbagai upaya salah satunya melakukan pengendalian dari sisi harga pertanian melalui koordinasi Bulog dan Kementerian Perdagangan. Pewarta: Mentari Dwi Gayati Editor: Ahmad Wijaya COPYRIGHT © ANTARA 2020 (adsbygoogle = window.adsbygoogle || []).push({}); ', 'antara')
"""
try:
database.kursor.execute(qy)
database.koneksi.commit()
gen_id = database.kursor.lastrowid
print(gen_id)
except Exception as ex:
database.koneksi.rollback()
print(ex)
| [
6738,
24047,
1330,
24047,
62,
38659,
355,
20613,
198,
48806,
796,
20613,
3419,
198,
80,
88,
796,
37227,
198,
20913,
17395,
39319,
4600,
527,
5350,
62,
49170,
63,
357,
63,
10456,
377,
47671,
4600,
86,
461,
28047,
47671,
4600,
12985,
4767... | 2.737024 | 1,156 |
# coding: utf-8
"""
@Topic:
@Date: 2020/12/8
@Author: other.z
@Copyright(C): 2020-2023 other.z Inc. All rights reserved.
"""
import py
import os
import json
import yaml
import warnings
from dotenv import dotenv_values, find_dotenv
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
37811,
198,
2488,
33221,
25,
628,
2488,
10430,
25,
12131,
14,
1065,
14,
23,
628,
2488,
13838,
25,
584,
13,
89,
628,
2488,
15269,
171,
120,
230,
34,
171,
120,
231,
25,
12131,
12,
1238,
1954,
5... | 2.666667 | 93 |
import pytest
import numpy as np
from astropy.wcs import WCS
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.wcs.utils import proj_plane_pixel_area
from mosviz.viewers.mos_viewer import MOSVizViewer
def construct_test_wcs(ra=0., dec=0., x=0., y=0., area=1.0):
"""
Constructs an `~astropy.wcs.WCS` object according to params.
WCS object will always have ['RA---TAN', 'DEC--TAN'] transform.
Parameters
----------
ra, dec : float
Center (ra, dec) in deg
x, y : float
Center (x, y) pixell
area : float
Projected plane pixel area (deg**2/pix)
Returns
--------
wcs : `~astropy.wcs.WCS`
"""
axis_scale = np.sqrt(area)
w = WCS()
w.wcs.crval = [ra, dec]
w.wcs.crpix = [x, y]
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
w.wcs.cdelt = np.array([-axis_scale, axis_scale])
return w
def check_is_close(a, b, name_a='a', name_b='b'):
"""
Parameters
----------
a, b : numeric
Values to be compared.
name_a, name_b: str
Variable name of values.
Displayed if exception is raised.
Raises
------
Exception: If a and b and not close
"""
if not np.isclose(a, b):
raise Exception("{0} and {1} alues are not close: "
"np.isclose({0}, {1})".format(name_a, name_b, a, b))
def check_all_close(a, b, name_a='a', name_b='b'):
"""
Parameters
----------
a, b : array
arrays to be compared.
name_a, name_b : str
Variable name of arrays.
Displayed if exception is raised.
Raises
------
Exception: If values in a and b and not close
"""
if not np.allclose(a, b):
raise Exception("{0} and {1} alues are not close: "
"np.isclose({0}, {1})".format(name_a, name_b, a, b))
def check_rectangle_patch_attr(slit, x, y, width, length):
"""
Chcek the patch position, dimension and bounds.
Params are the expected values.
Parameters
----------
slit : _MOSVizSlit
Slit to be tested.
x, y, width, length : float
Correct center x pixel, center y pixel, width and length.
"""
x_bounds = np.array([x - (width / 2.), x + (width / 2.)])
y_bounds = np.array([y - (length / 2.), y + (length / 2.)])
check_is_close(slit.x, x)
check_is_close(slit.y, y)
check_is_close(slit.dx, width)
check_is_close(slit.dy, length)
check_is_close(slit.width, width)
check_is_close(slit.length, length)
check_all_close(np.array(slit.x_bounds), x_bounds)
check_all_close(np.array(slit.y_bounds), y_bounds)
check_all_close(np.array(slit.y_bounds), y_bounds)
def check_clear_slits(slit_controller):
"""Make sure all attributes are reset"""
assert slit_controller.has_slits is False
assert len(slit_controller.slits) == 0
def test_construct_pix_region(glue_gui):
"""Test the `SlitController.construct_pix_region` function"""
mosviz_gui = MOSVizViewer(glue_gui.session)
slit_controller = mosviz_gui.slit_controller
# Construct a 20x10 (l x w) rectangle
x, y = (5., 10.)
width = 10.
length = 20.
assert not slit_controller.has_slits
slit_controller.add_rectangle_pixel_slit(x=x, y=y, width=width, length=length)
assert slit_controller.has_slits
check_rectangle_patch_attr(slit_controller.slits[0], x, y, width, length)
# Test move function for this parch
x, y = (500., 100.)
slit_controller.slits[0].move(x=x, y=y)
check_rectangle_patch_attr(slit_controller.slits[0], x, y, width, length)
# Test drawing the slit
mosviz_gui.image_widget.draw_slit()
# Test removing the paths
slit_controller.clear_slits()
check_clear_slits(slit_controller)
mosviz_gui.close(warn=False)
def test_construct_sky_region(glue_gui):
"""Test the `SlitController.construct_sky_region` function"""
mosviz_gui = MOSVizViewer(glue_gui.session)
slit_controller = mosviz_gui.slit_controller
ra, dec = (10., 30.)
x, y = (5., 10.)
ang_width = 1. # deg
ang_length = 2. # deg
area = 0.1 # deg**2/pix
scale = np.sqrt(area)
width = ang_width / scale
length = ang_length / scale
# note fits indexing starts at 1
wcs = construct_test_wcs(ra, dec, x+1, y+1, area)
assert not slit_controller.has_slits
slit_controller.add_rectangle_sky_slit(wcs, ra, dec,
(ang_width*u.deg).to(u.arcsec),
(ang_length*u.deg).to(u.arcsec))
assert slit_controller.has_slits
check_rectangle_patch_attr(slit_controller.slits[0], x, y, width, length)
# Test move function for this parch
x, y = (500., 100.)
slit_controller.slits[0].move(x=x, y=y)
check_rectangle_patch_attr(slit_controller.slits[0], x, y, width, length)
# Test drawing the slit
mosviz_gui.image_widget.draw_slit()
# Test removing the paths
slit_controller.clear_slits()
check_clear_slits(slit_controller)
mosviz_gui.close(warn=False)
def test_launch_editor(glue_gui):
"""Test launching the slit editor"""
mosviz_gui = MOSVizViewer(glue_gui.session)
slit_controller = mosviz_gui.slit_controller
ui = slit_controller.launch_slit_ui()
mosviz_gui.close(warn=False)
mosviz_gui = glue_gui.viewers[0][0]
slit_controller = mosviz_gui.slit_controller
ui = slit_controller.launch_slit_ui()
ui.close()
def test_current_slit(glue_gui):
"""Test the UI currently available for testing."""
mosviz_gui = glue_gui.viewers[0][0]
slit_controller = mosviz_gui.slit_controller
if "slit_width" in mosviz_gui.catalog.meta["special_columns"] and \
"slit_length" in mosviz_gui.catalog.meta["special_columns"] and \
mosviz_gui.cutout_wcs is not None:
assert slit_controller.has_slits
row = mosviz_gui.current_row
ra = row[mosviz_gui.catalog.meta["special_columns"]["slit_ra"]]
dec = row[mosviz_gui.catalog.meta["special_columns"]["slit_dec"]]
ang_width = row[mosviz_gui.catalog.meta["special_columns"]["slit_width"]]
ang_length = row[mosviz_gui.catalog.meta["special_columns"]["slit_length"]]
wcs = mosviz_gui.cutout_wcs
skycoord = SkyCoord(ra, dec, frame='fk5', unit="deg")
xp, yp = skycoord.to_pixel(wcs)
scale = np.sqrt(proj_plane_pixel_area(wcs)) * 3600.
dx = ang_width / scale
dy = ang_length / scale
check_is_close(dx, slit_controller.slits[0].dx)
check_is_close(dy, slit_controller.slits[0].dy)
check_is_close(xp, slit_controller.slits[0].x)
check_is_close(yp, slit_controller.slits[0].y)
| [
11748,
12972,
9288,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
6468,
28338,
13,
12712,
1330,
45410,
198,
11748,
6468,
28338,
13,
41667,
355,
334,
198,
6738,
6468,
28338,
13,
37652,
17540,
1330,
5274,
7222,
585,
198,
6738,
... | 2.263493 | 2,983 |
from mmf.models.mmbt import MMBT
from mmxai.interpretability.classification.lime.lime_multimodal import LimeMultimodalExplainer
import numpy as np
from PIL import Image
from skimage.segmentation import mark_boundaries
from skimage import img_as_ubyte
import matplotlib.pyplot as plt
import torch
import json
import re
model = MMBT.from_pretrained("mmbt.hateful_memes.images")
img_name = "" # replace this with image path
img_text = "" # replace this with memes text
img_try = Image.open(img_name)
# predict
output = model.classify(img_try, img_text)
label_to_explain = output["label"]
plt.imshow(img_try)
plt.axis("off")
plt.show()
hateful = "Yes" if output["label"] == 1 else "No"
print("Hateful as per the model?", hateful)
print(f"Model's confidence: {output['confidence'] * 100:.3f}%")
# explain using lime
text_exp, img_exp, txt_msg, img_msg = lime_multimodal_explain(
img_name,
img_text,
model,
label_to_explain,
num_samples=30000,
)
print(txt_msg, "\n", img_msg)
| [
6738,
8085,
69,
13,
27530,
13,
76,
2022,
83,
1330,
337,
10744,
51,
198,
6738,
8085,
87,
1872,
13,
27381,
1799,
13,
4871,
2649,
13,
27299,
13,
27299,
62,
16680,
320,
375,
282,
1330,
43503,
15205,
320,
375,
282,
18438,
10613,
198,
117... | 2.433107 | 441 |
"""
stdarray.py
The stdarray module defines functions related to creating, reading,
and writing one- and two-dimensional arrays.
"""
import _10_02.Dog.stdio as stdio
#=======================================================================
# Array creation functions
#=======================================================================
def create1D(length, value=None):
"""
Create and return a 1D array containing length elements, each
initialized to value.
"""
return [value] * length
#-----------------------------------------------------------------------
def create2D(rowCount, colCount, value=None):
"""
Create and return a 2D array having rowCount rows and colCount
columns, with each element initialized to value.
"""
a = [None] * rowCount
for row in range(rowCount):
a[row] = [value] * colCount
return a
#=======================================================================
# Array writing functions
#=======================================================================
def write1D(a):
"""
Write array a to sys.stdout. First write its length. bool objects
are written as 0 and 1, not False and True.
"""
length = len(a)
stdio.writeln(length)
for i in range(length):
# stdio.writef('%9.5f ', a[i])
element = a[i]
if isinstance(element, bool):
if element == True:
stdio.write(1)
else:
stdio.write(0)
else:
stdio.write(element)
stdio.write(' ')
stdio.writeln()
#-----------------------------------------------------------------------
def write2D(a):
"""
Write two-dimensional array a to sys.stdout. First write its
dimensions. bool objects are written as 0 and 1, not False and True.
"""
rowCount = len(a)
colCount = len(a[0])
stdio.writeln(str(rowCount) + ' ' + str(colCount))
for row in range(rowCount):
for col in range(colCount):
#stdio.writef('%9.5f ', a[row][col])
element = a[row][col]
if isinstance(element, bool):
if element == True:
stdio.write(1)
else:
stdio.write(0)
else:
stdio.write(element)
stdio.write(' ')
stdio.writeln()
#=======================================================================
# Array reading functions
#=======================================================================
def readInt1D():
"""
Read from sys.stdin and return an array of integers. An integer at
the beginning of sys.stdin defines the array's length.
"""
count = stdio.readInt()
a = create1D(count, None)
for i in range(count):
a[i] = stdio.readInt()
return a
#-----------------------------------------------------------------------
def readInt2D():
"""
Read from sys.stdin and return a two-dimensional array of integers.
Two integers at the beginning of sys.stdin define the array's
dimensions.
"""
rowCount = stdio.readInt()
colCount = stdio.readInt()
a = create2D(rowCount, colCount, 0)
for row in range(rowCount):
for col in range(colCount):
a[row][col] = stdio.readInt()
return a
#-----------------------------------------------------------------------
def readFloat1D():
"""
Read from sys.stdin and return an array of floats. An integer at the
beginning of sys.stdin defines the array's length.
"""
count = stdio.readInt()
a = create1D(count, None)
for i in range(count):
a[i] = stdio.readFloat()
return a
#-----------------------------------------------------------------------
def readFloat2D():
"""
Read from sys.stdin and return a two-dimensional array of floats.
Two integers at the beginning of sys.stdin define the array's
dimensions.
"""
rowCount = stdio.readInt()
colCount = stdio.readInt()
a = create2D(rowCount, colCount, 0.0)
for row in range(rowCount):
for col in range(colCount):
a[row][col] = stdio.readFloat()
return a
#-----------------------------------------------------------------------
def readBool1D():
"""
Read from sys.stdin and return an array of booleans. An integer at
the beginning of sys.stdin defines the array's length.
"""
count = stdio.readInt()
a = create1D(count, None)
for i in range(count):
a[i] = stdio.readBool()
return a
#-----------------------------------------------------------------------
def readBool2D():
"""
Read from sys.stdin and return a two-dimensional array of booleans.
Two integers at the beginning of sys.stdin define the array's
dimensions.
"""
rowCount = stdio.readInt()
colCount = stdio.readInt()
a = create2D(rowCount, colCount, False)
for row in range(rowCount):
for col in range(colCount):
a[row][col] = stdio.readBool()
return a
#=======================================================================
def _main():
"""
For testing.
"""
write2D(readFloat2D())
write2D(readBool2D())
if __name__ == '__main__':
_main()
| [
37811,
198,
19282,
18747,
13,
9078,
198,
198,
464,
14367,
18747,
8265,
15738,
5499,
3519,
284,
4441,
11,
3555,
11,
198,
392,
3597,
530,
12,
290,
734,
12,
19577,
26515,
13,
198,
37811,
198,
198,
11748,
4808,
940,
62,
2999,
13,
32942,
... | 2.802885 | 1,872 |
from gork.palette import COLOR_TREE, PALETTE
from gork.structs import RGB, Color
DEFAULT_PIXEL_SIZE = 10
DEFAULT_N_CLUSTERS = 256
| [
6738,
308,
967,
13,
18596,
5857,
1330,
20444,
1581,
62,
51,
11587,
11,
40795,
2767,
9328,
198,
6738,
308,
967,
13,
7249,
82,
1330,
25228,
11,
5315,
198,
198,
7206,
38865,
62,
47,
10426,
3698,
62,
33489,
796,
838,
198,
7206,
38865,
6... | 2.588235 | 51 |
from datetime import datetime
from pathlib import Path
from config_state.config_state import ConfigField
from config_state.config_state import ConfigState
from config_state.config_state import reference
from config_state.config_state import stateproperty
from config_state.config_state import StateVar
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
6738,
4566,
62,
5219,
13,
11250,
62,
5219,
1330,
17056,
15878,
198,
6738,
4566,
62,
5219,
13,
11250,
62,
5219,
1330,
17056,
9012,
198,
6738,
4566,
62,
5... | 3.926829 | 82 |
from requests.auth import AuthBase
import hmac
import base64
import hashlib
import urlparse
import urllib
#add your custom auth handler class to this module
#template
#example of adding a client certificate
#example of adding a client certificate
#cloudstack auth example | [
6738,
7007,
13,
18439,
1330,
26828,
14881,
198,
11748,
289,
20285,
198,
11748,
2779,
2414,
198,
11748,
12234,
8019,
198,
11748,
19016,
29572,
198,
11748,
2956,
297,
571,
198,
198,
2,
2860,
534,
2183,
6284,
21360,
1398,
284,
428,
8265,
1... | 2.934579 | 107 |
"""
This lists a raw balances response from the API
"""
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.properties import NumericProperty
from kivy.graphics import Color, Rectangle
from ..buttons import SingleMarketButton, RemoveNotificationButton, ActionDoButton
from ...notification import *
from pprint import pprint as pp
import datetime
class NotificationRow(GridLayout):
"""
The display Widget for individual notifications
"""
market=None
note=None
status=NumericProperty()
action=None
order=None
def __init__(self, notification, **kwargs):
"""
:param notification: (Notification) the notification object that is associated with this widget
"""
self.note=notification
self.widgets={}
super(NotificationRow, self).__init__(**kwargs, rows=1, size_hint=(1, 30))
self.refresh()
self.padding = [10,0,0,0]
self.bind(status=self.refresh)
# App.get_running_app().trader.notifications[id(self)] = self
def refresh(self):
"""
Appends each element of the notification row in order if it has not already been added.
In order:
- Remove Button
- Timestamp
- Message ( different colors depending on integer "Notification.level" )
- Action Button / Spacer ( Displayed if Notification.sender is Action and actions are not set to automatically execute. Executes action )
- Market Button / Spacer ( Display if Notification.sender is Market. Shows individual market window )
"""
if "remove" not in self.widgets:
self.widgets["remove"] = RemoveNotificationButton(self.note)
self.add_widget(self.widgets["remove"])
if "timestamp" not in self.widgets:
try:
self.widgets["timestamp"] = Label(text=self.note.time.strftime("%Y-%m-%d %H:%M"), size_hint_x=None, width=300, color=[.5,.5,.5,1])
self.add_widget(self.widgets["timestamp"])
except Exception as e:
pp(self.note)
if "message" not in self.widgets:
try:
if (self.note.level == 9):
self.widgets["message"] = Label(text=self.note.message, color=[1,.2,.2,1])
elif (self.note.level == 6):
self.widgets["message"] = Label(text=self.note.message, color=[1,1,.4,1])
elif (self.note.level == 3):
self.widgets["message"] = Label(text=self.note.message, color=[.2,1,.2,1])
else:
self.widgets["message"] = Label(text=self.note.message)
self.widgets["message"].canvas.before.clear()
# with self.widgets["message"].canvas.before:
# Color(1, 1, 1, 0.25)
# Rectangle(pos=self.widgets["message"].pos, size=self.widgets["message"].size)
self.add_widget(self.widgets["message"])
except Exception as e:
pp(self.note)
if self.note.action:
# if not self.action.done:
if "actionbutton" not in self.widgets:
self.widgets["actionbutton"] = ActionDoButton(self.note.action, size_hint_x=None, width=150)
self.add_widget(self.widgets["actionbutton"])
else:
self.widgets["actionbutton"].refresh()
# else:
# Error("Bad action button", self.note.action)
else:
if "actionspacer" not in self.widgets:
self.widgets["actionspacer"] = Label(width=150, size_hint_x=None)
self.add_widget(self.widgets["actionspacer"])
if self.note.market:
# if self.note.market.checkUpToDate():
if "marketbutton" not in self.widgets:
self.widgets["marketbutton"] = SingleMarketButton(self.note.market, size_hint_x=None, width=150)
self.add_widget(self.widgets["marketbutton"])
# else:
# Error("Bad market button", self.note.market)
else:
if "marketspacer" not in self.widgets:
self.widgets["marketspacer"] = Label(width=150, size_hint_x=None)
self.add_widget(self.widgets["marketspacer"])
| [
37811,
198,
1212,
8341,
257,
8246,
25223,
2882,
422,
262,
7824,
198,
37811,
198,
198,
6738,
479,
452,
88,
13,
84,
844,
13,
25928,
39786,
1330,
24846,
32517,
198,
6738,
479,
452,
88,
13,
84,
844,
13,
18242,
1330,
36052,
198,
6738,
47... | 2.153206 | 2,043 |
#!/usr/bin/env python
#-*- coding: utf-8 -*-
#
# Modules to import
#
import csv, json
#
# Custom functions
#
def nbSyllables(word, sep):
"""
Counts the number of syllables in a word
@param String word: the word
@param String sep: the separator to use
"""
return len(word.split(sep))
#
# Main
#
if __name__ == "__main__":
# Link to Lexique3 (not included with the app)
Lexique3 = '../assets/Lexique383/Lexique383.tsv'
# Link to the lexicon used with the app
Lexicon = '../store/lexicon.json'
"""
# How to count the number max of syllables
#
nbSyllablesMax = 0;
# Working with the tabulated version of Lexique v3.8.3
with open(Lexique3, newline='') as file:
# All the lines of the file
lines = csv.reader(file, delimiter='\t')
# For each line in the file…
for line in lines:
if (nbSyllables(line[22], '-') > nbSyllablesMax):
nbSyllablesMax = nbSyllables(line[22], '-')
print(nbSyllablesMax)
"""
"""
# How to find fields that are empty in the lexicon
#
# Error reporting
errors = ""
# Loads the lexicon as a JSON structure
with open(Lexicon) as file:
lexicon = json.load(file)
# For each word…
for word in lexicon["words"]:
# … if a value is missing…
for key, value in word.items():
if value == "":
# … prints the missing key and the referring word
errors += f'{key} of {word["word"]} is empty\n'
with open('errors.txt', 'w') as report:
report.write(errors)
""" | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
12,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
198,
2,
220,
220,
3401,
5028,
284,
1330,
198,
2,
198,
11748,
269,
21370,
11,
33918,
198,
198,
2,
198,
2,
2... | 2.272727 | 726 |
# coding: UTF-8
import os
import json
import re
in_path = "/disk/mysql/law_data/final_data/"
out_path = "/disk/mysql/law_data/temp_data/"
mid_text = u"\t"
num_process = 4
num_file = 20
accusation_file = "/home/zhx/law_pre/data_processor/accusation_list2.txt"
f = open(accusation_file, "r")
accusation_list = json.loads(f.readline())
for a in range(0, len(accusation_list)):
accusation_list[a] = accusation_list[a].replace('[', '').replace(']', '')
f.close()
able_list = [248,247,201]
if __name__ == "__main__":
import multiprocessing
process_pool = []
for a in range(0, num_process):
process_pool.append(
multiprocessing.Process(target=work, args=(a * num_file / num_process, (a + 1) * num_file / num_process)))
for a in process_pool:
a.start()
for a in process_pool:
a.join()
| [
2,
19617,
25,
41002,
12,
23,
198,
198,
11748,
28686,
198,
11748,
33918,
198,
11748,
302,
628,
198,
259,
62,
6978,
796,
12813,
39531,
14,
28744,
13976,
14,
6270,
62,
7890,
14,
20311,
62,
7890,
30487,
198,
448,
62,
6978,
796,
12813,
3... | 2.374302 | 358 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
IBM Containerized Forecasting Workflow
DESCRIPTION
This file contains the class representing the different sources of input data sets.
AUTHOR
Timothy Lynar <timlynar@au1.ibm.com>, IBM Research, Melbourne, Australia
Frank Suits <frankst@au1.ibm.com>, IBM Research, Melbourne, Australia;
Dublin, Ireland; Yorktown, USA
Beat Buesser <beat.buesser@ie.ibm.com>, IBM Research, Dublin, Ireland
NOTICE
Licensed Materials - Property of IBM
"Restricted Materials of IBM"
Copyright IBM Corp. 2017 ALL RIGHTS RESERVED
US GOVERNMENT USERS RESTRICTED RIGHTS - USE, DUPLICATION OR DISCLOSURE
RESTRICTED BY GSA ADP SCHEDULE CONTRACT WITH IBM CORP.
THE SOURCE CODE FOR THIS PROGRAM IS NOT PUBLISHED OR OTHERWISE DIVESTED OF
ITS TRADE SECRETS, IRRESPECTIVE OF WHAT HAS BEEN DEPOSITED WITH
THE U. S. COPYRIGHT OFFICE. IBM GRANTS LIMITED PERMISSION TO LICENSEES TO
MAKE HARDCOPY OR OTHER REPRODUCTIONS OF ANY MACHINE- READABLE DOCUMENTATION,
PROVIDED THAT EACH SUCH REPRODUCTION SHALL CARRY THE IBM COPYRIGHT NOTICES
AND THAT USE OF THE REPRODUCTION SHALL BE GOVERNED BY THE TERMS AND
CONDITIONS SPECIFIED BY IBM IN THE LICENSED PROGRAM SPECIFICATIONS. ANY
REPRODUCTION OR USE BEYOND THE LIMITED PERMISSION GRANTED HEREIN SHALL BE A
BREACH OF THE LICENSE AGREEMENT AND AN INFRINGEMENT OF THE APPLICABLE
COPYRIGHTS.
"""
import os
import logging
import ftplib
from multiprocessing import current_process
import time
from datetime import datetime, timedelta
import subprocess
import shutil
import glob
from inputdataset import *
class InputDataSetGFS(InputDataSet):
    '''
    Global Forecast System (GFS) input data set file in grib format.
    '''
    # pylint: disable=too-many-instance-attributes
    def get_filename(self):
        '''
        Name of the remote GFS file for ``self.date`` at forecast hour
        ``self.hour``, e.g. ``gfs_4_20170101_00_000.grb2``.
        '''
        when = self.date
        return 'gfs_4_{}{:02d}{:02d}_{:02d}00_{:03d}.grb2'.format(
            when.year, when.month, when.day, when.hour, self.hour)
class InputDataSetGFSp25(InputDataSet):
    '''
    Global Forecast System (GFS) 0.25 degree input data set file in grib
    format (rda ds084.1).
    '''
    # pylint: disable=too-many-instance-attributes
    def get_filename(self):
        '''
        Name of the remote file for ``self.date`` at forecast hour
        ``self.hour``, e.g. ``gfs.0p25.2017010100.f000.grib2``.
        '''
        when = self.date
        return 'gfs.0p25.{}{:02d}{:02d}{:02d}.f{:03d}.grib2'.format(
            when.year, when.month, when.day, when.hour, self.hour)
class InputDataSetFNL(InputDataSet):
    '''
    NCEP FNL Operational Model Global Tropospheric Analyses, continuing
    from July 1999.

    Downloading requires a UCAR account: set the RDA_EMAIL and RDA_PASS
    environment variables.  Resolution is 1 degree; from 2015-07-08 onward
    a 0.25 degree product is also available (see InputDataSetFNLp25).
    '''
    # pylint: disable=too-many-instance-attributes
    def get_filename(self):
        '''
        Name of the remote FNL analysis file for ``self.date``,
        e.g. ``fnl_20170101_00_00.grib2``.
        '''
        when = self.date
        return 'fnl_{}{:02d}{:02d}_{:02d}_00.grib2'.format(
            when.year, when.month, when.day, when.hour)
class InputDataSetFNLp25(InputDataSet):
    '''
    NCEP FNL 0.25 degree Operational Model Global Tropospheric Analyses.

    Downloading requires a UCAR account: set the RDA_EMAIL and RDA_PASS
    environment variables.  The 0.25 degree product exists from 2015-07-08
    onward; before that only the 1 degree data is available.
    '''
    # pylint: disable=too-many-instance-attributes
    def get_filename(self):
        '''
        Name of the remote file for ``self.date``.  The f00 forecast step
        is always used, which might not be what you expect; edit if needed.
        '''
        when = self.date
        return 'gdas1.fnl0p25.{}{:02d}{:02d}{:02d}.f00.grib2'.format(
            when.year, when.month, when.day, when.hour)
class InputDataSetCFSR(InputDataSet):
    '''
    Climate Forecast System Reanalysis (CFSR)
    http://soostrc.comet.ucar.edu/data/grib/cfsr/
    See: https://climatedataguide.ucar.edu/climate-data/climate-forecast-system-reanalysis-cfsr
    '''
    # pylint: disable=too-many-instance-attributes
    def get_filename(self):
        '''
        Generate a filename to download for this dataset for the time given.

        The CFSR archive changed its naming scheme after 2011-04-01, so the
        pattern returned depends on ``self.date``.
        '''
        outname = ''
        # Fix: was ``datetime(2011, 04, 01)``.  Leading-zero integer
        # literals are octal in Python 2 (harmless here, 04 == 4) and a
        # SyntaxError in Python 3, so write them unpadded.
        if self.date.date() > datetime(2011, 4, 1).date():
            outname = str(self.date.year)[2:]+str(self.date.month).zfill(2)+\
                      str(self.date.day).zfill(2)+str(self.date.hour).zfill(2)+\
                      '.cfsrr.t'+str(self.date.hour).zfill(2)+'z.pgrb2f00'
        else:
            outname = 'pgbh00.cfsr.'+str(self.date.year)+str(self.date.month).zfill(2)+\
                      str(self.date.day).zfill(2)+str(self.date.hour).zfill(2)+'.grb2'
        return outname
class InputDataSetCFDDA(InputDataSet):
    '''
    NCAR Global Climate Four-Dimensional Data Assimilation (CFDDA) Hourly 40 km
    Reanalysis dataset is a dynamically-downscaled dataset with
    high temporal and spatial resolution that was created using NCAR's CFDDA system.
    see: https://rda.ucar.edu/datasets/ds604.0/
    This dataset contains hourly analyses with 28 vertical levels on a 40 km
    horizontal grid (0.4 degree grid increment)
    1985 to 2005
    top hpa = 0.998
    Documentation for this dataset can be found here:
    https://rda.ucar.edu/datasets/ds604.0/docs/CFDDA_User_Documentation_Rev3.pdf
    '''
    # pylint: disable=too-many-instance-attributes
    def get_filename(self):
        '''
        Generate a filename to download for this dataset for the time given.
        '''
        return 'cfdda_'+str(self.date.year)+str(self.date.month).zfill(2)+\
               str(self.date.day).zfill(2)+str(self.date.hour).zfill(2)+'.v2.nc'

    def get_filename_prepared(self):
        '''
        Generate a filename for the processed output file for a given time
        for this dataset
        '''
        return self.get_filename()+'.grb1'

    def prepare(self, **args):
        '''
        Convert the downloaded netCDF files in ``self.path`` to GRIB1 so
        WPS can ingest them.  Requires the ``ncks`` (NCO) and ``cdo``
        command line tools to be on PATH.

        Best-effort: a failure is logged (with traceback) rather than raised.
        '''
        logging.info('WPS: Converting netCDF to GRIB1 file for WPS')
        try:
            os.chdir(self.path)
            for filename in glob.glob('*.nc'):
                process = subprocess.Popen(['ncks', '-3', filename, 'temp.nc'])
                process.wait()
                process = subprocess.Popen(['cdo', '-a', '-f', 'grb1', 'copy',
                                            'temp.nc', filename+'.grb1'])
                process.wait()
                os.remove('temp.nc')
        except Exception:
            # Fix: was a bare ``except:`` which also trapped SystemExit and
            # KeyboardInterrupt and discarded the traceback.  Keep the
            # best-effort behaviour but record what actually went wrong.
            logging.warning('WPS: Converting netCDF to GRIB1 file for WPS Failed',
                            exc_info=True)
class InputDataSetERAISFC(InputDataSet):
    '''
    ECMWF Reanalysis Interim (ERA-I) surface-level grib files.
    NOTE: You need to download the data to your own server then edit this entry.
    '''
    # pylint: disable=too-many-instance-attributes
    def get_filename(self):
        '''
        Name of the monthly surface file covering ``self.date``,
        e.g. ``ERA-Int_sfc_20170101.grb``.
        '''
        return 'ERA-Int_sfc_{}{:02d}01.grb'.format(
            self.date.year, self.date.month)
class InputDataSetERAIML(InputDataSet):
    '''
    ECMWF Reanalysis Interim (ERA-I) model-level grib files.
    NOTE: You need to download the data to your own server then edit this entry.
    '''
    # pylint: disable=too-many-instance-attributes
    def get_filename(self):
        '''
        Name of the monthly model-level file covering ``self.date``,
        e.g. ``ERA-Int_ml_20170101.grb``.
        '''
        return 'ERA-Int_ml_{}{:02d}01.grb'.format(
            self.date.year, self.date.month)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
9865,
44,
43101,
1143,
4558,
19913,
5521,
11125,
198,
198,
30910,
40165,
628,
220,
220,
220,
770,
2393,
490... | 2.411765 | 3,400 |
from math import floor
import cv2 as cv
from conf import DATA_MODEL_DIR
from readers.base import BaseReader
# Drawing colours as 3-channel tuples.  OpenCV expects BGR channel order,
# but these particular values are identical in RGB and BGR.
GREEN = (0, 255, 0)
BLACK = (0, 0, 0)
| [
6738,
10688,
1330,
4314,
198,
198,
11748,
269,
85,
17,
355,
269,
85,
198,
198,
6738,
1013,
1330,
42865,
62,
33365,
3698,
62,
34720,
198,
6738,
7183,
13,
8692,
1330,
7308,
33634,
628,
198,
43016,
796,
357,
15,
11,
14280,
11,
657,
8,
... | 2.745455 | 55 |
#17 Heat Capacity
#Asking for mass of water.
m = float(input("Enter the mass of water in grams = "))
T = float(input("Enter the temperature change in degree celsius = "))
# Specific heat of water is 4.186 J/(g*degC); dividing by 3.6e6 J/kWh
# converts it to kWh/(g*degC).  (Fix: was 4.186 * (2.7777e-7), a truncated
# hand-typed value of 1/3.6e6.)
C = 4.186 / 3.6e6
q = m * T * C
print("Total energy required to raise ",m," grams of a material by ",T," degrees Celsius is ",q," kilowatt-hour")
# NOTE(review): 8.9 is presumably the electricity price in cents per kWh;
# the meaning of the 240 divisor is not evident from this script -- confirm.
cost = (8.9 * q) / 240
print("Cost of electricity for boiling water for a cup of coffee is ",cost," cents")
| [
2,
1558,
12308,
29765,
201,
198,
2,
1722,
3364,
329,
2347,
82,
286,
1660,
13,
201,
198,
76,
796,
12178,
7,
15414,
7203,
17469,
262,
2347,
286,
1660,
287,
16379,
796,
366,
4008,
201,
198,
51,
796,
12178,
7,
15414,
7203,
17469,
262,
... | 2.979866 | 149 |
# -*- coding: utf-8 -*-
# Copyright (C) 2018 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Tests for the MappingColumnHandler class"""
import unittest
from mock import MagicMock
from mock import patch
from ggrc.converters.handlers.handlers import MappingColumnHandler
class MappingColumnHandlerTestCase(unittest.TestCase):
  """Base class for MappingColumnHandler tests"""
  # NOTE(review): subclasses reference self.handler, which is not created in
  # this chunk; presumably a setUp() elsewhere in the file builds it -- confirm.
class IsAllowedMappingByTypeTestCase(MappingColumnHandlerTestCase):
  """Tests for the _is_allowed_mapping_by_type() method"""
  # pylint: disable=invalid-name
  def test_returns_false_for_regulation(self):
    """The method should return False if destination is 'Regulation'."""
    # pylint: disable=protected-access
    result = self.handler._is_allowed_mapping_by_type('Product', 'Regulation')
    self.assertFalse(result)
  def test_returns_false_for_standard(self):
    """The method should return False if destination is 'Standard'."""
    # pylint: disable=protected-access
    result = self.handler._is_allowed_mapping_by_type('Product', 'Standard')
    self.assertFalse(result)
  def test_returns_true_for_other_types(self):
    """The method should return True for any other destination type."""
    # pylint: disable=protected-access
    result = self.handler._is_allowed_mapping_by_type('Product', 'Control')
    self.assertTrue(result)
class AddMappingWarningTestCase(MappingColumnHandlerTestCase):
  """Tests for the _add_mapping_warning() method"""
  # pylint: disable=invalid-name
  def test_count_warnings_where_unmap_and_mapping(self):
    """Exactly one warning is added when unmap is true and a mapping exists."""
    self.handler.raw_value = u""
    with patch('ggrc.models.all_models.Relationship.find_related',
               side_effect=lambda args, opts: True):
      self.handler.unmap = True
      # pylint: disable=protected-access
      self.handler._add_mapping_warning({}, {})
      self.assertEqual(self.handler.row_converter.add_warning.call_count, 1)
  def test_count_warnings_where_map_and_not_mapping(self):
    """Exactly one warning is added when unmap is false and no mapping exists."""
    self.handler.raw_value = u""
    with patch('ggrc.models.all_models.Relationship.find_related',
               side_effect=lambda args, opts: False):
      self.handler.unmap = False
      # pylint: disable=protected-access
      self.handler._add_mapping_warning({}, {})
      self.assertEqual(self.handler.row_converter.add_warning.call_count, 1)
  def test_count_warnings_where_map_and_mapping(self):
    """No warning is added when unmap is false and a mapping exists."""
    self.handler.raw_value = u""
    with patch('ggrc.models.all_models.Relationship.find_related',
               side_effect=lambda args, opts: True):
      self.handler.unmap = False
      # pylint: disable=protected-access
      self.handler._add_mapping_warning({}, {})
      self.assertEqual(self.handler.row_converter.add_warning.call_count, 0)
  def test_count_warnings_where_unmap_and_not_mapping(self):
    """No warning is added when unmap is true and no mapping exists."""
    self.handler.raw_value = u""
    with patch('ggrc.models.all_models.Relationship.find_related',
               side_effect=lambda args, opts: False):
      self.handler.unmap = True
      # pylint: disable=protected-access
      self.handler._add_mapping_warning({}, {})
      self.assertEqual(self.handler.row_converter.add_warning.call_count, 0)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
15069,
357,
34,
8,
2864,
3012,
3457,
13,
198,
2,
49962,
739,
2638,
1378,
2503,
13,
43073,
13,
2398,
14,
677,
4541,
14,
43,
2149,
24290,
12,
17,
13,
15,
127... | 2.78749 | 1,247 |
# Generated by Django 3.0.5 on 2020-06-07 11:05
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
20,
319,
12131,
12,
3312,
12,
2998,
1367,
25,
2713,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14... | 3.019231 | 52 |
import os
import sys
import tempfile
import unittest
from io import StringIO
import numpy as np
import pytest
from mdgo.forcefield import *
# Absolute path to the fixture directory that lives next to this test module.
test_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "test_files")

# NOTE(review): no TestCase subclasses are visible in this chunk; presumably
# they are defined elsewhere in the file before this entry point.
if __name__ == "__main__":
    unittest.main()
| [
11748,
28686,
198,
11748,
25064,
198,
11748,
20218,
7753,
198,
11748,
555,
715,
395,
198,
6738,
33245,
1330,
10903,
9399,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
9288,
198,
198,
6738,
45243,
2188,
13,
3174,
3245,
1330,... | 2.732673 | 101 |
"""Provides support for django-tenat-schemas and django-tenants."""
try:
from django_tenants.utils import schema_context # NOQA: F401
except ImportError:
from tenant_schemas.utils import schema_context # NOQA: F401
| [
37811,
15946,
1460,
1104,
329,
42625,
14208,
12,
1452,
265,
12,
1416,
4411,
292,
290,
42625,
14208,
12,
1452,
1187,
526,
15931,
198,
28311,
25,
198,
220,
220,
220,
422,
42625,
14208,
62,
1452,
1187,
13,
26791,
1330,
32815,
62,
22866,
... | 2.848101 | 79 |
'''
isolve.py
Single variable inequality solver
'''
from sympy import Symbol, sympify, SympifyError
from sympy import solve_poly_inequality, solve_rational_inequalities
from sympy import solve_univariate_inequality, Poly
from sympy.core.relational import Relational, Equality
if __name__ == '__main__':
    # Read a raw inequality string, e.g. "x**2 - 4 > 0".
    ineq = input('Enter the inequality to solve: ')
    try:
        ineq_obj = sympify(ineq)
    except SympifyError:
        print('Invalid inequality')
    else:
        # We check if the input expression is an inequality here.
        # (An Equality is also Relational, so it must be excluded explicitly.)
        if isinstance(ineq_obj, Relational) and not isinstance(ineq_obj, Equality):
            # NOTE(review): isolve() is not defined in this chunk; presumably
            # it is defined earlier in the file -- confirm.
            print(isolve(ineq_obj))
        else:
            print('Invalid inequality')
| [
7061,
6,
198,
271,
6442,
13,
9078,
198,
198,
28008,
7885,
12791,
1540,
332,
198,
7061,
6,
198,
198,
6738,
10558,
88,
1330,
38357,
11,
10558,
1958,
11,
1632,
3149,
1958,
12331,
198,
6738,
10558,
88,
1330,
8494,
62,
35428,
62,
500,
13... | 2.69962 | 263 |
from Compiler.types import sint, sfix
from Compiler.rabbit_lib import rabbit_sint, Mode as rabbit_mode
# in this case, for complex types, a register of type string would make sense.
# returns RELUX(x) on sfix as in pytorch
# X is a 3D matrix
# returns RELU(X) as in pytorch
# returns Matrices as it is the requeriment
# X is a 3D matrix
# returns RELU(X) as in pytorch
# returns Matrices as it is the requeriment
| [
6738,
3082,
5329,
13,
19199,
1330,
264,
600,
11,
264,
13049,
198,
6738,
3082,
5329,
13,
81,
14229,
62,
8019,
1330,
22746,
62,
82,
600,
11,
10363,
355,
22746,
62,
14171,
628,
628,
198,
220,
220,
220,
1303,
287,
428,
1339,
11,
329,
... | 3.014085 | 142 |
import contextlib
import copy
import functools
import logging
import threading
import torch
from torchdynamo.utils import checkpoint_params
from torchdynamo.utils import clone_inputs
from . import config
from . import convert_frame
from . import skipfiles
from . import utils
from .mutation_guard import install_generation_tagging_init
from .utils import same
log = logging.getLogger(__name__)

# The C extension that installs TorchDynamo's frame-evaluation hook.  It is
# built by `python setup.py develop`; fail loudly with that hint if missing.
try:
    from . import _eval_frame
except (ModuleNotFoundError, ImportError) as e:
    raise RuntimeError("run `python setup.py develop` to compile C extensions") from e

# Re-export the C-level entry points under module-level names.
set_eval_frame = _eval_frame.set_eval_frame
reset_code = _eval_frame.reset_code
unsupported = _eval_frame.unsupported
skip_code = _eval_frame.skip_code
set_guard_fail_hook = _eval_frame.set_guard_fail_hook
set_guard_error_hook = _eval_frame.set_guard_error_hook

# Code objects that should always be optimized; weak keys, so entries vanish
# when the code object is garbage collected.
always_optimize_code_objects = utils.ExactWeakKeyDictionary()
null_context = contextlib.nullcontext
unset = object()  # sentinel distinct from None

# Serializes compilation work across threads.
compile_lock = threading.Lock()
def optimize(backend, nopython=False):
    """
    The main entrypoint of TorchDynamo.  Do graph capture and call
    backend() to optimize extracted graphs.

    Args:
        backend: One of the two things:
            - Either, a function/callable taking a torch.fx.GraphModule and
              example_inputs and returning a python callable that runs the
              graph faster.
              One can also provide additional context for the backend, like
              torch.jit.fuser("fuser2"), by setting the backend_ctx_ctor
              attribute.  See AOTAutogradMemoryEfficientFusionWithContext
              for the usage.
            - Or, a string backend name in `torchdynamo.list_backends()`
        nopython: If True, graph breaks will be errors and there will
            be a single whole-program graph.

    Example Usage:
        @torchdynamo.optimize("ofi")
        def toy_example(a, b):
            ...
        or
        with torchdynamo.optimize(my_compiler):
            ...
    """
    # Idiom fix: a single getattr() with a default replaces the
    # hasattr()/getattr() pair the original used.
    backend_ctx_ctor = getattr(backend, "backend_ctx_ctor", null_context)
    if nopython:
        return optimize_assert(backend, backend_ctx_ctor)
    return _optimize_catch_errors(
        convert_frame.convert_frame(backend), backend_ctx_ctor
    )
def optimize_assert(backend, backend_ctx_ctor=null_context):
    """
    The same as `torchdynamo.optimize(backend, nopython=True)`: graph breaks
    become hard errors instead of falling back to eager execution.
    """
    callback = convert_frame.convert_frame_assert(backend)
    return _optimize_catch_errors(callback, backend_ctx_ctor)
def run(fn=None):
    """Don't do any dynamic compiles, just use prior optimizations"""
    if fn is None:
        # Used as a context manager: `with torchdynamo.run(): ...`
        return RunOnlyContext()
    # Used as a decorator: wrap `fn` so it executes in run-only mode.
    assert callable(fn)
    return RunOnlyContext()(fn)
def disable(fn=None):
    """Decorator and context manager to disable TorchDynamo"""
    if fn is None:
        # Used as a context manager: `with torchdynamo.disable(): ...`
        return DisableContext()
    # Used as a decorator: wrap `fn` so TorchDynamo is off inside it.
    assert callable(fn)
    return DisableContext()(fn)
def skip(fn=None):
    """
    Skip frames associated with the function code, but still process recursively
    invoked frames
    """
    if fn is None:
        # Called as `@skip()`: hand back the decorator itself.
        return skip
    assert callable(fn)
    # Register the code object with the C extension so its frames are skipped,
    # then tag the function so other components can see it is disabled.
    code_obj = fn.__code__
    skip_code(code_obj)
    fn._torchdynamo_disable = True
    return fn
| [
11748,
4732,
8019,
198,
11748,
4866,
198,
11748,
1257,
310,
10141,
198,
11748,
18931,
198,
11748,
4704,
278,
198,
198,
11748,
28034,
198,
198,
6738,
28034,
67,
4989,
78,
13,
26791,
1330,
26954,
62,
37266,
198,
6738,
28034,
67,
4989,
78,... | 2.66968 | 1,217 |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: proto/grid/messages/dataset_messages.proto
"""Generated protocol buffer code."""
# third party
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# syft absolute
from syft.proto.core.common import (
common_object_pb2 as proto_dot_core_dot_common_dot_common__object__pb2,
)
from syft.proto.core.io import address_pb2 as proto_dot_core_dot_io_dot_address__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="proto/grid/messages/dataset_messages.proto",
package="syft.grid.messages",
syntax="proto3",
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n*proto/grid/messages/dataset_messages.proto\x12\x12syft.grid.messages\x1a%proto/core/common/common_object.proto\x1a\x1bproto/core/io/address.proto"\x9f\x01\n\x14\x43reateDatasetMessage\x12%\n\x06msg_id\x18\x01 \x01(\x0b\x32\x15.syft.core.common.UID\x12&\n\x07\x61\x64\x64ress\x18\x02 \x01(\x0b\x32\x15.syft.core.io.Address\x12\x0f\n\x07\x63ontent\x18\x03 \x01(\t\x12\'\n\x08reply_to\x18\x04 \x01(\x0b\x32\x15.syft.core.io.Address"\x8c\x01\n\x15\x43reateDatasetResponse\x12%\n\x06msg_id\x18\x01 \x01(\x0b\x32\x15.syft.core.common.UID\x12\x13\n\x0bstatus_code\x18\x02 \x01(\x05\x12\x0f\n\x07\x63ontent\x18\x03 \x01(\t\x12&\n\x07\x61\x64\x64ress\x18\x04 \x01(\x0b\x32\x15.syft.core.io.Address"\x9c\x01\n\x11GetDatasetMessage\x12%\n\x06msg_id\x18\x01 \x01(\x0b\x32\x15.syft.core.common.UID\x12&\n\x07\x61\x64\x64ress\x18\x02 \x01(\x0b\x32\x15.syft.core.io.Address\x12\x0f\n\x07\x63ontent\x18\x03 \x01(\t\x12\'\n\x08reply_to\x18\x04 \x01(\x0b\x32\x15.syft.core.io.Address"\x89\x01\n\x12GetDatasetResponse\x12%\n\x06msg_id\x18\x01 \x01(\x0b\x32\x15.syft.core.common.UID\x12\x13\n\x0bstatus_code\x18\x02 \x01(\x05\x12\x0f\n\x07\x63ontent\x18\x03 \x01(\t\x12&\n\x07\x61\x64\x64ress\x18\x04 \x01(\x0b\x32\x15.syft.core.io.Address"\x9d\x01\n\x12GetDatasetsMessage\x12%\n\x06msg_id\x18\x01 \x01(\x0b\x32\x15.syft.core.common.UID\x12&\n\x07\x61\x64\x64ress\x18\x02 \x01(\x0b\x32\x15.syft.core.io.Address\x12\x0f\n\x07\x63ontent\x18\x03 \x01(\t\x12\'\n\x08reply_to\x18\x04 \x01(\x0b\x32\x15.syft.core.io.Address"\x8a\x01\n\x13GetDatasetsResponse\x12%\n\x06msg_id\x18\x01 \x01(\x0b\x32\x15.syft.core.common.UID\x12\x13\n\x0bstatus_code\x18\x02 \x01(\x05\x12\x0f\n\x07\x63ontent\x18\x03 \x01(\t\x12&\n\x07\x61\x64\x64ress\x18\x04 \x01(\x0b\x32\x15.syft.core.io.Address"\x9f\x01\n\x14SearchDatasetMessage\x12%\n\x06msg_id\x18\x01 \x01(\x0b\x32\x15.syft.core.common.UID\x12&\n\x07\x61\x64\x64ress\x18\x02 \x01(\x0b\x32\x15.syft.core.io.Address\x12\x0f\n\x07\x63ontent\x18\x03 
\x01(\t\x12\'\n\x08reply_to\x18\x04 \x01(\x0b\x32\x15.syft.core.io.Address"\x8c\x01\n\x15SearchDatasetResponse\x12%\n\x06msg_id\x18\x01 \x01(\x0b\x32\x15.syft.core.common.UID\x12\x13\n\x0bstatus_code\x18\x02 \x01(\x05\x12\x0f\n\x07\x63ontent\x18\x03 \x01(\t\x12&\n\x07\x61\x64\x64ress\x18\x04 \x01(\x0b\x32\x15.syft.core.io.Address"\x9f\x01\n\x14UpdateDatasetMessage\x12%\n\x06msg_id\x18\x01 \x01(\x0b\x32\x15.syft.core.common.UID\x12&\n\x07\x61\x64\x64ress\x18\x02 \x01(\x0b\x32\x15.syft.core.io.Address\x12\x0f\n\x07\x63ontent\x18\x03 \x01(\t\x12\'\n\x08reply_to\x18\x04 \x01(\x0b\x32\x15.syft.core.io.Address"\x8c\x01\n\x15UpdateDatasetResponse\x12%\n\x06msg_id\x18\x01 \x01(\x0b\x32\x15.syft.core.common.UID\x12\x13\n\x0bstatus_code\x18\x02 \x01(\x05\x12\x0f\n\x07\x63ontent\x18\x03 \x01(\t\x12&\n\x07\x61\x64\x64ress\x18\x04 \x01(\x0b\x32\x15.syft.core.io.Address"\x9f\x01\n\x14\x44\x65leteDatasetMessage\x12%\n\x06msg_id\x18\x01 \x01(\x0b\x32\x15.syft.core.common.UID\x12&\n\x07\x61\x64\x64ress\x18\x02 \x01(\x0b\x32\x15.syft.core.io.Address\x12\x0f\n\x07\x63ontent\x18\x03 \x01(\t\x12\'\n\x08reply_to\x18\x04 \x01(\x0b\x32\x15.syft.core.io.Address"\x8c\x01\n\x15\x44\x65leteDatasetResponse\x12%\n\x06msg_id\x18\x01 \x01(\x0b\x32\x15.syft.core.common.UID\x12\x13\n\x0bstatus_code\x18\x02 \x01(\x05\x12\x0f\n\x07\x63ontent\x18\x03 \x01(\t\x12&\n\x07\x61\x64\x64ress\x18\x04 \x01(\x0b\x32\x15.syft.core.io.Addressb\x06proto3',
dependencies=[
proto_dot_core_dot_common_dot_common__object__pb2.DESCRIPTOR,
proto_dot_core_dot_io_dot_address__pb2.DESCRIPTOR,
],
)
_CREATEDATASETMESSAGE = _descriptor.Descriptor(
name="CreateDatasetMessage",
full_name="syft.grid.messages.CreateDatasetMessage",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="msg_id",
full_name="syft.grid.messages.CreateDatasetMessage.msg_id",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="address",
full_name="syft.grid.messages.CreateDatasetMessage.address",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="content",
full_name="syft.grid.messages.CreateDatasetMessage.content",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="reply_to",
full_name="syft.grid.messages.CreateDatasetMessage.reply_to",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=135,
serialized_end=294,
)
_CREATEDATASETRESPONSE = _descriptor.Descriptor(
name="CreateDatasetResponse",
full_name="syft.grid.messages.CreateDatasetResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="msg_id",
full_name="syft.grid.messages.CreateDatasetResponse.msg_id",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="status_code",
full_name="syft.grid.messages.CreateDatasetResponse.status_code",
index=1,
number=2,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="content",
full_name="syft.grid.messages.CreateDatasetResponse.content",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="address",
full_name="syft.grid.messages.CreateDatasetResponse.address",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=297,
serialized_end=437,
)
_GETDATASETMESSAGE = _descriptor.Descriptor(
name="GetDatasetMessage",
full_name="syft.grid.messages.GetDatasetMessage",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="msg_id",
full_name="syft.grid.messages.GetDatasetMessage.msg_id",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="address",
full_name="syft.grid.messages.GetDatasetMessage.address",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="content",
full_name="syft.grid.messages.GetDatasetMessage.content",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="reply_to",
full_name="syft.grid.messages.GetDatasetMessage.reply_to",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=440,
serialized_end=596,
)
_GETDATASETRESPONSE = _descriptor.Descriptor(
name="GetDatasetResponse",
full_name="syft.grid.messages.GetDatasetResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="msg_id",
full_name="syft.grid.messages.GetDatasetResponse.msg_id",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="status_code",
full_name="syft.grid.messages.GetDatasetResponse.status_code",
index=1,
number=2,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="content",
full_name="syft.grid.messages.GetDatasetResponse.content",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="address",
full_name="syft.grid.messages.GetDatasetResponse.address",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=599,
serialized_end=736,
)
_GETDATASETSMESSAGE = _descriptor.Descriptor(
name="GetDatasetsMessage",
full_name="syft.grid.messages.GetDatasetsMessage",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="msg_id",
full_name="syft.grid.messages.GetDatasetsMessage.msg_id",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="address",
full_name="syft.grid.messages.GetDatasetsMessage.address",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="content",
full_name="syft.grid.messages.GetDatasetsMessage.content",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="reply_to",
full_name="syft.grid.messages.GetDatasetsMessage.reply_to",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=739,
serialized_end=896,
)
_GETDATASETSRESPONSE = _descriptor.Descriptor(
name="GetDatasetsResponse",
full_name="syft.grid.messages.GetDatasetsResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="msg_id",
full_name="syft.grid.messages.GetDatasetsResponse.msg_id",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="status_code",
full_name="syft.grid.messages.GetDatasetsResponse.status_code",
index=1,
number=2,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="content",
full_name="syft.grid.messages.GetDatasetsResponse.content",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="address",
full_name="syft.grid.messages.GetDatasetsResponse.address",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=899,
serialized_end=1037,
)
_SEARCHDATASETMESSAGE = _descriptor.Descriptor(
name="SearchDatasetMessage",
full_name="syft.grid.messages.SearchDatasetMessage",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="msg_id",
full_name="syft.grid.messages.SearchDatasetMessage.msg_id",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="address",
full_name="syft.grid.messages.SearchDatasetMessage.address",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="content",
full_name="syft.grid.messages.SearchDatasetMessage.content",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="reply_to",
full_name="syft.grid.messages.SearchDatasetMessage.reply_to",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1040,
serialized_end=1199,
)
_SEARCHDATASETRESPONSE = _descriptor.Descriptor(
name="SearchDatasetResponse",
full_name="syft.grid.messages.SearchDatasetResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="msg_id",
full_name="syft.grid.messages.SearchDatasetResponse.msg_id",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="status_code",
full_name="syft.grid.messages.SearchDatasetResponse.status_code",
index=1,
number=2,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="content",
full_name="syft.grid.messages.SearchDatasetResponse.content",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="address",
full_name="syft.grid.messages.SearchDatasetResponse.address",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1202,
serialized_end=1342,
)
_UPDATEDATASETMESSAGE = _descriptor.Descriptor(
name="UpdateDatasetMessage",
full_name="syft.grid.messages.UpdateDatasetMessage",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="msg_id",
full_name="syft.grid.messages.UpdateDatasetMessage.msg_id",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="address",
full_name="syft.grid.messages.UpdateDatasetMessage.address",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="content",
full_name="syft.grid.messages.UpdateDatasetMessage.content",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="reply_to",
full_name="syft.grid.messages.UpdateDatasetMessage.reply_to",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1345,
serialized_end=1504,
)
_UPDATEDATASETRESPONSE = _descriptor.Descriptor(
name="UpdateDatasetResponse",
full_name="syft.grid.messages.UpdateDatasetResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="msg_id",
full_name="syft.grid.messages.UpdateDatasetResponse.msg_id",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="status_code",
full_name="syft.grid.messages.UpdateDatasetResponse.status_code",
index=1,
number=2,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="content",
full_name="syft.grid.messages.UpdateDatasetResponse.content",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="address",
full_name="syft.grid.messages.UpdateDatasetResponse.address",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1507,
serialized_end=1647,
)
_DELETEDATASETMESSAGE = _descriptor.Descriptor(
name="DeleteDatasetMessage",
full_name="syft.grid.messages.DeleteDatasetMessage",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="msg_id",
full_name="syft.grid.messages.DeleteDatasetMessage.msg_id",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="address",
full_name="syft.grid.messages.DeleteDatasetMessage.address",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="content",
full_name="syft.grid.messages.DeleteDatasetMessage.content",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="reply_to",
full_name="syft.grid.messages.DeleteDatasetMessage.reply_to",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1650,
serialized_end=1809,
)
_DELETEDATASETRESPONSE = _descriptor.Descriptor(
name="DeleteDatasetResponse",
full_name="syft.grid.messages.DeleteDatasetResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="msg_id",
full_name="syft.grid.messages.DeleteDatasetResponse.msg_id",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="status_code",
full_name="syft.grid.messages.DeleteDatasetResponse.status_code",
index=1,
number=2,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="content",
full_name="syft.grid.messages.DeleteDatasetResponse.content",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="address",
full_name="syft.grid.messages.DeleteDatasetResponse.address",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1812,
serialized_end=1952,
)
_CREATEDATASETMESSAGE.fields_by_name[
"msg_id"
].message_type = proto_dot_core_dot_common_dot_common__object__pb2._UID
_CREATEDATASETMESSAGE.fields_by_name[
"address"
].message_type = proto_dot_core_dot_io_dot_address__pb2._ADDRESS
_CREATEDATASETMESSAGE.fields_by_name[
"reply_to"
].message_type = proto_dot_core_dot_io_dot_address__pb2._ADDRESS
_CREATEDATASETRESPONSE.fields_by_name[
"msg_id"
].message_type = proto_dot_core_dot_common_dot_common__object__pb2._UID
_CREATEDATASETRESPONSE.fields_by_name[
"address"
].message_type = proto_dot_core_dot_io_dot_address__pb2._ADDRESS
_GETDATASETMESSAGE.fields_by_name[
"msg_id"
].message_type = proto_dot_core_dot_common_dot_common__object__pb2._UID
_GETDATASETMESSAGE.fields_by_name[
"address"
].message_type = proto_dot_core_dot_io_dot_address__pb2._ADDRESS
_GETDATASETMESSAGE.fields_by_name[
"reply_to"
].message_type = proto_dot_core_dot_io_dot_address__pb2._ADDRESS
_GETDATASETRESPONSE.fields_by_name[
"msg_id"
].message_type = proto_dot_core_dot_common_dot_common__object__pb2._UID
_GETDATASETRESPONSE.fields_by_name[
"address"
].message_type = proto_dot_core_dot_io_dot_address__pb2._ADDRESS
_GETDATASETSMESSAGE.fields_by_name[
"msg_id"
].message_type = proto_dot_core_dot_common_dot_common__object__pb2._UID
_GETDATASETSMESSAGE.fields_by_name[
"address"
].message_type = proto_dot_core_dot_io_dot_address__pb2._ADDRESS
_GETDATASETSMESSAGE.fields_by_name[
"reply_to"
].message_type = proto_dot_core_dot_io_dot_address__pb2._ADDRESS
_GETDATASETSRESPONSE.fields_by_name[
"msg_id"
].message_type = proto_dot_core_dot_common_dot_common__object__pb2._UID
_GETDATASETSRESPONSE.fields_by_name[
"address"
].message_type = proto_dot_core_dot_io_dot_address__pb2._ADDRESS
_SEARCHDATASETMESSAGE.fields_by_name[
"msg_id"
].message_type = proto_dot_core_dot_common_dot_common__object__pb2._UID
_SEARCHDATASETMESSAGE.fields_by_name[
"address"
].message_type = proto_dot_core_dot_io_dot_address__pb2._ADDRESS
_SEARCHDATASETMESSAGE.fields_by_name[
"reply_to"
].message_type = proto_dot_core_dot_io_dot_address__pb2._ADDRESS
_SEARCHDATASETRESPONSE.fields_by_name[
"msg_id"
].message_type = proto_dot_core_dot_common_dot_common__object__pb2._UID
_SEARCHDATASETRESPONSE.fields_by_name[
"address"
].message_type = proto_dot_core_dot_io_dot_address__pb2._ADDRESS
_UPDATEDATASETMESSAGE.fields_by_name[
"msg_id"
].message_type = proto_dot_core_dot_common_dot_common__object__pb2._UID
_UPDATEDATASETMESSAGE.fields_by_name[
"address"
].message_type = proto_dot_core_dot_io_dot_address__pb2._ADDRESS
_UPDATEDATASETMESSAGE.fields_by_name[
"reply_to"
].message_type = proto_dot_core_dot_io_dot_address__pb2._ADDRESS
_UPDATEDATASETRESPONSE.fields_by_name[
"msg_id"
].message_type = proto_dot_core_dot_common_dot_common__object__pb2._UID
_UPDATEDATASETRESPONSE.fields_by_name[
"address"
].message_type = proto_dot_core_dot_io_dot_address__pb2._ADDRESS
_DELETEDATASETMESSAGE.fields_by_name[
"msg_id"
].message_type = proto_dot_core_dot_common_dot_common__object__pb2._UID
_DELETEDATASETMESSAGE.fields_by_name[
"address"
].message_type = proto_dot_core_dot_io_dot_address__pb2._ADDRESS
_DELETEDATASETMESSAGE.fields_by_name[
"reply_to"
].message_type = proto_dot_core_dot_io_dot_address__pb2._ADDRESS
_DELETEDATASETRESPONSE.fields_by_name[
"msg_id"
].message_type = proto_dot_core_dot_common_dot_common__object__pb2._UID
_DELETEDATASETRESPONSE.fields_by_name[
"address"
].message_type = proto_dot_core_dot_io_dot_address__pb2._ADDRESS
DESCRIPTOR.message_types_by_name["CreateDatasetMessage"] = _CREATEDATASETMESSAGE
DESCRIPTOR.message_types_by_name["CreateDatasetResponse"] = _CREATEDATASETRESPONSE
DESCRIPTOR.message_types_by_name["GetDatasetMessage"] = _GETDATASETMESSAGE
DESCRIPTOR.message_types_by_name["GetDatasetResponse"] = _GETDATASETRESPONSE
DESCRIPTOR.message_types_by_name["GetDatasetsMessage"] = _GETDATASETSMESSAGE
DESCRIPTOR.message_types_by_name["GetDatasetsResponse"] = _GETDATASETSRESPONSE
DESCRIPTOR.message_types_by_name["SearchDatasetMessage"] = _SEARCHDATASETMESSAGE
DESCRIPTOR.message_types_by_name["SearchDatasetResponse"] = _SEARCHDATASETRESPONSE
DESCRIPTOR.message_types_by_name["UpdateDatasetMessage"] = _UPDATEDATASETMESSAGE
DESCRIPTOR.message_types_by_name["UpdateDatasetResponse"] = _UPDATEDATASETRESPONSE
DESCRIPTOR.message_types_by_name["DeleteDatasetMessage"] = _DELETEDATASETMESSAGE
DESCRIPTOR.message_types_by_name["DeleteDatasetResponse"] = _DELETEDATASETRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CreateDatasetMessage = _reflection.GeneratedProtocolMessageType(
"CreateDatasetMessage",
(_message.Message,),
{
"DESCRIPTOR": _CREATEDATASETMESSAGE,
"__module__": "proto.grid.messages.dataset_messages_pb2"
# @@protoc_insertion_point(class_scope:syft.grid.messages.CreateDatasetMessage)
},
)
_sym_db.RegisterMessage(CreateDatasetMessage)
CreateDatasetResponse = _reflection.GeneratedProtocolMessageType(
"CreateDatasetResponse",
(_message.Message,),
{
"DESCRIPTOR": _CREATEDATASETRESPONSE,
"__module__": "proto.grid.messages.dataset_messages_pb2"
# @@protoc_insertion_point(class_scope:syft.grid.messages.CreateDatasetResponse)
},
)
_sym_db.RegisterMessage(CreateDatasetResponse)
GetDatasetMessage = _reflection.GeneratedProtocolMessageType(
"GetDatasetMessage",
(_message.Message,),
{
"DESCRIPTOR": _GETDATASETMESSAGE,
"__module__": "proto.grid.messages.dataset_messages_pb2"
# @@protoc_insertion_point(class_scope:syft.grid.messages.GetDatasetMessage)
},
)
_sym_db.RegisterMessage(GetDatasetMessage)
GetDatasetResponse = _reflection.GeneratedProtocolMessageType(
"GetDatasetResponse",
(_message.Message,),
{
"DESCRIPTOR": _GETDATASETRESPONSE,
"__module__": "proto.grid.messages.dataset_messages_pb2"
# @@protoc_insertion_point(class_scope:syft.grid.messages.GetDatasetResponse)
},
)
_sym_db.RegisterMessage(GetDatasetResponse)
GetDatasetsMessage = _reflection.GeneratedProtocolMessageType(
"GetDatasetsMessage",
(_message.Message,),
{
"DESCRIPTOR": _GETDATASETSMESSAGE,
"__module__": "proto.grid.messages.dataset_messages_pb2"
# @@protoc_insertion_point(class_scope:syft.grid.messages.GetDatasetsMessage)
},
)
_sym_db.RegisterMessage(GetDatasetsMessage)
GetDatasetsResponse = _reflection.GeneratedProtocolMessageType(
"GetDatasetsResponse",
(_message.Message,),
{
"DESCRIPTOR": _GETDATASETSRESPONSE,
"__module__": "proto.grid.messages.dataset_messages_pb2"
# @@protoc_insertion_point(class_scope:syft.grid.messages.GetDatasetsResponse)
},
)
_sym_db.RegisterMessage(GetDatasetsResponse)
SearchDatasetMessage = _reflection.GeneratedProtocolMessageType(
"SearchDatasetMessage",
(_message.Message,),
{
"DESCRIPTOR": _SEARCHDATASETMESSAGE,
"__module__": "proto.grid.messages.dataset_messages_pb2"
# @@protoc_insertion_point(class_scope:syft.grid.messages.SearchDatasetMessage)
},
)
_sym_db.RegisterMessage(SearchDatasetMessage)
SearchDatasetResponse = _reflection.GeneratedProtocolMessageType(
"SearchDatasetResponse",
(_message.Message,),
{
"DESCRIPTOR": _SEARCHDATASETRESPONSE,
"__module__": "proto.grid.messages.dataset_messages_pb2"
# @@protoc_insertion_point(class_scope:syft.grid.messages.SearchDatasetResponse)
},
)
_sym_db.RegisterMessage(SearchDatasetResponse)
UpdateDatasetMessage = _reflection.GeneratedProtocolMessageType(
"UpdateDatasetMessage",
(_message.Message,),
{
"DESCRIPTOR": _UPDATEDATASETMESSAGE,
"__module__": "proto.grid.messages.dataset_messages_pb2"
# @@protoc_insertion_point(class_scope:syft.grid.messages.UpdateDatasetMessage)
},
)
_sym_db.RegisterMessage(UpdateDatasetMessage)
UpdateDatasetResponse = _reflection.GeneratedProtocolMessageType(
"UpdateDatasetResponse",
(_message.Message,),
{
"DESCRIPTOR": _UPDATEDATASETRESPONSE,
"__module__": "proto.grid.messages.dataset_messages_pb2"
# @@protoc_insertion_point(class_scope:syft.grid.messages.UpdateDatasetResponse)
},
)
_sym_db.RegisterMessage(UpdateDatasetResponse)
DeleteDatasetMessage = _reflection.GeneratedProtocolMessageType(
"DeleteDatasetMessage",
(_message.Message,),
{
"DESCRIPTOR": _DELETEDATASETMESSAGE,
"__module__": "proto.grid.messages.dataset_messages_pb2"
# @@protoc_insertion_point(class_scope:syft.grid.messages.DeleteDatasetMessage)
},
)
_sym_db.RegisterMessage(DeleteDatasetMessage)
DeleteDatasetResponse = _reflection.GeneratedProtocolMessageType(
"DeleteDatasetResponse",
(_message.Message,),
{
"DESCRIPTOR": _DELETEDATASETRESPONSE,
"__module__": "proto.grid.messages.dataset_messages_pb2"
# @@protoc_insertion_point(class_scope:syft.grid.messages.DeleteDatasetResponse)
},
)
_sym_db.RegisterMessage(DeleteDatasetResponse)
# @@protoc_insertion_point(module_scope)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
262,
8435,
11876,
17050,
13,
220,
8410,
5626,
48483,
0,
198,
2,
2723,
25,
44876,
14,
25928,
14,
37348,
1095,
14,
19608,
292,
316,
62,
37348,
1095,
... | 1.886207 | 26,021 |
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 DistilBERT model
"""
import logging
import math
import numpy as np
import tensorflow as tf
from .configuration_distilbert import DistilBertConfig
from .file_utils import MULTIPLE_CHOICE_DUMMY_INPUTS, add_start_docstrings, add_start_docstrings_to_callable
from .modeling_tf_utils import (
TFMultipleChoiceLoss,
TFPreTrainedModel,
TFQuestionAnsweringLoss,
TFSequenceClassificationLoss,
TFSharedEmbeddings,
TFTokenClassificationLoss,
cast_bool_to_primitive,
get_initializer,
keras_serializable,
shape_list,
)
from .tokenization_utils import BatchEncoding
logger = logging.getLogger(__name__)
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"distilbert-base-uncased",
"distilbert-base-uncased-distilled-squad",
"distilbert-base-cased",
"distilbert-base-cased-distilled-squad",
"distilbert-base-multilingual-cased",
"distilbert-base-uncased-finetuned-sst-2-english",
# See all DistilBERT models at https://huggingface.co/models?filter=distilbert
]
# UTILS AND BUILDING BLOCKS OF THE ARCHITECTURE #
def gelu(x):
""" Gaussian Error Linear Unit.
Original Implementation of the gelu activation function in Google Bert repo when initially created.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
"""
cdf = 0.5 * (1.0 + tf.math.erf(x / tf.math.sqrt(2.0)))
return x * cdf
def gelu_new(x):
"""Gaussian Error Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
x: float Tensor to perform activation.
Returns:
`x` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + tf.tanh((np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf
@keras_serializable
# INTERFACE FOR ENCODER AND TASK SPECIFIC MODEL #
class TFDistilBertPreTrainedModel(TFPreTrainedModel):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
config_class = DistilBertConfig
base_model_prefix = "distilbert"
DISTILBERT_START_DOCSTRING = r"""
This model is a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ sub-class.
Use it as a regular TF 2.0 Keras Model and
refer to the TF 2.0 documentation for all matter related to general usage and behavior.
.. note::
TF 2.0 models accepts two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional arguments.
This second option is useful when using :obj:`tf.keras.Model.fit()` method which currently requires having
all the tensors in the first argument of the model call function: :obj:`model(inputs)`.
If you choose this second option, there are three possibilities you can use to gather all the input Tensors
in the first positional argument :
- a single Tensor with input_ids only and nothing else: :obj:`model(inputs_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
:obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
:obj:`model({'input_ids': input_ids, 'token_type_ids': token_type_ids})`
Parameters:
config (:class:`~transformers.DistilBertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
DISTILBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.BertTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.encode_plus` for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
head_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
inputs_embeds (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, embedding_dim)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
training (:obj:`boolean`, `optional`, defaults to :obj:`False`):
Whether to activate dropout modules (if set to :obj:`True`) during training or to de-activate them
(if set to :obj:`False`) for evaluation.
output_attentions (:obj:`bool`, `optional`, defaults to `:obj:`None`):
If set to ``True``, the attentions tensors of all attention layers are returned. See ``attentions`` under returned tensors for more detail.
"""
@add_start_docstrings(
"The bare DistilBERT encoder/transformer outputing raw hidden-states without any specific head on top.",
DISTILBERT_START_DOCSTRING,
)
@add_start_docstrings(
"""DistilBert Model with a `masked language modeling` head on top. """, DISTILBERT_START_DOCSTRING,
)
@add_start_docstrings(
"""DistilBert Model transformer with a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """,
DISTILBERT_START_DOCSTRING,
)
@add_start_docstrings(
"""DistilBert Model with a token classification head on top (a linear layer on top of
the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
DISTILBERT_START_DOCSTRING,
)
@add_start_docstrings(
"""DistilBert Model with a multiple choice classification head on top (a linear layer on top of
the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """,
DISTILBERT_START_DOCSTRING,
)
@add_start_docstrings(
"""DistilBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
the hidden-states output to compute `span start logits` and `span end logits`). """,
DISTILBERT_START_DOCSTRING,
)
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
15069,
13130,
12,
25579,
11,
262,
12905,
2667,
32388,
3457,
13,
1074,
11,
383,
3012,
9552,
15417,
4816,
290,
3203,
11,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
1... | 2.770107 | 2,984 |
"""
JSON 2 HTML convertor
=====================
(c) Varun Malhotra 2013
http://softvar.github.io
Original Source Code: https://github.com/softvar/json2html-flask
------------
LICENSE: MIT
--------
"""
# -*- coding: utf-8 -*-
import json
import collections
import html.parser
from flask import json
from flask import Flask
from flask import request
from flask import render_template
from json2html import jsonconv
app = Flask(__name__)
@app.route('/')
@app.route('/', methods=['POST'])
def my_form_post():
"""
receive submitted data and process
"""
text = request.form['text']
checkbox = request.form['users']
style=""
if(checkbox=="1"):
style="table table-condensed table-bordered table-hover"
else:
style='border="1"'
#json_input = json.dumps(text)
try:
ordered_json = json.loads(text, object_pairs_hook=collections.OrderedDict)
processed_text = jsonconv.json2html.convert(ordered_json,style)
html_parser = html.parser.HTMLParser()
return render_template("index.html", processed_text=html_parser.unescape(processed_text),pro = text)
except:
return render_template("index.html",error="Error Parsing JSON ! Please check your JSON syntax",pro=text)
if __name__ == '__main__':
app.run(debug = True)
| [
37811,
198,
40386,
362,
11532,
10385,
273,
198,
4770,
1421,
28,
198,
198,
7,
66,
8,
12372,
403,
4434,
8940,
430,
2211,
198,
4023,
1378,
4215,
7785,
13,
12567,
13,
952,
198,
198,
20556,
8090,
6127,
25,
3740,
1378,
12567,
13,
785,
14,... | 2.764211 | 475 |
from django.contrib.auth.models import User
from django.db import models
| [
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
220,
628,
628
] | 3.347826 | 23 |
# Mapping of export ordinal -> exported function name for the Windows OLE
# Automation library. NOTE(review): the names (SysAllocString, SafeArray*,
# Var*From*, OleLoadPicture*, ...) match the oleaut32.dll export table —
# presumably used to resolve imports that are referenced by ordinal only;
# confirm against the module this table is applied to. Gaps in the ordinal
# sequence (e.g. 302, 320-321, 350-359) are ordinals with no named export.
ord_names = {
    2:'SysAllocString',
    3:'SysReAllocString',
    4:'SysAllocStringLen',
    5:'SysReAllocStringLen',
    6:'SysFreeString',
    7:'SysStringLen',
    8:'VariantInit',
    9:'VariantClear',
    10:'VariantCopy',
    11:'VariantCopyInd',
    12:'VariantChangeType',
    13:'VariantTimeToDosDateTime',
    14:'DosDateTimeToVariantTime',
    15:'SafeArrayCreate',
    16:'SafeArrayDestroy',
    17:'SafeArrayGetDim',
    18:'SafeArrayGetElemsize',
    19:'SafeArrayGetUBound',
    20:'SafeArrayGetLBound',
    21:'SafeArrayLock',
    22:'SafeArrayUnlock',
    23:'SafeArrayAccessData',
    24:'SafeArrayUnaccessData',
    25:'SafeArrayGetElement',
    26:'SafeArrayPutElement',
    27:'SafeArrayCopy',
    28:'DispGetParam',
    29:'DispGetIDsOfNames',
    30:'DispInvoke',
    31:'CreateDispTypeInfo',
    32:'CreateStdDispatch',
    33:'RegisterActiveObject',
    34:'RevokeActiveObject',
    35:'GetActiveObject',
    36:'SafeArrayAllocDescriptor',
    37:'SafeArrayAllocData',
    38:'SafeArrayDestroyDescriptor',
    39:'SafeArrayDestroyData',
    40:'SafeArrayRedim',
    41:'SafeArrayAllocDescriptorEx',
    42:'SafeArrayCreateEx',
    43:'SafeArrayCreateVectorEx',
    44:'SafeArraySetRecordInfo',
    45:'SafeArrayGetRecordInfo',
    46:'VarParseNumFromStr',
    47:'VarNumFromParseNum',
    48:'VarI2FromUI1',
    49:'VarI2FromI4',
    50:'VarI2FromR4',
    51:'VarI2FromR8',
    52:'VarI2FromCy',
    53:'VarI2FromDate',
    54:'VarI2FromStr',
    55:'VarI2FromDisp',
    56:'VarI2FromBool',
    57:'SafeArraySetIID',
    58:'VarI4FromUI1',
    59:'VarI4FromI2',
    60:'VarI4FromR4',
    61:'VarI4FromR8',
    62:'VarI4FromCy',
    63:'VarI4FromDate',
    64:'VarI4FromStr',
    65:'VarI4FromDisp',
    66:'VarI4FromBool',
    67:'SafeArrayGetIID',
    68:'VarR4FromUI1',
    69:'VarR4FromI2',
    70:'VarR4FromI4',
    71:'VarR4FromR8',
    72:'VarR4FromCy',
    73:'VarR4FromDate',
    74:'VarR4FromStr',
    75:'VarR4FromDisp',
    76:'VarR4FromBool',
    77:'SafeArrayGetVartype',
    78:'VarR8FromUI1',
    79:'VarR8FromI2',
    80:'VarR8FromI4',
    81:'VarR8FromR4',
    82:'VarR8FromCy',
    83:'VarR8FromDate',
    84:'VarR8FromStr',
    85:'VarR8FromDisp',
    86:'VarR8FromBool',
    87:'VarFormat',
    88:'VarDateFromUI1',
    89:'VarDateFromI2',
    90:'VarDateFromI4',
    91:'VarDateFromR4',
    92:'VarDateFromR8',
    93:'VarDateFromCy',
    94:'VarDateFromStr',
    95:'VarDateFromDisp',
    96:'VarDateFromBool',
    97:'VarFormatDateTime',
    98:'VarCyFromUI1',
    99:'VarCyFromI2',
    100:'VarCyFromI4',
    101:'VarCyFromR4',
    102:'VarCyFromR8',
    103:'VarCyFromDate',
    104:'VarCyFromStr',
    105:'VarCyFromDisp',
    106:'VarCyFromBool',
    107:'VarFormatNumber',
    108:'VarBstrFromUI1',
    109:'VarBstrFromI2',
    110:'VarBstrFromI4',
    111:'VarBstrFromR4',
    112:'VarBstrFromR8',
    113:'VarBstrFromCy',
    114:'VarBstrFromDate',
    115:'VarBstrFromDisp',
    116:'VarBstrFromBool',
    117:'VarFormatPercent',
    118:'VarBoolFromUI1',
    119:'VarBoolFromI2',
    120:'VarBoolFromI4',
    121:'VarBoolFromR4',
    122:'VarBoolFromR8',
    123:'VarBoolFromDate',
    124:'VarBoolFromCy',
    125:'VarBoolFromStr',
    126:'VarBoolFromDisp',
    127:'VarFormatCurrency',
    128:'VarWeekdayName',
    129:'VarMonthName',
    130:'VarUI1FromI2',
    131:'VarUI1FromI4',
    132:'VarUI1FromR4',
    133:'VarUI1FromR8',
    134:'VarUI1FromCy',
    135:'VarUI1FromDate',
    136:'VarUI1FromStr',
    137:'VarUI1FromDisp',
    138:'VarUI1FromBool',
    139:'VarFormatFromTokens',
    140:'VarTokenizeFormatString',
    141:'VarAdd',
    142:'VarAnd',
    143:'VarDiv',
    144:'DllCanUnloadNow',
    145:'DllGetClassObject',
    146:'DispCallFunc',
    147:'VariantChangeTypeEx',
    148:'SafeArrayPtrOfIndex',
    149:'SysStringByteLen',
    150:'SysAllocStringByteLen',
    151:'DllRegisterServer',
    152:'VarEqv',
    153:'VarIdiv',
    154:'VarImp',
    155:'VarMod',
    156:'VarMul',
    157:'VarOr',
    158:'VarPow',
    159:'VarSub',
    160:'CreateTypeLib',
    161:'LoadTypeLib',
    162:'LoadRegTypeLib',
    163:'RegisterTypeLib',
    164:'QueryPathOfRegTypeLib',
    165:'LHashValOfNameSys',
    166:'LHashValOfNameSysA',
    167:'VarXor',
    168:'VarAbs',
    169:'VarFix',
    170:'OaBuildVersion',
    171:'ClearCustData',
    172:'VarInt',
    173:'VarNeg',
    174:'VarNot',
    175:'VarRound',
    176:'VarCmp',
    177:'VarDecAdd',
    178:'VarDecDiv',
    179:'VarDecMul',
    180:'CreateTypeLib2',
    181:'VarDecSub',
    182:'VarDecAbs',
    183:'LoadTypeLibEx',
    184:'SystemTimeToVariantTime',
    185:'VariantTimeToSystemTime',
    186:'UnRegisterTypeLib',
    187:'VarDecFix',
    188:'VarDecInt',
    189:'VarDecNeg',
    190:'VarDecFromUI1',
    191:'VarDecFromI2',
    192:'VarDecFromI4',
    193:'VarDecFromR4',
    194:'VarDecFromR8',
    195:'VarDecFromDate',
    196:'VarDecFromCy',
    197:'VarDecFromStr',
    198:'VarDecFromDisp',
    199:'VarDecFromBool',
    200:'GetErrorInfo',
    201:'SetErrorInfo',
    202:'CreateErrorInfo',
    203:'VarDecRound',
    204:'VarDecCmp',
    205:'VarI2FromI1',
    206:'VarI2FromUI2',
    207:'VarI2FromUI4',
    208:'VarI2FromDec',
    209:'VarI4FromI1',
    210:'VarI4FromUI2',
    211:'VarI4FromUI4',
    212:'VarI4FromDec',
    213:'VarR4FromI1',
    214:'VarR4FromUI2',
    215:'VarR4FromUI4',
    216:'VarR4FromDec',
    217:'VarR8FromI1',
    218:'VarR8FromUI2',
    219:'VarR8FromUI4',
    220:'VarR8FromDec',
    221:'VarDateFromI1',
    222:'VarDateFromUI2',
    223:'VarDateFromUI4',
    224:'VarDateFromDec',
    225:'VarCyFromI1',
    226:'VarCyFromUI2',
    227:'VarCyFromUI4',
    228:'VarCyFromDec',
    229:'VarBstrFromI1',
    230:'VarBstrFromUI2',
    231:'VarBstrFromUI4',
    232:'VarBstrFromDec',
    233:'VarBoolFromI1',
    234:'VarBoolFromUI2',
    235:'VarBoolFromUI4',
    236:'VarBoolFromDec',
    237:'VarUI1FromI1',
    238:'VarUI1FromUI2',
    239:'VarUI1FromUI4',
    240:'VarUI1FromDec',
    241:'VarDecFromI1',
    242:'VarDecFromUI2',
    243:'VarDecFromUI4',
    244:'VarI1FromUI1',
    245:'VarI1FromI2',
    246:'VarI1FromI4',
    247:'VarI1FromR4',
    248:'VarI1FromR8',
    249:'VarI1FromDate',
    250:'VarI1FromCy',
    251:'VarI1FromStr',
    252:'VarI1FromDisp',
    253:'VarI1FromBool',
    254:'VarI1FromUI2',
    255:'VarI1FromUI4',
    256:'VarI1FromDec',
    257:'VarUI2FromUI1',
    258:'VarUI2FromI2',
    259:'VarUI2FromI4',
    260:'VarUI2FromR4',
    261:'VarUI2FromR8',
    262:'VarUI2FromDate',
    263:'VarUI2FromCy',
    264:'VarUI2FromStr',
    265:'VarUI2FromDisp',
    266:'VarUI2FromBool',
    267:'VarUI2FromI1',
    268:'VarUI2FromUI4',
    269:'VarUI2FromDec',
    270:'VarUI4FromUI1',
    271:'VarUI4FromI2',
    272:'VarUI4FromI4',
    273:'VarUI4FromR4',
    274:'VarUI4FromR8',
    275:'VarUI4FromDate',
    276:'VarUI4FromCy',
    277:'VarUI4FromStr',
    278:'VarUI4FromDisp',
    279:'VarUI4FromBool',
    280:'VarUI4FromI1',
    281:'VarUI4FromUI2',
    282:'VarUI4FromDec',
    283:'BSTR_UserSize',
    284:'BSTR_UserMarshal',
    285:'BSTR_UserUnmarshal',
    286:'BSTR_UserFree',
    287:'VARIANT_UserSize',
    288:'VARIANT_UserMarshal',
    289:'VARIANT_UserUnmarshal',
    290:'VARIANT_UserFree',
    291:'LPSAFEARRAY_UserSize',
    292:'LPSAFEARRAY_UserMarshal',
    293:'LPSAFEARRAY_UserUnmarshal',
    294:'LPSAFEARRAY_UserFree',
    295:'LPSAFEARRAY_Size',
    296:'LPSAFEARRAY_Marshal',
    297:'LPSAFEARRAY_Unmarshal',
    298:'VarDecCmpR8',
    299:'VarCyAdd',
    300:'DllUnregisterServer',
    301:'OACreateTypeLib2',
    303:'VarCyMul',
    304:'VarCyMulI4',
    305:'VarCySub',
    306:'VarCyAbs',
    307:'VarCyFix',
    308:'VarCyInt',
    309:'VarCyNeg',
    310:'VarCyRound',
    311:'VarCyCmp',
    312:'VarCyCmpR8',
    313:'VarBstrCat',
    314:'VarBstrCmp',
    315:'VarR8Pow',
    316:'VarR4CmpR8',
    317:'VarR8Round',
    318:'VarCat',
    319:'VarDateFromUdateEx',
    322:'GetRecordInfoFromGuids',
    323:'GetRecordInfoFromTypeInfo',
    325:'SetVarConversionLocaleSetting',
    326:'GetVarConversionLocaleSetting',
    327:'SetOaNoCache',
    329:'VarCyMulI8',
    330:'VarDateFromUdate',
    331:'VarUdateFromDate',
    332:'GetAltMonthNames',
    333:'VarI8FromUI1',
    334:'VarI8FromI2',
    335:'VarI8FromR4',
    336:'VarI8FromR8',
    337:'VarI8FromCy',
    338:'VarI8FromDate',
    339:'VarI8FromStr',
    340:'VarI8FromDisp',
    341:'VarI8FromBool',
    342:'VarI8FromI1',
    343:'VarI8FromUI2',
    344:'VarI8FromUI4',
    345:'VarI8FromDec',
    346:'VarI2FromI8',
    347:'VarI2FromUI8',
    348:'VarI4FromI8',
    349:'VarI4FromUI8',
    360:'VarR4FromI8',
    361:'VarR4FromUI8',
    362:'VarR8FromI8',
    363:'VarR8FromUI8',
    364:'VarDateFromI8',
    365:'VarDateFromUI8',
    366:'VarCyFromI8',
    367:'VarCyFromUI8',
    368:'VarBstrFromI8',
    369:'VarBstrFromUI8',
    370:'VarBoolFromI8',
    371:'VarBoolFromUI8',
    372:'VarUI1FromI8',
    373:'VarUI1FromUI8',
    374:'VarDecFromI8',
    375:'VarDecFromUI8',
    376:'VarI1FromI8',
    377:'VarI1FromUI8',
    378:'VarUI2FromI8',
    379:'VarUI2FromUI8',
    401:'OleLoadPictureEx',
    402:'OleLoadPictureFileEx',
    411:'SafeArrayCreateVector',
    412:'SafeArrayCopyData',
    413:'VectorFromBstr',
    414:'BstrFromVector',
    415:'OleIconToCursor',
    416:'OleCreatePropertyFrameIndirect',
    417:'OleCreatePropertyFrame',
    418:'OleLoadPicture',
    419:'OleCreatePictureIndirect',
    420:'OleCreateFontIndirect',
    421:'OleTranslateColor',
    422:'OleLoadPictureFile',
    423:'OleSavePictureFile',
    424:'OleLoadPicturePath',
    425:'VarUI4FromI8',
    426:'VarUI4FromUI8',
    427:'VarI8FromUI8',
    428:'VarUI8FromI8',
    429:'VarUI8FromUI1',
    430:'VarUI8FromI2',
    431:'VarUI8FromR4',
    432:'VarUI8FromR8',
    433:'VarUI8FromCy',
    434:'VarUI8FromDate',
    435:'VarUI8FromStr',
    436:'VarUI8FromDisp',
    437:'VarUI8FromBool',
    438:'VarUI8FromI1',
    439:'VarUI8FromUI2',
    440:'VarUI8FromUI4',
    441:'VarUI8FromDec',
    442:'RegisterTypeLibForUser',
    443:'UnRegisterTypeLibForUser',
}
| [
585,
62,
14933,
796,
1391,
198,
220,
220,
220,
362,
32105,
44387,
3237,
420,
10100,
3256,
198,
220,
220,
220,
513,
32105,
44387,
3041,
3237,
420,
10100,
3256,
198,
220,
220,
220,
604,
32105,
44387,
3237,
420,
10100,
30659,
3256,
198,
... | 2.085868 | 4,833 |
import os
import music21 as m21
# Root directory of the **kern corpus to preprocess (placeholder path).
KERN_DATASET_PATH = "path/to/dataset"
# durations are expressed in quarter length
# Whitelist of note/rest durations (in quarter-note units) a song may use
# in order to be considered acceptable by has_acceptable_durations().
ACCEPTABLE_DURATIONS = [
    0.25, # 16th note
    0.5, # 8th note
    0.75,
    1.0, # quarter note
    1.5,
    2, # half note
    3,
    4 # whole note
]
def load_songs_in_kern(dataset_path):
    """Walk *dataset_path* recursively and parse every kern file with music21.

    :param dataset_path (str): Path to dataset
    :return songs (list of m21 streams): List containing all pieces
    """
    parsed_pieces = []
    for dirpath, _subdirs, filenames in os.walk(dataset_path):
        # keep only kern files; everything else in the corpus is skipped
        kern_files = [name for name in filenames if name[-3:] == "krn"]
        for name in kern_files:
            piece = m21.converter.parse(os.path.join(dirpath, name))
            parsed_pieces.append(piece)
    return parsed_pieces
def has_acceptable_durations(song, acceptable_durations):
    """Boolean routine that returns True if piece has all acceptable duration, False otherwise.

    :param song (m21 stream):
    :param acceptable_durations (list): List of acceptable duration in quarter length
    :return (bool):
    """
    # all() short-circuits on the first offending note/rest, matching the
    # early-return behavior of an explicit loop.
    return all(
        event.duration.quarterLength in acceptable_durations
        for event in song.flat.notesAndRests
    )
def transpose(song):
    """Transposes song to C maj/A min

    :param piece (m21 stream): Piece to transpose
    :return transposed_song (m21 stream):
    :raises ValueError: if the detected key mode is neither major nor minor
    """
    # get key from the song, if it was annotated in the score
    parts = song.getElementsByClass(m21.stream.Part)
    measures_part0 = parts[0].getElementsByClass(m21.stream.Measure)
    # NOTE(review): assumes the key signature sits at element index 4 of the
    # first measure — confirm this holds for the whole corpus.
    key = measures_part0[0][4]
    # estimate key using music21 when it was not annotated explicitly
    if not isinstance(key, m21.key.Key):
        key = song.analyze("key")
    # get interval for transposition. E.g., Bmaj -> Cmaj
    if key.mode == "major":
        interval = m21.interval.Interval(key.tonic, m21.pitch.Pitch("C"))
    elif key.mode == "minor":
        interval = m21.interval.Interval(key.tonic, m21.pitch.Pitch("A"))
    else:
        # The original code fell through here leaving `interval` unbound and
        # crashed with UnboundLocalError; fail loudly with a clear message.
        raise ValueError("Unsupported key mode: {!r}".format(key.mode))
    # transpose song by calculated interval (fixes `tranposed_song` typo)
    transposed_song = song.transpose(interval)
    return transposed_song
if __name__ == "__main__":
    # load songs
    songs = load_songs_in_kern(KERN_DATASET_PATH)
    print(f"Loaded {len(songs)} songs.")
    # Smoke-test the preprocessing helpers on the first piece only.
    song = songs[0]
    print(f"Has acceptable duration? {has_acceptable_durations(song, ACCEPTABLE_DURATIONS)}")
    # transpose song
    transposed_song = transpose(song)
    # Open both scores in the configured notation viewer for comparison.
    song.show()
    transposed_song.show()
| [
11748,
28686,
198,
11748,
2647,
2481,
355,
285,
2481,
198,
198,
42,
28778,
62,
35,
1404,
1921,
2767,
62,
34219,
796,
366,
6978,
14,
1462,
14,
19608,
292,
316,
1,
198,
198,
2,
288,
20074,
389,
6241,
287,
3860,
4129,
198,
2246,
5222,
... | 2.526263 | 990 |
from adapters.bigdata.spark.spark_submit.cli_main import register_adapter_impl
# Register the spark-submit adapter implementation as a module import side
# effect, so importing this package makes the adapter available.
register_adapter_impl()
6738,
46363,
13,
14261,
7890,
13,
2777,
668,
13,
2777,
668,
62,
46002,
13,
44506,
62,
12417,
1330,
7881,
62,
324,
3429,
62,
23928,
198,
198,
30238,
62,
324,
3429,
62,
23928,
3419
] | 3.121212 | 33 |
#!/usr/bin/env python
from sys import argv
# Sum of the code-point values of every character in the first CLI argument.
checksum = sum(ord(ch) for ch in argv[1])
print(checksum)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
6738,
25064,
1330,
1822,
85,
198,
198,
20274,
796,
657,
198,
1640,
269,
287,
1822,
85,
58,
16,
5974,
198,
220,
220,
220,
1255,
15853,
2760,
7,
66,
8,
198,
198,
4798,
7,
20274,
8,
... | 2.422222 | 45 |
from pydemic.testing import en
| [
6738,
279,
5173,
5314,
13,
33407,
1330,
551,
628
] | 3.555556 | 9 |
from unittest import mock
import pytest
from account import schema
def test_log_in_invalid_credentials(db):
    """
    If the provided credentials don't match a user, an exception should
    be raised.
    """
    mutation = schema.LogIn()
    # No user with this email exists in the test database (the `db` fixture
    # provides an empty one), so the mutation must refuse to log in.
    with pytest.raises(Exception):
        mutation.mutate(None, email="fake@example.com", password="password")
@mock.patch("account.schema.login", autospec=True)
def test_log_in_valid_credentials(mock_login, rf, user_factory):
    """
    If valid credentials are provided to the mutation, the user with
    those credentials should be logged in and returned.
    """
    mutation = schema.LogIn()
    password = "password"
    user = user_factory(password=password)
    request = rf.post("/")
    # NOTE(review): DummyInfo is not defined in this chunk — presumably a
    # test helper that wraps the request as graphene's `info` argument;
    # confirm it is imported/defined elsewhere in this module.
    result = mutation.mutate(
        DummyInfo(request), email=user.email, password=password
    )
    # The patched session login must be invoked exactly once...
    assert mock_login.call_count == 1
    # ...and the mutation returns the authenticated user.
    assert result.user == user
| [
6738,
555,
715,
395,
1330,
15290,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
1848,
1330,
32815,
628,
198,
198,
4299,
1332,
62,
6404,
62,
259,
62,
259,
12102,
62,
66,
445,
14817,
7,
9945,
2599,
198,
220,
220,
220,
37227,
198,
220,... | 2.868339 | 319 |
import sys
from UnitTest import UnitTest, IN_BROWSER
# syntax check
# import a, b, c
if True:
import imports.circ1
from imports import exec_order, imports as IMPORTS
from imports import exec_order as EXEC_ORDER
import I18N
from imports.classes import WithAttribute
import imports.decors # must be in this form
# Module-level registries used by the test cases in this file.
# NOTE: `global` at module scope is a no-op in CPython — presumably kept for
# the pyjs compiler; confirm before removing.
global names
names = {}
# testMetaClass
# testClassVars
# Global variable to test variable selection order
x = 'global test'
# testInheritedProperties
# testInheritedPropertyObjects
# testInheritedConstructors
# XXX doing this should throw a "Name" exception
#
#class ExampleSubclassDefinedBeforeSuperclass(ExampleSuperclassDefinedAfterSubclass):
#    pass
#class ExampleSuperclassDefinedAfterSubclass:
#    def someMethod(self):
#        return 'abc'
global gclasses
gclasses = {}
revealAccessLog = None
# Property class that gives python 2.5 a setter and a deleter
# Bug in pyjs that appears when the next lines are executed
# The 'property = Property' makes property a module variable, which is
# not set if the next line not is executed
property = property
if not hasattr(property, 'setter'):
    # Replace python 2.5 property class
    # NOTE(review): Property is not defined in this chunk — presumably a
    # backport class defined elsewhere in this file; confirm.
    property = Property
############################################################################
# generic decoerators for methods
############################################################################
| [
11748,
25064,
198,
6738,
11801,
14402,
1330,
11801,
14402,
11,
3268,
62,
11473,
22845,
1137,
198,
198,
2,
15582,
2198,
198,
2,
1330,
257,
11,
275,
11,
269,
198,
361,
6407,
25,
198,
220,
220,
220,
1330,
17944,
13,
21170,
16,
198,
673... | 3.708556 | 374 |
from .nms import boxes_nms | [
6738,
764,
77,
907,
1330,
10559,
62,
77,
907
] | 2.888889 | 9 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 5 21:46:50 2020
@author: pengning
"""
import numpy as np
import scipy.special as sp
import matplotlib.pyplot as plt
from .shell_domain import shell_rho_M, shell_rho_N
import mpmath
from mpmath import mp
from .dipole_field import mp_spherical_jn, mp_vec_spherical_jn, mp_spherical_yn, mp_vec_spherical_yn, mp_vec_spherical_djn, mp_vec_spherical_dyn
from .spherical_Green_Taylor_Arnoldi_speedup import mp_re, mp_im
from .shell_Green_Taylor_Arnoldi_spatialDiscretization import complex_to_mp, grid_integrate_trap, rgrid_Mmn_plot,rgrid_Nmn_plot, rgrid_Mmn_normsqr, rgrid_Nmn_normsqr, rgrid_Mmn_vdot,rgrid_Nmn_vdot
def shell_Green_grid_Mmn_vec_mp(n,k, rsqrgrid,rdiffgrid, RgMgrid, ImMgrid, vecMgrid):
    """
    evaluates G(r,r')*vecM(r') over a shell region from R1 to R2
    the region coordinates are contained in rsqrgrid, a grid of r^2, and rdiffgrid, the distances between neighboring grid points; these instead of the original rgrid are given so that they only need to be computed once in main Arnoldi method

    :param n: multipole order (not referenced in this routine; kept for a uniform signature)
    :param k: wavenumber
    :param rsqrgrid: grid of r^2 values over the shell
    :param rdiffgrid: spacings between neighboring radial grid points
    :param RgMgrid: regular M-wave sampled on the grid
    :param ImMgrid: outgoing-wave (imaginary part) M-wave sampled on the grid
    :param vecMgrid: field the Green's function is applied to
    :return: complex-valued grid representing G*vecM
    """
    #rsqrgrid = rgrid**2
    #rdiffgrid = np.diff(rgrid)
    RgMvecMrsqr_grid = RgMgrid*vecMgrid*rsqrgrid
    # imaginary part is a scalar multiple of RgM: k^3 * (overlap of RgM with
    # vecM over the whole shell, trapezoid rule) * RgM
    Im_newvecMgrid = k**3 * grid_integrate_trap(RgMvecMrsqr_grid, rdiffgrid) * RgMgrid
    # cumulative trapezoid integral from R1 up to each grid point
    Re_ImMfactgrid = np.zeros_like(rsqrgrid, dtype=type(mp.one))
    Re_ImMfactgrid[1:] = k**3 * np.cumsum((RgMvecMrsqr_grid[:-1]+RgMvecMrsqr_grid[1:])*rdiffgrid/2.0)
    rev_ImMvecMrsqr_grid = np.flip(ImMgrid*vecMgrid*rsqrgrid) #reverse the grid direction to evaluate integrands of the form kr' to kR2
    # cumulative trapezoid integral from each grid point out to R2 (built on
    # the reversed grid, then flipped back)
    Re_RgMfactgrid = np.zeros_like(rsqrgrid, dtype=type(mp.one))
    Re_RgMfactgrid[:-1] = k**3 * np.flip(np.cumsum( (rev_ImMvecMrsqr_grid[:-1]+rev_ImMvecMrsqr_grid[1:])*np.flip(rdiffgrid)/2.0 ))
    # real part combines both one-sided integrals
    Re_newvecMgrid = -ImMgrid*Re_ImMfactgrid - RgMgrid*Re_RgMfactgrid
    return Re_newvecMgrid + 1j*Im_newvecMgrid
def shell_Green_grid_Arnoldi_RgandImMmn_step_mp(n,k, invchi, rgrid,rsqrgrid,rdiffgrid, RgMgrid, ImMgrid, unitMvecs, Gmat, plotVectors=False):
    """
    using a mpf valued grid
    this method does one more Arnoldi step, given existing Arnoldi vectors in unitMvecs
    the last two entries in unitMvecs is unitMvecs[-2]=G*unitMvecs[-4] and unitMvecs[-1]=G*unitMvecs[-3] without orthogonalization and normalization
    its indices -1 and -3 because we are alternatingly generating new vectors starting from either the RgM line or the ImM line
    so len(unitMvecs) = len(Gmat)+2 going in and going out of the method
    this is setup for most efficient iteration since G*unitMvec is only computed once
    the unitMvecs list is modified on spot; a new enlarged Gmat nparray is returned at the end
    for each iteration we only advance Gmat by 1 row and 1 column
    Gmat here is an mpmatrix

    NOTE(review): invchi is not referenced in this routine — presumably kept
    for signature uniformity with sibling Arnoldi steps; confirm.
    """
    #first, begin by orthogonalizing and normalizing unitMvecs[-1]
    vecnum = Gmat.rows
    # Gram-Schmidt: subtract the projections onto all existing unit vectors;
    # the coefficients <u_i, G u_{vecnum-2}> already sit in Gmat's column
    # vecnum-2 (filled when that column was computed below on a prior call)
    for i in range(vecnum):
        coef = Gmat[i,vecnum-2]
        unitMvecs[-2] -= coef*unitMvecs[i]
    unitMvecs[-2][:] = mp_re(unitMvecs[-2][:]) #the Arnoldi vectors should all be real since RgM is a family head and only non-zero singular vector of AsymG
    norm = mp.sqrt(rgrid_Mmn_normsqr(unitMvecs[-2], rsqrgrid,rdiffgrid))
    unitMvecs[-2] /= norm
    if plotVectors:
        rgrid_Mmn_plot(unitMvecs[-2], rgrid)
    #get new vector by applying the Green's operator to the fresh unit vector
    newvecM = shell_Green_grid_Mmn_vec_mp(n,k, rsqrgrid,rdiffgrid, RgMgrid,ImMgrid, unitMvecs[-2])
    newvecM[:] = mp_re(newvecM)
    vecnum += 1
    # grow the projected (symmetric) Green's operator matrix by one row/column
    Gmat.rows+=1; Gmat.cols+=1
    for i in range(vecnum):
        Gmat[i,vecnum-1] = rgrid_Mmn_vdot(unitMvecs[i], newvecM, rsqrgrid,rdiffgrid)
        Gmat[vecnum-1,i] = Gmat[i,vecnum-1]
    unitMvecs.append(newvecM) #append to end of unitMvecs for next round of iteration
    return Gmat
def shell_Green_grid_Nmn_vec_mp(n,k, rsqrgrid,rdiffgrid, RgBgrid,RgPgrid, ImBgrid,ImPgrid, vecBgrid,vecPgrid):
    """
    evaluates G(r,r')*vecN(r') over a shell region from R1 to R2
    the region coordinates are contained in rsqrgrid, a grid of r^2, and rdiffgrid, the distances between neighboring grid points; these instead of the original rgrid are given so that they only need to be computed once in main Arnoldi method

    N-waves are represented by two component grids (B and P) — presumably the
    two vector spherical harmonic components; confirm against shell_domain.
    (RgBgrid, RgPgrid) and (ImBgrid, ImPgrid) are the regular and outgoing
    waves. Returns the pair of complex grids (B component, P component).
    """
    #rsqrgrid = rgrid**2
    #rdiffgrid = np.diff(rgrid)
    RgNvecNrsqr_grid = (RgBgrid*vecBgrid+RgPgrid*vecPgrid)*rsqrgrid
    # imaginary part shares one scalar overlap factor across both components
    imfac = k**3 * grid_integrate_trap(RgNvecNrsqr_grid, rdiffgrid)
    Im_newvecBgrid = imfac * RgBgrid
    Im_newvecPgrid = imfac * RgPgrid
    # cumulative trapezoid integral from R1 up to each grid point
    Re_ImNfactgrid = np.zeros_like(rsqrgrid, dtype=type(1j*mp.one))
    Re_ImNfactgrid[1:] = k**3 * np.cumsum((RgNvecNrsqr_grid[:-1]+RgNvecNrsqr_grid[1:])*rdiffgrid/2.0)
    rev_ImNvecNrsqr_grid = np.flip((ImBgrid*vecBgrid + ImPgrid*vecPgrid) * rsqrgrid) #reverse the grid direction to evaluate integrands of the form kr' to kR2
    # cumulative trapezoid integral from each grid point out to R2
    Re_RgNfactgrid = np.zeros_like(rsqrgrid, dtype=type(1j*mp.one))
    Re_RgNfactgrid[:-1] = k**3 * np.flip(np.cumsum( (rev_ImNvecNrsqr_grid[:-1]+rev_ImNvecNrsqr_grid[1:])*np.flip(rdiffgrid)/2.0 ))
    Re_newvecBgrid = -ImBgrid*Re_ImNfactgrid - RgBgrid*Re_RgNfactgrid
    Re_newvecPgrid = -ImPgrid*Re_ImNfactgrid - RgPgrid*Re_RgNfactgrid - vecPgrid #last term is delta contribution
    return Re_newvecBgrid + 1j*Im_newvecBgrid, Re_newvecPgrid + 1j*Im_newvecPgrid
def shell_Green_grid_Arnoldi_RgandImNmn_step_mp(n,k, invchi, rgrid,rsqrgrid,rdiffgrid, RgBgrid,RgPgrid, ImBgrid,ImPgrid, unitBvecs,unitPvecs, Gmat, plotVectors=False):
    """
    this method does one more Arnoldi step, given existing Arnoldi vectors in unitNvecs
    the last two entries in unitMvecs is unitNvecs[-2]=G*unitNvecs[-4] and unitNvecs[-1]=G*unitNvecs[-3] without orthogonalization and normalization
    its indices -1 and -3 because we are alternatingly generating new vectors starting from either the RgN line or the ImN line
    so len(unitNvecs) = len(Gmat)+2 going in and going out of the method
    this is setup for most efficient iteration since G*unitNvec is only computed once
    the unitNvecs lists is modified on spot; a new enlarged Gmat mpmatrix is returned at the end
    for each iteration we only advance Gmat by 1 row and 1 column

    The N-wave Arnoldi vectors are carried as parallel lists of B and P
    component grids (unitBvecs / unitPvecs), updated in lockstep.
    NOTE(review): invchi is not referenced in this routine — presumably kept
    for signature uniformity with sibling Arnoldi steps; confirm.
    """
    #first, begin by orthogonalizing and normalizing unitMvecs[-1]
    vecnum = Gmat.rows
    # Gram-Schmidt against all existing unit vectors, using the projection
    # coefficients already stored in Gmat's column vecnum-2
    for i in range(vecnum):
        coef = Gmat[i,vecnum-2]
        unitBvecs[-2] -= coef*unitBvecs[i]; unitPvecs[-2] -= coef*unitPvecs[i]
    #the Arnoldi vectors should all be real since RgM is a family head and only non-zero singular vector of AsymG
    unitBvecs[-2][:] = mp_re(unitBvecs[-2][:]); unitPvecs[-2][:] = mp_re(unitPvecs[-2][:])
    norm = mp.sqrt(rgrid_Nmn_normsqr(unitBvecs[-2],unitPvecs[-2], rsqrgrid,rdiffgrid))
    unitBvecs[-2] /= norm; unitPvecs[-2] /= norm
    if plotVectors:
        rgrid_Nmn_plot(unitBvecs[-2],unitPvecs[-2], rgrid)
    #get new vector by applying the Green's operator to the fresh unit vector
    newvecB,newvecP = shell_Green_grid_Nmn_vec_mp(n,k, rsqrgrid,rdiffgrid, RgBgrid,RgPgrid, ImBgrid,ImPgrid, unitBvecs[-2],unitPvecs[-2])
    newvecB[:] = mp_re(newvecB); newvecP[:] = mp_re(newvecP)
    vecnum += 1
    # grow the projected (symmetric) Green's operator matrix by one row/column
    Gmat.rows+=1; Gmat.cols+=1
    for i in range(vecnum):
        Gmat[i,vecnum-1] = rgrid_Nmn_vdot(unitBvecs[i],unitPvecs[i], newvecB,newvecP, rsqrgrid,rdiffgrid)
        Gmat[vecnum-1,i] = Gmat[i,vecnum-1]
    unitBvecs.append(newvecB); unitPvecs.append(newvecP) #append to end of unitNvecs for next round of iteration
    return Gmat
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
3300,
2447,
220,
642,
2310,
25,
3510,
25,
1120,
12131,
198,
198,
31,
9800,
25,
279,
1516,... | 2.270524 | 3,301 |
# Generated by Django 3.1.4 on 2021-03-06 12:10
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
19,
319,
33448,
12,
3070,
12,
3312,
1105,
25,
940,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
from model import ModelStep, Group
| [
6738,
2746,
1330,
9104,
8600,
11,
4912,
628,
198
] | 4.111111 | 9 |
import warnings
import torch
warnings.filterwarnings('ignore')
import os
from fastNLP.core.dataset import DataSet
from fastNLP.api.utils import load_url
from fastNLP.api.processor import ModelProcessor
from reproduction.chinese_word_segment.cws_io.cws_reader import ConllCWSReader
from reproduction.pos_tag_model.pos_reader import ZhConllPOSReader
from reproduction.Biaffine_parser.util import ConllxDataLoader, add_seg_tag
from fastNLP.core.instance import Instance
from fastNLP.api.pipeline import Pipeline
from fastNLP.core.metrics import SpanFPreRecMetric
from fastNLP.api.processor import IndexerProcessor
# TODO add pretrain urls
# Download URLs for the pretrained pipeline checkpoints used by the API
# classes in this module (chinese word segmentation, POS tagging, parsing).
model_urls = {
    "cws": "http://123.206.98.91:8888/download/cws_crf_1_11-457fc899.pkl",
    "pos": "http://123.206.98.91:8888/download/pos_tag_model_20190108-f3c60ee5.pkl",
    "parser": "http://123.206.98.91:8888/download/biaffine_parser-3a2f052c.pkl"
}
class POS(API):
    """FastNLP API for Part-Of-Speech tagging.

    :param str model_path: the path to the model.
    :param str device: device name such as "cpu" or "cuda:0". Use the same notation as PyTorch.
    """
    def predict(self, content):
        """Tag the given sentence(s) with part-of-speech labels.

        :param content: list of list of str. Each string is a token(word).
        :return answer: list of list of str. Each string is a tag.
        """
        # self.pipeline is set when a model is loaded (presumably by the API
        # base class's load method, outside this chunk — confirm).
        if not hasattr(self, "pipeline"):
            raise ValueError("You have to load model first.")
        sentence_list = []
        # 1. check the input type: wrap a single sentence string in a list
        if isinstance(content, str):
            sentence_list.append(content)
        elif isinstance(content, list):
            sentence_list = content
        # 2. build the DataSet that the pipeline consumes
        dataset = DataSet()
        dataset.add_field("words", sentence_list)
        # 3. run the processing pipeline over the dataset
        self.pipeline(dataset)
        # NOTE(review): decode_tags is not defined in this chunk — presumably
        # a module-level helper converting the model's tag encoding; confirm.
        dataset.apply(decode_tags, new_field_name="tag_output")
        output = dataset.field_arrays["tag_output"].content
        # mirror the input type: single string in -> single tag sequence out
        if isinstance(content, str):
            return output[0]
        elif isinstance(content, list):
            return output
| [
11748,
14601,
198,
198,
11748,
28034,
198,
198,
40539,
654,
13,
24455,
40539,
654,
10786,
46430,
11537,
198,
11748,
28686,
198,
198,
6738,
3049,
45,
19930,
13,
7295,
13,
19608,
292,
316,
1330,
6060,
7248,
198,
198,
6738,
3049,
45,
19930... | 2.420991 | 848 |
import decimal
import logging
import os
import smtplib
import time
import warnings
from datetime import datetime
import pandas as pd
import zmq
from binance.client import Client
from coinmarketcap import Market
from colorama import init, Fore, Back, Style
# noinspection PyUnresolvedReferences
from sentimentAnalyse import SentimentAnalyse
# Enable ANSI colour handling on Windows consoles (colorama).
init(convert=True)
warnings.filterwarnings("ignore")
# One log file per hour, named from the current timestamp.
log_file = os.path.join(os.getcwd(), 'logs', str(time.strftime('%Y %m %d %H')) + ' activity.log')
logging.basicConfig(filename=log_file, level=logging.INFO,
                    format='%(asctime)s:%(levelname)s:%(message)s')
# TODO: read korean jgl 101 tips
# TODO: explain why this code is so good to client write on pad
# TODO: binance.products()
try:
    # NOTE(review): CCTP is not defined in this chunk — presumably the main
    # trading-bot class defined elsewhere in this module; confirm.
    CCTP()
except Exception as bigE:
    # logging.exception records the full traceback before the process exits.
    logging.exception('program crashed {}'.format(bigE))
| [
11748,
32465,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
895,
83,
489,
571,
198,
11748,
640,
198,
11748,
14601,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
1976,
76,
80,
198,
6738,
... | 2.90411 | 292 |
from flask import Blueprint
# Blueprint collecting the application's main routes; the views module is
# imported afterwards so its route decorators can register against it.
main = Blueprint('main', __name__)
# try:
from app.main import views
# except:
# print('*'*50,"Error","*"*50)
| [
6738,
42903,
1330,
39932,
198,
198,
12417,
796,
39932,
10786,
12417,
3256,
11593,
3672,
834,
8,
198,
2,
1949,
25,
198,
6738,
598,
13,
12417,
1330,
5009,
198,
2,
2845,
25,
198,
2,
220,
220,
220,
220,
3601,
10786,
9,
6,
9,
1120,
553... | 2.769231 | 52 |
"""
this module contains the neccesary datastructures that are used by the utility functions in the cruds module
Contents
--------
Expression
This class is used to construct a function with the specified expression
"""
#types used to give type hintings wherever possible
import typing
#the sqlalchemy DeclarativeMeta class is imported to check if methods that require model arguments are actually modelsS
from sqlalchemy.ext.declarative.api import DeclarativeMeta as base
from sqlalchemy import or_, and_
from .datastructures import Queue, Stack
class Expression:
    """
    This class is used to construct an expression node in the expression parse tree

    Methods
    -------
    __init__
        constructor, takes the model class the atrribute, the operator and a value to construct an expression function
    get_expression
        returns the constructed expression
    """

    def __init__(self, model,
                 module_attribute: str, operator: str, value):
        """
        Constructor for the Expression class.

        The constructor takes a model, the desired_attribute, the comparitor operator and the value to compare to.
        A lambda function is built where the contents of this function is the expression built with the arguments described.

        Parameters
        ----------
        model
            sqlalchemy.ext.declarative.api.DeclarativeMeta
            class which inherits the base class from the sqlalchemy declarative system
        module_attribute
            str
            the desired attribute of the passed model which will be used to build the left side of the expression
        operator
            str
            the boolean operator of the expression ('==', '>=', '<=', '>', '<' or '!=')
        value
            str OR int OR float OR bool
            the value which will be used to construct the right side of the expression

        Raises
        ------
        TypeError
            if any argument has an unsupported type
        ValueError
            if `operator` is not one of the supported comparison operators
        AttributeError
            if `module_attribute` is not an attribute of `model`
        """
        # ensure arguments have valid types
        if not isinstance(model, base):
            raise TypeError('model must be of type sqlalchemy.ext.declarative.api.DeclativeMeta')
        elif not isinstance(module_attribute, str):
            raise TypeError('module_attribute must be of type str')
        elif not isinstance(operator, str):
            raise TypeError('operator must be of type str')
        elif not isinstance(value, (str,int,float,bool)):
            raise TypeError('value must be of type str OR int OR float OR bool')
        # call __getattribute__ to ensure that the object attribute exists
        model.__getattribute__(model,module_attribute)
        # dictionary with a lazy (lambda) expression builder per operator
        valid_operators = {
            '==': lambda: model.__getattribute__(model,module_attribute) == value,
            '>=': lambda: model.__getattribute__(model,module_attribute) >= value,
            '<=': lambda: model.__getattribute__(model,module_attribute) <= value,
            '>': lambda: model.__getattribute__(model,module_attribute) > value,
            '<': lambda: model.__getattribute__(model,module_attribute) < value,
            '!=': lambda: model.__getattribute__(model,module_attribute) != value
        }
        # Validate the operator BEFORE calling the builder. The original code
        # did `valid_operators.get(operator)()` and checked for None after,
        # so an invalid operator raised "TypeError: 'NoneType' object is not
        # callable" and the intended ValueError below was unreachable.
        if operator not in valid_operators:
            raise ValueError('operator is not valid')
        # build the appropriate expression
        self.expression = valid_operators[operator]()

    def get_expression(self):
        """
        get constructed expression

        Returns
        -------
        function
            the function with the expression
        """
        return self.expression
class Operator:
    """
    This class is used to construct an Operator node in the expression parse tree

    An Operator node holds a boolean operator ('and' / 'or') and a queue of
    child nodes (Expression or Operator instances).
    """
    # the only boolean operators the parse tree supports
    VALID_OPERATORS = set(['and', 'or'])

    def __init__(self, operator = None):
        """
        Constructor for Operator class.

        Parameters
        ----------
        operator
            str, optional
            the boolean operator of this node; validated when provided,
            otherwise the node starts without an operator
        """
        # check if arguments are of correct type
        if operator is not None:
            self.set_operator(operator)
        else:
            self.operator = None
        self.children = Queue()

    def set_operator(self, operator):
        """
        validate and set the boolean operator of this node

        Added because __init__ above and ParseTree both call this method,
        but the original class never defined it, so every call raised
        AttributeError.

        Raises
        ------
        TypeError
            if operator is not a str
        ValueError
            if operator is not in VALID_OPERATORS
        """
        if not isinstance(operator, str):
            raise TypeError('operator must be of type str')
        if operator not in self.VALID_OPERATORS:
            raise ValueError('operator is not valid')
        self.operator = operator

    def enqueue_child(self, child):
        """
        append a child node (Expression or Operator) to this node's queue

        Added because ParseTree calls it but the original class never
        defined it. NOTE(review): assumes the project's Queue exposes an
        enqueue() method, mirroring the dequeue()/peek()/isEmpty() calls
        used in ParseTree — confirm against .datastructures.Queue.
        """
        self.children.enqueue(child)

    def get_operator(self):
        """
        returns the boolean operator of this operator node

        Returns
        -------
        str
            the boolean operator of this operator node
        """
        return self.operator
class ParseTree:
"""
This class is used to construct a parse tree and then evalaute this parse tree as a filter for a model query
Methods
__init__
class constructor, takes model of type sqlalchemy.ext.declarative.api.DeclarativeMeta and filters of type List[Dict] and initialises the parsetree
query
from the constructed parse tree query the model and return the results
"""
    def __init__(self, model, filters: typing.List[typing.Dict]):
        """Constructor for ParseTree class.

        Builds a logical parse tree from the flat ``filters`` list. Because
        ``and`` binds tighter than ``or``, ``or`` operator nodes always end
        up as parents of ``and`` operator nodes in the resulting tree.

        Parameters
        ----------
        model
            sqlalchemy.ext.declarative.api.DeclarativeMeta
            the desired model to be queried
        filters
            list[dict]
            the list of filters to use in the query; each element maps a
            column name to ``{'data': <value>, 'comparitor': <operator>}``
            and every element except the last also carries a ``'join'`` key
            ('and' / 'or') linking it to the next filter.

        Raises
        ------
        TypeError
            if ``filters`` is not a list
        """
        # Reject anything that is not a list of filter dicts up front.
        if not isinstance(filters, list):
            raise TypeError('filters must be of type list with dict children')
        # Consume the filters in order via a FIFO queue.
        filt_queue = Queue(filters)
        # Stack tracking the operator node that adopts the next expression.
        traverse_stack = Stack()
        self.model = model
        # Start with an empty (operator-less) root and make it the current parent.
        self.root = Operator()
        traverse_stack.push(self.root)
        while not filt_queue.isEmpty():
            # Dequeue the element and peek ahead so we can tell whether it is
            # the last one; then build the expression from the element's values.
            elem = filt_queue.dequeue()
            elem_next = filt_queue.peek()
            # The filter dict's first key is the column it constrains.
            elem_column = list(elem.keys())[0]
            elem_value = elem[elem_column]['data']
            elem_operator = elem[elem_column]['comparitor']
            # Leaf node comparing model.<column> against the value.
            elem_expression = Expression(model, elem_column, elem_operator, elem_value)
            current_parent = traverse_stack.pop()
            current_operator = current_parent.get_operator()
            # If the peeked next element is None this is the final element of the queue.
            if elem_next is None:
                # If the parent never received an operator there was only one
                # element in the filters list, so the tree degenerates to the
                # single expression itself.
                if current_operator is None:
                    self.root = elem_expression
                # Otherwise this is the final element of the filter list, so
                # just append it to the parent.
                else:
                    current_parent.enqueue_child(elem_expression)
            # Otherwise the element carries a logical join operator.
            else:
                # The logical operator joining this element to the next one.
                elem_join = elem['join']
                # Dispatch on whether this operator is an "or" OR an "and".
                if elem_join == 'or':
                    # Because 'or' is lower in logical precedence, 'or'
                    # operators must always be the parents of 'and' nodes.
                    if current_operator == 'or':
                        # Parent is already an 'or' node: just append the
                        # expression to its child queue ...
                        current_parent.enqueue_child(elem_expression)
                        # ... and re-push the same node onto the traverse stack.
                        traverse_stack.push(current_parent)
                    elif current_operator == 'and':
                        # The current parent on the traverse stack is an 'and';
                        # check whether an enclosing 'or' parent already exists.
                        if traverse_stack.isEmpty():
                            parent_parent = None
                        else:
                            parent_parent = traverse_stack.pop()
                        # No enclosing 'or' yet: finish the 'and' group with
                        # this expression, create a new 'or' root above it and
                        # continue from that root.
                        if parent_parent is None:
                            current_parent.enqueue_child(elem_expression)
                            self.root = Operator('or')
                            self.root.enqueue_child(current_parent)
                            traverse_stack.push(self.root)
                        else:
                            # An enclosing 'or' already exists: append the
                            # expression to it and continue from there.
                            parent_parent.enqueue_child(elem_expression)
                            traverse_stack.push(parent_parent)
                    else:
                        # Operator still unset: this is the first element, so
                        # set the root operator to 'or' and enqueue the child.
                        current_parent.set_operator('or')
                        current_parent.enqueue_child(elem_expression)
                        traverse_stack.push(current_parent)
                elif elem_join == 'and':
                    # Check the current operator.
                    if current_operator == 'or':
                        # Under an 'or' parent a new 'and' operator node must
                        # be opened.
                        child = Operator('and')
                        # The current expression becomes its first child.
                        child.enqueue_child(elem_expression)
                        # Attach the new 'and' node to the root 'or' parent.
                        current_parent.enqueue_child(child)
                        # Push the 'or' parent back first so it stays below ...
                        traverse_stack.push(current_parent)
                        # ... the new 'and' node, which becomes the active parent.
                        traverse_stack.push(child)
                    elif current_operator == 'and':
                        # Already inside an 'and' group: append the expression
                        # and push the parent back onto the stack.
                        current_parent.enqueue_child(elem_expression)
                        traverse_stack.push(current_parent)
                    else:
                        # Otherwise this filter element is the first in the
                        # list: set the root operator to 'and' ...
                        current_parent.set_operator('and')
                        # ... enqueue the child to the operator parent ...
                        current_parent.enqueue_child(elem_expression)
                        # ... and push the parent back onto the stack.
                        traverse_stack.push(current_parent)
    def query(self, session):
        """Query the model using the constructed parse tree.

        Parameters
        ----------
        session
            a session instance created from the session factory

        Returns
        -------
        Query
            the queried data

        Notes
        -----
        Draining the tree mutates ``self.root``'s child queues, so the
        tree is effectively single-use.
        """
        # Get a Query object for the current model for the current session.
        results = session.query(self.model)
        if isinstance(self.root, Expression):
            # The root is a single expression: filter on it and return the
            # resultant Query object directly.
            results = results.filter(self.root.get_expression())
            return results
        else:
            # The root is an instance of the Operator class.
            if self.root.get_operator() == 'and':
                # An 'and' root has no further Operator children.
                filters = []
                # Drain the children, collect their expressions, and apply
                # them in one filter() call (filter AND-combines its args).
                while not self.root.isEmpty():
                    filters.append(self.root.dequeue_child().get_expression())
                results = results.filter(*filters)
                return results
            elif self.root.get_operator() == 'or':
                # Queue preserving the order of bare expression children
                # relative to one another.
                or_queue = Queue()
                # Walk over the root's children.
                filters = []
                while not self.root.isEmpty():
                    current_child = self.root.dequeue_child()
                    # Bare expressions are deferred via the or_queue.
                    if isinstance(current_child, Expression):
                        or_queue.enqueue(current_child)
                    # Operator children ('and' groups) each contribute one
                    # list of expressions.
                    elif isinstance(current_child, Operator):
                        one_filter = []
                        while not current_child.isEmpty():
                            one_filter.append(current_child.dequeue_child().get_expression())
                        filters.append(one_filter)
                    # Flush any collected 'and' groups: AND within a group,
                    # OR across groups, then reset for the next iteration.
                    if len(filters) > 0 :
                        filts = []
                        for filt in filters:
                            filts.append(and_(*filt))
                        results = results.filter(or_(*filts))
                        filters = []
                # Filter the data on the bare expression children of the root.
                if not or_queue.isEmpty():
                    while not or_queue.isEmpty():
                        filters.append(or_queue.dequeue().get_expression())
                    results = results.filter(or_(*filters))
                return results
| [
37811,
198,
5661,
8265,
4909,
262,
497,
535,
274,
560,
4818,
459,
1356,
942,
326,
389,
973,
416,
262,
10361,
5499,
287,
262,
1067,
24786,
8265,
198,
198,
15842,
198,
982,
198,
16870,
2234,
198,
220,
220,
220,
220,
770,
1398,
318,
97... | 2.265572 | 6,149 |
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cudf
import cugraph
import numpy as np
def from_cudf_edgelist(df, source="src", target="dst"):
    """Build an enhanced multigraph from a cuDF edgelist.

    Unlike a plain graph construction, duplicate edges are preserved, and
    the returned node table carries extra columns (node degree; edge
    bundling data comes from the edge-table helper).

    Parameters
    ----------
    df : cudf.DataFrame
        Edgelist frame, one row per edge.
    source, target : str
        Names of the endpoint columns in ``df``.

    Returns
    -------
    tuple
        ``(graph, nodes, edges)``: the ``cugraph.MultiDiGraph``, the node
        table with a degree column joined in, and the edge table.
    """
    frame = drop_index(df)
    node_table = make_nodes(frame, source, target)
    edge_table = make_edges(frame, source, target, node_table)

    multigraph = cugraph.MultiDiGraph()
    # Populate the edgelist directly so duplicate edges are not collapsed.
    multigraph.edgelist = cugraph.Graph.EdgeList(edge_table["src"], edge_table["dst"])

    # Attach each node's degree, computed from the populated edgelist.
    degree_by_vertex = multigraph.degree().set_index("vertex")
    node_table = node_table.set_index("id", drop=False).join(degree_by_vertex)

    return multigraph, drop_index(node_table.sort_index()), edge_table
| [
2,
15069,
357,
66,
8,
33448,
11,
15127,
23929,
44680,
6234,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262... | 3.18961 | 385 |
import copy
import random
# Consider using the modules imported above.
| [
11748,
4866,
198,
11748,
4738,
198,
2,
12642,
1262,
262,
13103,
17392,
2029,
13,
628,
628,
220,
220,
220,
220,
628
] | 3.809524 | 21 |
from __future__ import absolute_import
from sentry.testutils import AcceptanceTestCase
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
6738,
1908,
563,
13,
9288,
26791,
1330,
21699,
590,
14402,
20448,
628
] | 4.045455 | 22 |
'''
uix.stacklayout tests
=====================
'''
import unittest
from kivy.uix.stacklayout import StackLayout
from kivy.uix.widget import Widget
| [
7061,
6,
198,
84,
844,
13,
25558,
39786,
5254,
198,
4770,
1421,
28,
198,
7061,
6,
198,
198,
11748,
555,
715,
395,
198,
198,
6738,
479,
452,
88,
13,
84,
844,
13,
25558,
39786,
1330,
23881,
32517,
198,
6738,
479,
452,
88,
13,
84,
... | 2.960784 | 51 |
import logging
from django.contrib import auth
from django.urls import reverse
from django.utils.deprecation import MiddlewareMixin # https://stackoverflow.com/questions/42232606/django
# -exception-middleware-typeerror-object-takes-no-parameters
from django.conf import settings
from ...permissions import is_authenticated
from ...utils import set_session_key
from ... import app_settings
# Module-level logger shared by the middleware classes below.
logger = logging.getLogger('django_sso_app')

# Admin URL prefix, overridable via the ADMIN_URL Django setting
# (defaults to 'admin/'); always normalized with a leading slash.
ADMIN_URL = '/{}'.format(getattr(settings, 'ADMIN_URL', 'admin/'))

# Paths that remain reachable while a user's profile is incomplete
# (resolved once at import time via reverse()).
PROFILE_INCOMPLETE_ENABLED_PATHS = [
    reverse('javascript-catalog'),
    reverse('profile.complete'),
]

# NOTE(review): intentionally the same object as
# PROFILE_INCOMPLETE_ENABLED_PATHS, not a copy — mutating one list
# affects both.
USER_TO_SUBSCRIBE_ENABLED_PATHS = PROFILE_INCOMPLETE_ENABLED_PATHS
class DjangoSsoAppAuthenticationBaseMiddleware(MiddlewareMixin):
    """
    Base middleware for django-sso-app API-gateway authentication.

    See django.contrib.auth.middleware.RemoteUserMiddleware.
    """
    # Name of request header to grab username from. This will be the key as
    # used in the request.META dictionary, i.e. the normalization of headers to
    # all uppercase and the addition of "HTTP_" prefix apply.
    consumer_id_header = app_settings.APIGATEWAY_CONSUMER_CUSTOM_ID_HEADER
    # Values/headers identifying anonymous API-gateway consumers; all come
    # from app_settings — presumably used by subclasses to detect
    # unauthenticated requests (subclass code not visible here; confirm).
    anonymous_consumer_custom_ids = app_settings.APIGATEWAY_ANONYMOUS_CONSUMER_IDS
    anonymous_consumer_header = app_settings.APIGATEWAY_ANONYMOUS_CONSUMER_HEADER
    anonymous_consumer_header_value = app_settings.APIGATEWAY_ANONYMOUS_CONSUMER_HEADER_VALUE
@staticmethod
@staticmethod
def _remove_invalid_user(request):
"""
Removes the current authenticated user in the request which is invalid.
"""
if is_authenticated(request.user):
logger.info('removing invalid user "{}"'.format(request.user))
auth.logout(request)
@staticmethod
@staticmethod
@staticmethod
| [
11748,
18931,
198,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
6284,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
198,
6738,
42625,
14208,
13,
26791,
13,
10378,
8344,
341,
1330,
6046,
1574,
35608,
259,
220,
1303,
3740,
1378,
255... | 2.66035 | 686 |
'''
Copyright (C) 2020 Link Shortener Authors (see AUTHORS in Documentation).
Licensed under the MIT (Expat) License (see LICENSE in Documentation).
'''
from decouple import config
from unittest import TestCase
from json import loads
from link_shortener.server import create_app
| [
7061,
6,
198,
15269,
357,
34,
8,
12131,
7502,
10073,
877,
46665,
357,
3826,
37195,
20673,
287,
43925,
737,
198,
26656,
15385,
739,
262,
17168,
357,
3109,
8071,
8,
13789,
357,
3826,
38559,
24290,
287,
43925,
737,
198,
7061,
6,
198,
673... | 3.773333 | 75 |