content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from nb_autodoc.pycode.vcpicker import extract_all_comments, VariableCommentPicker
from nb_autodoc.pycode.overload import extract_all_overloads, OverloadPicker
| [
6738,
299,
65,
62,
2306,
375,
420,
13,
9078,
8189,
13,
85,
13155,
15799,
1330,
7925,
62,
439,
62,
15944,
11,
35748,
21357,
47,
15799,
198,
6738,
299,
65,
62,
2306,
375,
420,
13,
9078,
8189,
13,
2502,
2220,
1330,
7925,
62,
439,
62,... | 3.076923 | 52 |
# Auto generated by 'inv collect-airflow'
from airfly._vendor.airflow.models.baseoperator import BaseOperator
| [
2,
11160,
7560,
416,
705,
16340,
2824,
12,
958,
11125,
6,
198,
6738,
1633,
12254,
13557,
85,
18738,
13,
958,
11125,
13,
27530,
13,
8692,
46616,
1330,
7308,
18843,
1352,
628
] | 3.580645 | 31 |
from reportlab.pdfgen import canvas
from GridPdf.myfunc import *
from settings import Settings
# Create a canvas
setting = Settings()
cSpec = CanvasSpec(setting)
c = canvas.Canvas(cSpec.filename + '.pdf', cSpec.size)
# Main draw func with inputs (object, detailTF, canvas, color, lineWidth)
draw(cSpec, 1, c, setting.colorMinor, setting.lineWidthMinor)
# if setting.majorLine is True:
# draw(cSpec, 0, c, setting.colorMajor, setting.lineWidthMajor)
# Footer
# c.setFont("Times-Roman", 7)
# c.drawString(cSpec.xStart, cSpec.yStart / 2, setting.footer)
c.showPage()
c.save()
| [
6738,
989,
23912,
13,
12315,
5235,
1330,
21978,
198,
6738,
24846,
47,
7568,
13,
1820,
20786,
1330,
1635,
198,
6738,
6460,
1330,
16163,
198,
198,
2,
13610,
257,
21978,
198,
33990,
796,
16163,
3419,
198,
66,
22882,
796,
1680,
11017,
22882... | 2.905 | 200 |
# -*- coding: utf-8 -*-
#
import sys
import numpy
import pytest
import asciiplotlib as apl
@pytest.mark.skipif(
sys.stdout.encoding not in ["UTF-8", "UTF8"],
reason="Need UTF-8 terminal (not {})".format(sys.stdout.encoding),
)
@pytest.mark.skipif(
sys.stdout.encoding not in ["UTF-8", "UTF8"],
reason="Need UTF-8 terminal (not {})".format(sys.stdout.encoding),
)
@pytest.mark.skipif(
sys.stdout.encoding not in ["UTF-8", "UTF8"],
reason="Need UTF-8 terminal (not {})".format(sys.stdout.encoding),
)
@pytest.mark.skipif(
sys.stdout.encoding not in ["UTF-8", "UTF8"],
reason="Need UTF-8 terminal (not {})".format(sys.stdout.encoding),
)
@pytest.mark.skipif(
sys.stdout.encoding not in ["UTF-8", "UTF8"],
reason="Need UTF-8 terminal (not {})".format(sys.stdout.encoding),
)
@pytest.mark.skipif(
sys.stdout.encoding not in ["UTF-8", "UTF8"],
reason="Need UTF-8 terminal (not {})".format(sys.stdout.encoding),
)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
11748,
25064,
198,
198,
11748,
299,
32152,
198,
11748,
12972,
9288,
198,
198,
11748,
355,
979,
24705,
313,
8019,
355,
257,
489,
628,
198,
31,
9078,
9288,
13,
4... | 2.387255 | 408 |
from django.contrib.auth import authenticate
from django.core.handlers.wsgi import WSGIRequest
from django.test import Client, RequestFactory, testcases
import ariadne
from graphql import ExecutionResult
from .settings import jwt_settings
from .shortcuts import get_token
| [
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
8323,
5344,
198,
6738,
42625,
14208,
13,
7295,
13,
4993,
8116,
13,
18504,
12397,
1330,
25290,
18878,
18453,
198,
6738,
42625,
14208,
13,
9288,
1330,
20985,
11,
19390,
22810,
11,
1332,
... | 3.693333 | 75 |
import cv2
import argparse
from Retinaface.Retinaface import FaceDetector
from pathlib import Path
parser = argparse.ArgumentParser(description='take a picture')
parser.add_argument('--name', '-n', default='unknown', type=str, help='input the name of the recording person')
args = parser.parse_args()
save_path = Path('data/facebank')/args.name
if not save_path.exists():
save_path.mkdir()
# init camera
cap = cv2.VideoCapture(1)
cap.set(3, 1280)
cap.set(4, 720)
# init detector
detector = FaceDetector(name='resnet', weight_path='Retinaface/weights/resnet50.pth', device='cuda')
count = 4
while cap.isOpened():
_, frame = cap.read()
frame = cv2.putText(
frame, f'Press t to take {count} pictures, then finish...', (10, 50),
cv2.FONT_HERSHEY_SIMPLEX, 2, (0,100,0), 3, cv2.LINE_AA)
if cv2.waitKey(1) & 0xFF == ord('t'):
count -= 1
faces = detector.detect_align(frame)[0].cpu().numpy()
if len(faces.shape) > 1:
cv2.imwrite(f'{save_path}/{args.name}_{count}.jpg', faces[0])
if count <= 0:
break
else:
print('there is not face in this frame')
cv2.imshow("My Capture", frame)
cap.release()
cv2.destroyAllWindows()
| [
11748,
269,
85,
17,
198,
11748,
1822,
29572,
198,
6738,
4990,
1437,
2550,
13,
9781,
1437,
2550,
1330,
15399,
11242,
9250,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
48610,
796,
1822,
29572,
13,
28100,
1713,
46677,
7,
11213,
11639,
20... | 2.316667 | 540 |
import os, shutil, subprocess, signal, sys, cv2
stream_path = "video"
fps = 16
# ffmpeg stream command
raw_command = "ffmpeg -protocol_whitelist file,udp,rtp -i sololink.sdp -y -vf fps=" + str(fps) + " -f image2 " + stream_path + "/img%09d.bmp"
if __name__ == "__main__":
# empty stream directory contents
if os.path.exists(stream_path):
shutil.rmtree(stream_path)
os.makedirs(stream_path)
signal.signal(signal.SIGINT, signal_handler)
# begin grabbing frames from stream
stream_process = subprocess.Popen(raw_command, stdout=subprocess.PIPE, shell=True, preexec_fn=os.setsid)
while True:
files = os.listdir(stream_path)
for x in range(0, len(files) - 1):
if x == len(files) - 2:
img = cv2.imread(stream_path + "/" + files[x])
if img is not None:
cv2.imshow("breh", img)
cv2.waitKey(100)
os.remove(stream_path + "/" + files[x])
#print os.listdir(stream_path)
# close stream safely
os.killpg(os.getpgid(stream_process.pid), signal.SIGTERM)
shutil.rmtree(stream_path)
print "closing safely"
| [
11748,
28686,
11,
4423,
346,
11,
850,
14681,
11,
6737,
11,
25064,
11,
269,
85,
17,
198,
198,
5532,
62,
6978,
796,
366,
15588,
1,
198,
29647,
796,
1467,
198,
198,
2,
31246,
43913,
4269,
3141,
198,
1831,
62,
21812,
796,
366,
487,
43... | 2.162362 | 542 |
# Generated by Django 3.1.6 on 2021-09-04 07:55
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
21,
319,
33448,
12,
2931,
12,
3023,
8753,
25,
2816,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import pretend
import pytest
from warehouse import views
from warehouse.views import (
forbidden, index, httpexception_view, robotstxt, current_user_indicator,
search,
)
from ..common.db.packaging import (
ProjectFactory, ReleaseFactory, FileFactory,
)
from ..common.db.accounts import UserFactory
| [
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
7330,
257,
4866,
286,
262,
13789,
379,
198,
2,... | 3.696203 | 237 |
from concurrent import futures
import time
import grpc
import hashtag_pb2
import hashtag_pb2_grpc
import redis
import os
r = redis.Redis(host=os.environ['REDIS_HOST_URL'], port=6379, db=0, charset="utf-8", decode_responses=True)
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
if __name__ == '__main__':
serve()
| [
6738,
24580,
1330,
25650,
198,
198,
11748,
640,
198,
11748,
1036,
14751,
198,
11748,
23950,
62,
40842,
17,
198,
11748,
23950,
62,
40842,
17,
62,
2164,
14751,
198,
11748,
2266,
271,
198,
11748,
28686,
198,
198,
81,
796,
2266,
271,
13,
... | 2.561983 | 121 |
#!/usr/bin/python
import argparse
import sys
import suds
import re
from suds.client import Client
import numpy as np
import pandas as pd
import time
import requests
from urllib.request import urlretrieve
from urllib.parse import quote
import socket
from collections import Counter
import plotly.express as px
start_time = time.time()
server = "https://rest.ensembl.org"
ext = "/vep/human/hgvs/"
URL = 'https://mutalyzer.nl/services/?wsdl'
c = Client(URL, cache=None)
o = c.service
# ---parse commandline arguments---
# -----displays statistic over errors that occured during curation and they're frequency----#
# ----add missing genomic alterations based on cDNA coordinates via Mutalyzer numberConversion-----
# -----add missing protein alterations using genomic coordinates or cDNA and Mutalyzer----#
# ------ add missing consequences using Ensemble VEP -------
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
11748,
1822,
29572,
198,
11748,
25064,
198,
11748,
424,
9310,
198,
11748,
302,
198,
6738,
424,
9310,
13,
16366,
1330,
20985,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279... | 3.51341 | 261 |
#!/usr/bin/env python
"""
Riverbed Community Toolkit
NetIM - Synthetic Test
Script: Chrome-open-url-generic.py
Application: Chrome
Simple generic script that automates the Chrome browser on a windows machine to navigate to a page
The URL of the page to naviage must be passed in parameters
Usage:
python Chrome-open-url-generic.py "https://your-fqdn/your-path"
"""
import time, sys
# Configure Selenium
from selenium import webdriver
CHROMEDRIVER_PATH= "C:\\chromedriver_win32\\chromedriver.exe"
DEFAULT_URL = "https://www.riverbed.com"
DEFAULT_ROBOT_PROFILE_PATH = "C:\\robot-chrome-profile"
if __name__ == "__main__":
chrome_options = webdriver.ChromeOptions()
driver = webdriver.Chrome(executable_path=CHROMEDRIVER_PATH,chrome_options=chrome_options)
# Synthetic test
url = DEFAULT_URL
if (len(sys.argv) > 1):
url=sys.argv[1]
driver.get(url)
time.sleep(5)
driver.close()
driver.quit()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
201,
198,
37811,
201,
198,
42204,
3077,
8108,
16984,
15813,
201,
198,
7934,
3955,
532,
26375,
6587,
6208,
201,
198,
201,
198,
7391,
25,
13282,
12,
9654,
12,
6371,
12,
41357,
13,
9078,
201,
... | 2.588859 | 377 |
from typing import Any
import pytest
from async_rx import Observer, rx_filter
from ..model import ObserverCounterCollector
from .model import get_observable
@pytest.mark.curio
| [
6738,
19720,
1330,
4377,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
30351,
62,
40914,
1330,
27058,
11,
374,
87,
62,
24455,
198,
198,
6738,
11485,
19849,
1330,
27058,
31694,
31337,
273,
198,
6738,
764,
19849,
1330,
651,
62,
672,
3168,... | 3.415094 | 53 |
import os
import re
from setuptools import setup, find_packages
__version__ = None
with open('pl/__init__.py') as f:
exec(f.read())
if "VERSION" in os.environ:
if os.environ["VERSION"]:
__version__ = os.environ["VERSION"]
# Convert version from Semantic Version into PEP 440
pattern = re.compile(r"""(?P<major_minor_patch>[0-9]*\.[0-9]*\.[0-9]*)(-.*\.(?P<increment>[0-9]*))?""", re.VERBOSE)
match = pattern.match(__version__)
if match:
__version__ = match.group("major_minor_patch")
if match.group("increment") is not None:
__version__ += ".dev" + match.group("increment")
with open("README.md", "r") as f:
long_description = f.read()
setup(
name="pl",
version=__version__,
description="Python library",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/gbesancon/projects",
author="Gilles Besançon",
author_email="gilles.besancon@gmail.com",
packages=find_packages(exclude=['tests', 'tests.*']),
keywords=[],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
) | [
11748,
28686,
198,
11748,
302,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
198,
834,
9641,
834,
796,
6045,
198,
4480,
1280,
10786,
489,
14,
834,
15003,
834,
13,
9078,
11537,
355,
277,
25,
198,
220,
220,
220,
... | 2.53252 | 492 |
#!/usr/bin/env python3
#
# Electron Cash - A Bitcoin Cash SPV Wallet
#
# This file Copyright (C) 2019 Calin Culianu <calin.culianu@gmail.com>
# License: MIT License
#
import time
import threading
import queue
import weakref
import math
from collections import defaultdict
from .util import PrintError, print_error
class ExpiringCache:
''' A fast cache useful for storing tens of thousands of lightweight items.
Use this class to cache the results of functions or other computations
when:
1. Many identical items are repetitively created (or many duplicate
computations are repetitively performed) during normal app
execution, and it makes sense to cache them.
2. The creation of said items is more computationally expensive than
accessing this cache.
3. The memory tradeoff is acceptable. (As with all caches, you are
trading CPU cost for memory cost).
An example of this is UI code or string formatting code that refreshes the
display with (mostly) the same output over and over again. In that case it
may make more sense to just cache the output items (such as the formatted
amount results from format_satoshis), rather than regenerate them, as a
performance tweak.
ExpiringCache automatically has old items expire if `maxlen' is exceeded.
Or, alternatively, if `timeout' is not None (and a positive nonzero number)
items are auto-removed if they are older than `timeout' seconds (even if
`maxlen' was otherwise not exceeded). Note that the actual timeout used
may be rounded up to match the tick granularity of the cache manager (see
below).
Items are timestamped with a 'tick count' (granularity of 10 seconds per
tick). Their timestamp is updated each time they are accessed via `get' (so
that only the oldest items that are least useful are the first to expire on
cache overflow).
get() and put() are fast. A background thread is used to safely
expire items when the cache overflows (so that get and put never stall
to manage the cache's size and/or to flush old items). This background
thread runs every 10 seconds -- so caches may temporarily overflow past
their maxlen for up to 10 seconds. '''
def size_bytes(self):
''' Returns the cache's memory usage in bytes. This is done by doing a
deep, recursive examination of the cache contents. '''
return get_object_size(
self.d.copy() # prevent iterating over a mutating dict.
)
def copy_dict(self):
''' Returns a copy of the cache contents. Useful for seriliazing
or otherwise examining the cache. The returned dict format is:
d[item_key] -> [tick, item_value]'''
return self.d.copy()
class _ExpiringCacheMgr(PrintError):
'''Do not use this class directly. Instead just create ExpiringCache
instances and that will handle the creation of this object automatically
and its lifecycle.
This is a singleton that manages the ExpiringCaches. It creates a thread
that wakes up every tick_interval seconds and expires old items from
overflowing extant caches.
Note that after the last cache is gc'd the manager thread will exit and
this singleton object also will expire and clean itself up automatically.'''
# This lock is used to lock _instance and self.caches.
# NOTE: This lock *must* be a recursive lock as the gc callback function
# may end up executing in the same thread as our add_cache() method,
# due to the way Python GC works!
_lock = threading.RLock()
_instance = None
tick = 0
tick_interval = 10.0 # seconds; we wake up this often to update 'tick' and also to expire old items for overflowing caches
debug = False # If true we print to console when caches expire and go away
@classmethod
@classmethod
@classmethod
@classmethod
def get_object_size(obj_0):
''' Debug tool -- returns the amount of memory taken by an object in bytes
by deeply examining its contents recursively (more accurate than
sys.getsizeof as a result). '''
import sys
import warnings
from numbers import Number
from collections import Set, Mapping, deque
try: # Python 2
zero_depth_bases = (basestring, Number, xrange, bytearray)
iteritems = 'iteritems'
except NameError: # Python 3
zero_depth_bases = (str, bytes, Number, range, bytearray)
iteritems = 'items'
def getsize(obj_0):
"""Recursively iterate to sum size of object & members."""
_seen_ids = set()
return inner(obj_0)
return getsize(obj_0)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
198,
2,
5903,
1313,
16210,
532,
317,
6185,
16210,
6226,
53,
37249,
198,
2,
198,
2,
770,
2393,
15069,
357,
34,
8,
13130,
2199,
259,
32559,
666,
84,
1279,
9948,
259,
13,
3129,... | 3.225052 | 1,453 |
from base import *
import imp
#
# Sample cloud settings (for OpenShift)
# See https://github.com/openshift/django-example
#
# Turn off debug
DEBUG = False
if not DEBUG:
ALLOWED_HOSTS = [
# IMPORTANT: See https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
'status.aksalj.me'
]
# Load the OpenShift helper library
lib_path = os.environ['OPENSHIFT_REPO_DIR'] + 'libs/'
modinfo = imp.find_module('openshiftlibs', [lib_path])
openshiftlibs = imp.load_module('openshiftlibs', modinfo[0], modinfo[1], modinfo[2])
# Override SECRET_KEY
# Make a dictionary of default keys
default_keys = {'SECRET_KEY': SECRET_KEY}
# Replace default keys with dynamic values
use_keys = openshiftlibs.openshift_secure(default_keys)
# Make this unique, and don't share it with anybody.
SECRET_KEY = use_keys['SECRET_KEY']
# Override DATABASES
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(os.environ['OPENSHIFT_DATA_DIR'], 'sqlite3.db'),
'USER': 'whiskerboard',
'PASSWORD': '6Z75kPBNmrIswBDdrsIT',
'HOST': '',
'PORT': '',
}
}
# Override STATIC_ROOT
STATIC_ROOT = os.path.join(os.environ['OPENSHIFT_REPO_DIR'], 'wsgi', 'static')
| [
6738,
2779,
1330,
1635,
198,
11748,
848,
198,
2,
198,
2,
27565,
6279,
6460,
357,
1640,
4946,
33377,
8,
198,
2,
4091,
3740,
1378,
12567,
13,
785,
14,
44813,
29323,
14,
28241,
14208,
12,
20688,
198,
2,
198,
198,
2,
6756,
572,
14257,
... | 2.398844 | 519 |
#!/usr/bin/env python
from __future__ import print_function
import argparse
import email.mime.multipart
import email.mime.text
import logging
import os.path
import pickle
import re
import smtplib
import subprocess
import sys
from datetime import datetime, timedelta
from phabricator import Phabricator
# Setting up a virtualenv to run this script can be done by running the
# following commands:
# $ virtualenv venv
# $ . ./venv/bin/activate
# $ pip install Phabricator
GIT_REPO_METADATA = (("llvm", "https://llvm.org/git/llvm.git"), )
# The below PhabXXX classes represent objects as modelled by Phabricator.
# The classes can be serialized to disk, to try and make sure that we don't
# needlessly have to re-fetch lots of data from Phabricator, as that would
# make this script unusably slow.
reviews_cache = ReviewsCache()
users_cache = UsersCache()
PHABCACHESINFO = ((reviews_cache, ("differential", "revision", "search"),
"updated", record_reviews, 5, 7),
(users_cache, ("user", "search"), "newest", record_users,
100, 1000))
# All of the above code is about fetching data from Phabricator and caching it
# on local disk. The below code contains the actual "business logic" for this
# script.
_userphid2realname = None
reAuthorMail = re.compile("^author-mail <([^>]*)>.*$")
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
1822,
29572,
198,
11748,
3053,
13,
76,
524,
13,
16680,
541,
433,
198,
11748,
3053,
13,
76,
524,
13,
5239,
198,
11... | 2.881874 | 491 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib.db import model_base
from oslo_log import log as logging
from oslo_utils import uuidutils
import sqlalchemy as sa
from sqlalchemy import and_
from neutron.db import api as db_api
from neutron.db.models import segment as segments_model
from neutron.objects import base as base_obj
from neutron.objects import network as network_obj
LOG = logging.getLogger(__name__)
NETWORK_TYPE = segments_model.NetworkSegment.network_type.name
PHYSICAL_NETWORK = segments_model.NetworkSegment.physical_network.name
SEGMENTATION_ID = segments_model.NetworkSegment.segmentation_id.name
NETWORK_ID = segments_model.NetworkSegment.network_id.name
def _make_segment_dict(obj):
"""Make a segment dictionary out of an object."""
#NOTE(jrichard) drop change in next rebase.
return {'id': obj.id,
NETWORK_TYPE: obj.network_type,
PHYSICAL_NETWORK: obj.physical_network,
SEGMENTATION_ID: obj.segmentation_id,
NETWORK_ID: getattr(obj, 'network_id', None)}
class SubnetSegment(model_base.BASEV2, model_base.HasId):
"""Represent persistent state of a subnet segment.
A subnet segment is a portion of a neutron subnet with a
specific physical realization. A neutron subnet can consist of
one or more segments.
"""
# TODO(alegacy): rename this similar to how the NetworkSegments table was
# renamed?
__tablename__ = 'ml2_subnet_segments'
subnet_id = sa.Column(sa.String(36),
sa.ForeignKey('subnets.id', ondelete="CASCADE"),
nullable=False)
network_type = sa.Column(sa.String(32), nullable=False)
physical_network = sa.Column(sa.String(64))
segmentation_id = sa.Column(sa.Integer)
is_dynamic = sa.Column(sa.Boolean, default=False, nullable=False,
server_default=sa.sql.false())
segment_index = sa.Column(sa.Integer, nullable=False, server_default='0')
def get_dynamic_segment(context, network_id, physical_network=None,
segmentation_id=None):
"""Return a dynamic segment for the filters provided if one exists."""
with db_api.context_manager.reader.using(context):
filters = {
'network_id': network_id,
'is_dynamic': True,
}
if physical_network:
filters['physical_network'] = physical_network
if segmentation_id:
filters['segmentation_id'] = segmentation_id
pager = base_obj.Pager(limit=1)
objs = network_obj.NetworkSegment.get_objects(
context, _pager=pager, **filters)
if objs:
return _make_segment_dict(objs[0])
else:
LOG.debug("No dynamic segment found for "
"Network:%(network_id)s, "
"Physical network:%(physnet)s, "
"segmentation_id:%(segmentation_id)s",
{'network_id': network_id,
'physnet': physical_network,
'segmentation_id': segmentation_id})
def delete_network_segment(context, segment_id):
"""Release a dynamic segment for the params provided if one exists."""
with db_api.context_manager.writer.using(context):
network_obj.NetworkSegment.delete_objects(context, id=segment_id)
| [
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
220,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
921,
743,
7330,
198,
2,
220,
220,
... | 2.541667 | 1,584 |
"""
.. _configurations:
Advanced Configurations
=======================
Defining Parameter Spaces
-------------------------
Optuna supports five kinds of parameters.
.. code-block:: python
def objective(trial):
# Categorical parameter
optimizer = trial.suggest_categorical('optimizer', ['MomentumSGD', 'Adam'])
# Int parameter
num_layers = trial.suggest_int('num_layers', 1, 3)
# Uniform parameter
dropout_rate = trial.suggest_uniform('dropout_rate', 0.0, 1.0)
# Loguniform parameter
learning_rate = trial.suggest_loguniform('learning_rate', 1e-5, 1e-2)
# Discrete-uniform parameter
drop_path_rate = trial.suggest_discrete_uniform('drop_path_rate', 0.0, 1.0, 0.1)
...
Branches and Loops
------------------
You can use branches or loops depending on the parameter values.
.. code-block:: python
def objective(trial):
classifier_name = trial.suggest_categorical('classifier', ['SVC', 'RandomForest'])
if classifier_name == 'SVC':
svc_c = trial.suggest_loguniform('svc_c', 1e-10, 1e10)
classifier_obj = sklearn.svm.SVC(C=svc_c)
else:
rf_max_depth = int(trial.suggest_loguniform('rf_max_depth', 2, 32))
classifier_obj = sklearn.ensemble.RandomForestClassifier(max_depth=rf_max_depth)
...
.. code-block:: python
def create_model(trial):
n_layers = trial.suggest_int('n_layers', 1, 3)
layers = []
for i in range(n_layers):
n_units = int(trial.suggest_loguniform('n_units_l{}'.format(i), 4, 128))
layers.append(L.Linear(None, n_units))
layers.append(F.relu)
layers.append(L.Linear(None, 10))
return chainer.Sequential(*layers)
Please also refer to `examples <https://github.com/optuna/optuna/tree/master/examples>`_.
Note on the Number of Parameters
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The difficulty of optimization increases roughly exponentially with regard to the number of parameters. That is, the number of necessary trials increases exponentially when you increase the number of parameters, so it is recommended to not add unimportant parameters.
Arguments for `Study.optimize`
--------------------------------
The method :func:`~optuna.study.Study.optimize` (and ``optuna study optimize`` CLI command as well)
has several useful options such as ``timeout``.
For details, please refer to the API reference for :func:`~optuna.study.Study.optimize`.
**FYI**: If you give neither ``n_trials`` nor ``timeout`` options, the optimization continues until it receives a termination signal such as Ctrl+C or SIGTERM.
This is useful for use cases such as when it is hard to estimate the computational costs required to optimize your objective function.
"""
| [
37811,
198,
492,
4808,
11250,
20074,
25,
198,
198,
28809,
17056,
20074,
198,
4770,
1421,
18604,
198,
198,
7469,
3191,
25139,
2357,
48086,
198,
22369,
12,
198,
198,
27871,
9613,
6971,
1936,
6982,
286,
10007,
13,
198,
198,
492,
2438,
12,
... | 2.810139 | 1,006 |
import logging
import enum
import soco
from soco_plugin.message import Command as Parent
from soco_plugin.command import Mixin
class Command(Parent):
"""
>>> import home
>>> import soco_plugin
>>> cmd = soco_plugin.command.volume.ramp.Command.make(["Bath"])
>>> old_state = home.appliance.sound.player.state.off.State()
>>> old_state = old_state.next(home.event.presence.Event.On)
>>> old_state = old_state.next(home.event.sleepiness.Event.Asleep)
>>> new_state = old_state.next(home.event.sleepiness.Event.Awake)
>>> msg = cmd.make_msgs_from(old_state, new_state)
>>> msg[0]["fields"]["volume"]
30
>>> msg[0]["fields"]["ramp_type"]
'SLEEP_TIMER_RAMP_TYPE'
"""
ACTION = "ramp_to_volume"
Msg = {
"type": "soco",
"name": ACTION,
"fields": {"volume": 10, "ramp_type": "SLEEP_TIMER_RAMP_TYPE"},
"addresses": [],
}
| [
11748,
18931,
198,
11748,
33829,
198,
198,
11748,
1307,
78,
198,
6738,
1307,
78,
62,
33803,
13,
20500,
1330,
9455,
355,
16774,
198,
6738,
1307,
78,
62,
33803,
13,
21812,
1330,
15561,
259,
628,
198,
4871,
9455,
7,
24546,
2599,
198,
220... | 2.363402 | 388 |
from functools import wraps
import strongr.restdomain.model.gateways
# oauth2 lib does not support namespaces so we need a hack
# https://github.com/lepture/flask-oauthlib/issues/180
| [
6738,
1257,
310,
10141,
1330,
27521,
198,
11748,
1913,
81,
13,
2118,
27830,
13,
19849,
13,
10494,
1322,
198,
198,
2,
267,
18439,
17,
9195,
857,
407,
1104,
3891,
43076,
523,
356,
761,
257,
8156,
198,
2,
3740,
1378,
12567,
13,
785,
14... | 3.172414 | 58 |
import torch
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
| [
11748,
28034,
198,
11748,
299,
32152,
355,
45941,
198,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
2603,
29487,
8019,
13,
4033,
26448,
1330,
6910,
36307,
628,
628,
628
] | 3.529412 | 34 |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
__version__ = '0.13.1'
| [
6738,
11593,
37443,
834,
1330,
357,
48546,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
11,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
28000,
1098,
62,
17201... | 2.216667 | 60 |
/*
Copyright 2016 BitTorrent Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import dpkt, bencode, struct, traceback, sys, argparse, socket
listMax = 40
bad = 0
no_version = 0
nonUtIps = {}
versionIps = {}
bandwidth = { "in":{}, "out":{}, "bad":{ "noId":0, "notEncoded":0 } }
def bootstrapCount(fp):
global no_version, bad, nonUtIps, versionIps
pcap = dpkt.pcap.Reader(fp)
i = 0
for ts, buf in pcap:
eth = dpkt.ethernet.Ethernet(buf)
ip = eth.data
tcp = ip.data
#Get the remote IP address and location identifier
try:
src_ip_addr_str = socket.inet_ntoa(ip.src)
locId = src_ip_addr_str + ":" + str(tcp.sport)
except:
try: bandwidth["bad"]["noId"] += len(tcp.data)
except: pass
continue
try:
decoded = bencode.bdecode(tcp.data)
except:
bandwidth["bad"]["notEncoded"] += len(tcp.data)
bad += 1
continue
version = decoded.get("v")
if not version:
#No version, we assume it's outbound. Change the locId
src_ip_addr_str = socket.inet_ntoa(ip.dst)
locId = src_ip_addr_str + ":" + str(tcp.dport)
#Set outbound bandwidth
try: bandwidth["out"][locId] += len(tcp.data)
except: bandwidth["out"][locId] = len(tcp.data)
no_version += 1
continue
#We have a version, we assume it's inbound.
try: bandwidth["in"][locId] += len(tcp.data)
except: bandwidth["in"][locId] = len(tcp.data)
if version[0:2] != "UT":
try: nonUtIps[version][locId] += 1
except:
try: nonUtIps[version][locId] = 1
except: nonUtIps[version] = { locId: 1 }
continue
#Read the version
version = version[2:]
unpackedVersion = struct.unpack('>H', version)
unpackedVersion = unpackedVersion[0]
#Add it to the structured map.
try: versionIps[unpackedVersion][locId] += 1
except:
try: versionIps[unpackedVersion][locId] = 1
except: versionIps[unpackedVersion] = { locId: 1 }
i += 1
if (i % 100) == 0:
sys.stdout.write(".")
sys.stdout.flush()
"""
print '============================'
print tcp.sport
print '============================'
print decoded
print '============================'
print version
print '============================'
print unpackedVersion
print '============================'
print
print
"""
fp.close()
print
######################################################
if __name__ == '__main__':
#Parse the args
parser = argparse.ArgumentParser()
parser.add_argument(action="store", nargs='?', dest="pcapPath", help="The tcpdump PCAP file", metavar="[pcap file path]")
args = parser.parse_args()
#Have enough args?
if not args.pcapPath:
print "Usage: readBuildsFromTcpDump.py [pcap file path]\n"
exit(1)
try: fp = open(args.pcapPath)
except:
print "Cannot open '" + args.pcapPath + "'"
exit(1)
try: bootstrapCount(fp)
except:
traceback.print_exc()
versionPairs = []
for build, ipMap in versionIps.iteritems():
bandwidthOut = 0
for locId in ipMap.keys():
bandwidthOut += bandwidth["out"].get(locId, 0)
versionPairs.append([build, sum(ipMap.values()), len(ipMap), bandwidthOut])
print
print "======================================================"
print "UT Builds (top " + str(listMax) + ")"
print "======================================================"
vpSorted = sorted(versionPairs, key=lambda pair: pair[1], reverse=True)
for idx, pair in enumerate(vpSorted):
if idx > listMax: break
ver = pair[0]
out = pair[3]
outPer = out / pair[1]
ratio = round(float(pair[1])/pair[2], 2)
print "Build " + str(ver) + ":\t\t" +\
str(pair[1]) + " // " +\
str(pair[2]) + " unique // " +\
str(ratio) + " ratio // " +\
str(out) + " out // " +\
str(outPer) + " per request"
nonUtPairs = []
for build, ipMap in nonUtIps.iteritems():
bandwidthOut = 0
for locId in ipMap.keys():
bandwidthOut += bandwidth["out"].get(locId, 0)
nonUtPairs.append([build, sum(ipMap.values()), len(ipMap), bandwidthOut])
print
print "======================================================"
print "Other Clients (top " + str(listMax) + ")"
print "======================================================"
nutSorted = sorted(nonUtPairs, key=lambda pair: pair[1], reverse=True)
for idx, pair in enumerate(nutSorted):
if idx > listMax: break
ver = pair[0]
out = pair[3]
outPer = out / pair[1]
ratio = round(pair[1]/pair[2], 2)
try:
unpackedVersion = struct.unpack('>H', ver[2:])
ver = ver[0:2] + str(unpackedVersion[0])
except:
ver = "??? " + ver.strip()
print "Build " + str(ver) + ":\t\t" +\
str(pair[1]) + " // " +\
str(pair[2]) + " unique // " +\
str(ratio) + " ratio // " +\
str(out) + " out // " +\
str(outPer) + " per request"
print
print "======================================================"
print "Miscellaneous"
print "======================================================"
print "Bad: \t" + str(bad)
print "No Version:\t" + str(no_version)
print
print
print bandwidth["bad"]
| [
15211,
198,
15269,
1584,
4722,
39286,
3457,
198,
198,
26656,
15385,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
5832,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
1639,
7... | 2.235604 | 2,848 |
#!/usr/bin/env python3
import sys
import math
import pprint
import os
import re
import doctest
import itertools
import types
import logging
from collections import deque
from collections import defaultdict
#import networkx as nx
from copy import deepcopy
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
# create absolute mydir
mydir = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(mydir, '../lib'))
from advent import *
if __name__ == '__main__':
if len(sys.argv) == 2 and sys.argv[1] == "TEST":
import doctest
doctest.testmod()
sys.exit(0)
logging.basicConfig(level=logging.INFO)
path = "input.txt"
if len(sys.argv) > 1:
path = sys.argv[1]
with open(path) as f:
data = [ list(tokenize(d, ' =\n[]')) for d in f.readlines()]
memory = defaultdict(lambda: 0)
for inst in data:
cmd = inst[0]
print("INST", inst)
if cmd == 'mask':
print('='*40)
mask = inst[1]
maskV = int(inst[1].replace('X','0'), 2)
maskX = int(inst[1].replace('1', '0').replace('X', '1'), 2)
maskC = sum(1 if x == 'X' else 0 for x in inst[1])
# maskShift = [ 35 - e[0] if e[1] == 'X' else -1 for e in enumerate(inst[1]) ]
maskShift = [ 35 - e[0] for e in enumerate(inst[1]) if e[1] == 'X']
#print('MV', mask2str(maskV))
#print('MX', mask2str(maskX))
#print(mask2str(5))
elif cmd == 'mem':
mr = int(inst[1])
nv = int(inst[2])
for inc in range(2 ** maskC):
mr2 = applyMask(mask, mr, maskV, maskX, maskShift, inc)
print("MEMORY[%d]=%d" % (mr2, nv))
memory[mr2] = nv
else:
raise False
print(sum(memory.values()))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
25064,
198,
11748,
10688,
198,
11748,
279,
4798,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
10412,
395,
198,
11748,
340,
861,
10141,
198,
11748,
3858,
198,
11748,
18... | 2.050661 | 908 |
from rest_framework import routers
from .api import (
ClassroomViewSet, CurrentFacilityViewSet, DeviceOwnerViewSet, FacilityUserViewSet, FacilityViewSet, LearnerGroupViewSet, MembershipViewSet, RoleViewSet,
SessionViewSet
)
router = routers.SimpleRouter()
router.register(r'facilityuser', FacilityUserViewSet)
router.register(r'deviceowner', DeviceOwnerViewSet)
router.register(r'membership', MembershipViewSet)
router.register(r'role', RoleViewSet)
router.register(r'facility', FacilityViewSet)
router.register(r'currentfacility', CurrentFacilityViewSet, base_name='currentfacility')
router.register(r'session', SessionViewSet, base_name='session')
router.register(r'classroom', ClassroomViewSet)
router.register(r'learnergroup', LearnerGroupViewSet)
urlpatterns = router.urls
| [
6738,
1334,
62,
30604,
1330,
41144,
198,
198,
6738,
764,
15042,
1330,
357,
198,
220,
220,
220,
5016,
3823,
7680,
7248,
11,
9236,
47522,
879,
7680,
7248,
11,
16232,
42419,
7680,
7248,
11,
29118,
12982,
7680,
7248,
11,
29118,
7680,
7248,
... | 3.198381 | 247 |
from django.contrib import admin
from books.models import Author, Book, PublicationLanguage
admin.site.register(Author)
admin.site.register(PublicationLanguage)
admin.site.register(Book)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
6738,
3835,
13,
27530,
1330,
6434,
11,
4897,
11,
45065,
32065,
198,
198,
28482,
13,
15654,
13,
30238,
7,
13838,
8,
198,
28482,
13,
15654,
13,
30238,
7,
15202,
341,
32065,
8,
... | 3.634615 | 52 |
__version__ = "2.1.0"
__MODEL_HUB_ORGANIZATION__ = 'sentence-transformers'
from .datasets import SentencesDataset, ParallelSentencesDataset
from .LoggingHandler import LoggingHandler
from .SentenceTransformer import SentenceTransformer
from .readers import InputExample
from .cross_encoder.CrossEncoder import CrossEncoder
| [
834,
9641,
834,
796,
366,
17,
13,
16,
13,
15,
1,
201,
198,
834,
33365,
3698,
62,
39,
10526,
62,
1581,
45028,
14887,
6234,
834,
796,
705,
34086,
594,
12,
35636,
364,
6,
201,
198,
6738,
764,
19608,
292,
1039,
1330,
11352,
3007,
2735... | 3.132075 | 106 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import uuid
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
11,
15720,
602,
198,
11748,
334,
27112,
628
] | 2.902439 | 41 |
"""Golden tests cases for testing illegal tags."""
from liquid.golden.case import Case
cases = [
Case(
description="unknown tag",
template=r"{% nosuchthing %}",
expect="",
error=True,
strict=True,
),
Case(
description="no addition operator",
template=r"{% assign x = 1 + 2 %}{{ x }}",
expect="",
error=True,
strict=True,
),
Case(
description="no subtraction operator",
template=r"{% assign x = 1 - 2 %}{{ x }}",
expect="",
error=True,
strict=True,
),
Case(
description="no multiplication operator",
template=r"{% assign x = 2 %}{{ x * 3 }}",
expect="",
error=True,
strict=True,
),
]
| [
37811,
32378,
5254,
2663,
329,
4856,
5293,
15940,
526,
15931,
198,
198,
6738,
8122,
13,
24267,
268,
13,
7442,
1330,
8913,
198,
198,
33964,
796,
685,
198,
220,
220,
220,
8913,
7,
198,
220,
220,
220,
220,
220,
220,
220,
6764,
2625,
34... | 2.117166 | 367 |
from abc import ABCMeta
class ApiModel(metaclass=ABCMeta):
"""Abstract class for defining a new API object"""
| [
6738,
450,
66,
1330,
9738,
48526,
628,
198,
4871,
5949,
72,
17633,
7,
4164,
330,
31172,
28,
24694,
48526,
2599,
198,
220,
220,
220,
37227,
23839,
1398,
329,
16215,
257,
649,
7824,
2134,
37811,
198
] | 3.314286 | 35 |
#!/usr/bin/env python
__author__ = 'Sergei F. Kliver'
import argparse
from RouToolPa.Collections.General import IdList
#from RouToolPa.Tools.Abstract import Tool
#from RouToolPa.Routines import NCBIRoutines
from RouToolPa.Tools.LinuxTools import Axel
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--ids", action="store", dest="ids",
type=lambda s: s.split(","),
help="Comma-separated list of SRA ids to download")
parser.add_argument("-f", "--id_file", action="store", dest="id_file",
help="File with SRA ids(one per line) to download")
parser.add_argument("-t", "--threads", action="store", dest="threads", type=int, default=1,
help="Number of simultaneous downloads")
parser.add_argument("-c", "--connections", action="store", dest="connections", type=int, default=8,
help="Number of connections for each download")
args = parser.parse_args()
if (not args.ids) and (not args.id_file):
raise ValueError("Both ids and id file were not set")
id_list = IdList(filename=args.id_file) if args.id_file else args.ids
Axel.threads = args.threads
Axel.parallel_download_from_sra(id_list, args.connections)
"""
options_list = []
for entry_id in id_list:
ftp_path = NCBIRoutines.get_sra_ftp_path_from_id(entry_id)
options_list.append("-n %i %s" % (args.connections, ftp_path))
tool = Tool(cmd="axel", max_threads=args.threads)
tool.parallel_execute(options_list)
for filename in os.listdir(os.getcwd()):
if ".sra" not in filename:
continue
tool.safe_mkdir(filename[:-4])
os.system("mv %s %s/" % (filename, filename[:-4]))
""" | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
834,
9800,
834,
796,
705,
7089,
469,
72,
376,
13,
14770,
1428,
6,
198,
11748,
1822,
29572,
198,
6738,
13876,
25391,
28875,
13,
5216,
26448,
13,
12218,
1330,
5121,
8053,
628,
198,
2,
... | 2.507485 | 668 |
"""Describe overall framework configuration."""
import os
import pytest
from kubernetes.config.kube_config import KUBE_CONFIG_DEFAULT_LOCATION
from settings import DEFAULT_IMAGE, DEFAULT_PULL_POLICY, DEFAULT_IC_TYPE, DEFAULT_SERVICE
def pytest_addoption(parser) -> None:
"""Get cli-arguments.
:param parser: pytest parser
:return:
"""
parser.addoption("--context",
action="store", default="", help="context name as in the kubeconfig")
parser.addoption("--image",
action="store", default=DEFAULT_IMAGE, help="image with tag (image:tag)")
parser.addoption("--image-pull-policy",
action="store", default=DEFAULT_PULL_POLICY, help="image pull policy")
parser.addoption("--ic-type",
action="store", default=DEFAULT_IC_TYPE, help="provide ic type")
parser.addoption("--service",
action="store",
default=DEFAULT_SERVICE,
help="service type: nodeport or loadbalancer")
parser.addoption("--node-ip", action="store", help="public IP of a cluster node")
parser.addoption("--kubeconfig",
action="store",
default=os.path.expanduser(KUBE_CONFIG_DEFAULT_LOCATION),
help="an absolute path to kubeconfig")
# import fixtures into pytest global namespace
pytest_plugins = [
"suite.fixtures"
]
def pytest_collection_modifyitems(config, items) -> None:
"""
Skip the tests marked with '@pytest.mark.skip_for_nginx_oss' for Nginx OSS runs.
:param config: pytest config
:param items: pytest collected test-items
:return:
"""
if config.getoption("--ic-type") == "nginx-ingress":
skip_for_nginx_oss = pytest.mark.skip(reason="Skip a test for Nginx OSS")
for item in items:
if "skip_for_nginx_oss" in item.keywords:
item.add_marker(skip_for_nginx_oss)
| [
37811,
24564,
4892,
4045,
9355,
8398,
526,
15931,
198,
198,
11748,
28686,
198,
11748,
12972,
9288,
198,
198,
6738,
479,
18478,
3262,
274,
13,
11250,
13,
74,
3266,
62,
11250,
1330,
509,
10526,
36,
62,
10943,
16254,
62,
7206,
38865,
62,
... | 2.319953 | 847 |
#!/usr/bin/env python
import os
import requests
import json
import ConfigParser
config = ConfigParser.ConfigParser()
config.read('local_settings.cfg')
dictionary = {'baseURL': config.get('ArchivesSpace', 'baseURL'), 'repository':config.get('ArchivesSpace', 'repository'), 'user': config.get('ArchivesSpace', 'user'), 'password': config.get('ArchivesSpace', 'password'), 'destination': config.get('Destinations', 'METSdestination')}
# authenticates the session
auth = requests.post(baseURL + '/users/'+user+'/login?password='+password).json()
session = auth["session"]
headers = {'X-ArchivesSpace-Session':session}
# Gets the IDs of all digital objects in the repository
doIds = requests.get(baseURL + '/repositories/'+repository+'/digital_objects?all_ids=true', headers=headers)
# Exports METS for all digital objects
for id in doIds.json():
digital_object = (requests.get(baseURL + '/repositories/'+repository+'/digital_objects/' + str(id), headers=headers)).json()
doID = digital_object["digital_object_id"]
mets = requests.get(baseURL + '/repositories/'+repository+'/digital_objects/mets/'+str(id)+'.xml', headers=headers).text
if not os.path.exists(os.path.join(destination, doID)):
os.makedirs(os.path.join(destination, doID))
f = open(os.path.join(destination, doID, doID)+'.xml', 'w+')
f.write(mets.encode('utf-8'))
f.close
print doID + ' exported to ' + destination
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
28686,
198,
11748,
7007,
198,
11748,
33918,
198,
11748,
17056,
46677,
198,
198,
11250,
796,
17056,
46677,
13,
16934,
46677,
3419,
198,
11250,
13,
961,
10786,
12001,
62,
33692,
... | 2.892057 | 491 |
from tests.utils import W3CTestCase
| [
6738,
5254,
13,
26791,
1330,
370,
18,
4177,
395,
20448,
628
] | 3.363636 | 11 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
11,
15720,
602,
198,
6738,
42625,
14208,
13,
10414,
1330,
... | 3.111111 | 45 |
__author__ = "David Adrian"
__copyright__ = "Copyright 2017, AI Research, Data Technology Centre, Volkswagen Group"
__credits__ = ["David Adrian, Richard Kurle"]
__license__ = "MIT"
__maintainer__ = "David Adrian"
| [
834,
9800,
834,
796,
366,
11006,
21462,
1,
198,
834,
22163,
4766,
834,
796,
366,
15269,
2177,
11,
9552,
4992,
11,
6060,
8987,
9072,
11,
32991,
4912,
1,
198,
834,
66,
20696,
834,
796,
14631,
11006,
21462,
11,
6219,
18132,
293,
8973,
... | 3.451613 | 62 |
# coding=utf-8
# Author: Li xinming
# coding=utf-8
from math import log10
from math import pow
from operator import attrgetter
from random import choice
from random import random
from random import sample
from sys import argv
# save_out = stdout
# 输出重定向至指定的文件中,便于查看
# file_obj = open('out.txt', 'w+')
# stdout = file_obj
# 记录每个结点的相邻结点 like as follows: rec = {1:[2,3], 2:[3,4]} and so on
# 种群
if __name__ == '__main__':
param_len = len(argv)
if param_len !=5:
exit(0)
# new:coef old:r
param1 = argv[1]
# Pc
param2 = argv[2]
# Pm
param3 = argv[3]
# test_file
test_file = argv[4]
Global.r = float(param1)
Global.pc = float(param2)
# print "param2=",param2
# exit(0)
Global.pm = float(param3)
# 图形初始化
rate = 0
avg_iteration_time = 0
# f = open('test01.txt','r')
# f = open('test02.txt', 'r')
# f = open('test03.txt', 'r')
# 打开测试文件
f = open('test03_new.txt', 'r')
# f = open('test04.txt', 'r')
line = f.readline().split()
# print line
node_num = int(line[0])
edge_num = int(line[1])
Global.min_cost = int(line[2])
# print node_num
# print edge_num
graph = Graph(node_num, edge_num)
f.readline()
line = f.readline().split()
src = int(line[0])
dst = int(line[1])
pop_scale = int(line[2])
# pc = float(line[3])
pc = Global.pc
# pm = float(line[4])
pm = Global.pm
delay_w = int(line[5])
Global.delay_w = delay_w
# (row_num, col_num, BandWidth, Delay, Cost)
# param_length会随着的度量参数的增加而增大
param_length = 5
graph.init_edge_measure(f, param_length)
graph.init_node_adjs()
# print '----------node_adjs----------'
# print graph.get_node_adjs()
# print '------------------->graph.cost<------------'
# print graph.cost
# #print graph.bandwidth[0][1]
# #print graph.cost[0][1]
time = 0
while time < Global.LOOP_TIME:
iter = 0
population = Population(graph, src, dst, pop_scale, pc, pm, delay_w)
pop_size = population.get_popsize()
# print 'pop_size=', pop_size
generations = 0
best_fitnesses = []
avg_fitnesses = []
min_costs = []
flag = True
count = 0
sum_generation = 0
ratio = 0
population.calculate_fitness()
while generations < Global.MAX_GENERATION:
# print '--------------------generations=>>>>>', generations, '<<<<--------------'
# 计算种群中所以个体的适应度值
# population.calculate_fitness()
for i in range(pop_size):
# s1 = Population.random_chromosome(graph, 0, 4)
s1 = population.chromosomes[i]
# print 'i=', i, ': ', s1.get_solution(), ";Fitness=%.6f" % (s1.get_fitness())
population.choose()
# population.choose_jbs()
population.crossover()
population.mutate()
population.update()
population.calculate_fitness()
avg_fitness = population.avg_fitness
avg_fitnesses.append(avg_fitness)
best_fitnesses.append(population.get_best_fitness())
best_chromosome = Chromosome()
best_chromosome.set_solution(population.best_solution)
min_cost = best_chromosome.get_total_cost(graph)
min_costs.append(min_cost)
# if flag and fabs(population.get_best_fitness()*100-2.77777777778) >= 1.0e-11:
# sum_generation += generations
# flag = False
# 自适应变异概率,随着种群的平均适应度值变大,其变异概率应该减小
# Global.pm = 1 - population.avg_fitness/population.best_fitness
generations += 1
# print 'iiiii'
# 计算找到最优解的成功率
# if min_cost==13:
if min_cost == Global.min_cost:
rate += 1
# 计算算法收敛到最优解的最小迭代次数的平均值
location = len(best_fitnesses) - 1
indexes = [i for i in range(location + 1)]
for index in indexes[-1:0:-1]:
if best_fitnesses[index] == best_fitnesses[index - 1]:
iter += 1
else:
break
iter_time = location - iter
avg_iteration_time += iter_time
# print '222222'
time += 1
# print 'oooooo'
ration = rate*100.0/Global.LOOP_TIME
iter_time = avg_iteration_time*1.0/Global.LOOP_TIME
print 'rate=', ration
print 'avg_iteration_time=', iter_time
result = param1+"\t"+param2+"\t"+param3+"\t"+str(ration)+"\t"+str(iter_time)+"\n"
print result
# f = open("result_old", "a+")
result_file_name = "./"+test_file+"/result_old"
f = open(result_file_name, "a+")
# f.write(param1+"\t"+param2+"\t"+param3+"\t"+str(ration)+"\t"+str(iter_time)+"\n")
f.write(result)
# stdout = save_out
# print 'rate=',rate
# print 'avg_iteration_time=',avg_iteration_time
# long running
# endtime = clock()
# 只计算程序运行的CPU时间
# #print "program costs time is %.8f s" % (endtime - starttime)
# x = [i for i in range(MAX_GENERATION)]
# y = best_fitnesses
# z = avg_fitnesses
# u = min_costs
# info = 'node_num=%d, edge_num=%d, pop_scale=%d, r=%.3f pc=%.3f, pm=%.3f, global_min_cost=%d, best_solution=%s, respective_delay=%d'
# value = (node_num, edge_num, pop_scale,Global.r, pc, pm, min_cost, population.best_solution, population.respective_delay)
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
6434,
25,
220,
220,
7455,
2124,
259,
2229,
198,
2,
19617,
28,
40477,
12,
23,
198,
6738,
10688,
1330,
2604,
940,
198,
6738,
10688,
1330,
7182,
198,
6738,
10088,
1330,
708,
81,
1136,
353,
198,
67... | 1.909379 | 2,836 |
import numpy as np
import networkx as nx
import pickle
from graphik.graphs.graph_base import RobotPlanarGraph
from graphik.robots.robot_base import RobotPlanar
from graphik.utils.utils import list_to_variable_dict, make_save_string
from graphik.utils.experiments import (
run_multiple_experiments,
process_experiment,
scatter_error_between_solvers,
)
if __name__ == "__main__":
# Experiment params
dim = 2
dof = 10
n = dof
seed = 8675309
np.random.seed(seed)
# Keep whichever algorithms you want to run ('trust-exact', 'Newton-CG', and 'trust-constr' are the best)
# local_algorithms_unbounded = [
# "BFGS",
# "CG",
# "Newton-CG",
# "trust-exact"
# ]
# local_algorithms_bounded = [
# "L-BFGS-B",
# "TNC",
# "SLSQP",
# "trust-constr"
# ]
local_algorithms_unbounded = ["trust-exact"]
local_algorithms_bounded = ["trust-constr"]
n_goals = 10 # Number of goals
n_init = 1 # Number of initializations to try (should be 1 for zero_init = True and for bound_smoothing = True)
zero_init = True # True makes the angular solvers MUCH better w
use_limits = False # Whether to use angular limits for all the solvers
do_jacobian = False # Jacobian doesn't work well for zero_init (need a more local starting point)
fabrik_only = (
False # Only run the FABRIK solver (messy utility for re-running after the bug)
)
pose_goals = True
symbolic = False
if fabrik_only:
do_jacobian = False
if fabrik_only:
local_algorithms = []
else:
local_algorithms = (
local_algorithms_bounded if use_limits else local_algorithms_unbounded
)
# Solver params
verbosity = (
2 # Needs to be 2 for Riemannian solver at the moment TODO: make it smarter!!
)
maxiter = 2000 # Most algs never max it (Riemannian ConjugateGradient often does)
tol = 1e-9 # This is the key parameter, will be worth playing with (used for gtol except for SLSQP)
initial_tr_radius = 1.0 # This is a key parameter for trust-constr and trust-exact.
trigsimp = False # Not worth setting to True for n_init = 1
if fabrik_only:
riemannian_algorithms = []
else:
# riemannian_algorithms = ["TrustRegions", "ConjugateGradient"]
riemannian_algorithms = ["TrustRegions"]
solver_params = {
"solver": "BFGS",
"maxiter": maxiter,
"tol": tol,
"initial_tr_radius": initial_tr_radius,
}
bound_smoothing = True # Riemannian algs will do with and without bound smoothing when this is True
riemannian_alg1 = riemannian_algorithms[0] if not fabrik_only else "TrustRegions"
riemann_params = {
"solver": riemannian_alg1,
"logverbosity": verbosity,
"mingradnorm": tol,
"maxiter": maxiter,
}
jacobian_params = {
"tol": tol,
"maxiter": maxiter,
"dt": 1e-3,
"method": "dls_inverse",
}
fabrik_tol = 1e-9
fabrik_max_iter = (
maxiter # FABRIK is faster per iteration, might be worth changing this around
)
# Save string setup
save_string_properties = [
("dof", dof),
("bounded", use_limits),
("tol", tol),
("maxiter", maxiter),
("n_goals", n_goals),
("n_init", n_init),
("zero_init", zero_init),
]
if fabrik_only:
save_string = "results/FABRIK_only_planar_chain_" + make_save_string(
save_string_properties
)
else:
save_string = "results/planar_chain_" + make_save_string(save_string_properties)
# Robot params
# link_lengths = list_to_variable_dict(np.random.rand(dof) * 2.0 + 1.0)
link_lengths = list_to_variable_dict(np.ones(dof))
if use_limits:
lim = np.minimum(np.random.rand(n) * np.pi + 0.2, np.pi)
else:
# Try to keep the seed the same
# _ = np.minimum(np.random.rand(n) * np.pi + 0.2, np.pi)
lim = np.pi * np.ones(n)
lim_u = list_to_variable_dict(lim)
lim_l = list_to_variable_dict(-lim)
params = {
"a": link_lengths,
"theta": list_to_variable_dict(len(link_lengths) * [0.0]),
"joint_limits_upper": lim_u,
"joint_limits_lower": lim_l,
}
robot = RobotPlanar(params)
graph = RobotPlanarGraph(robot)
results = run_multiple_experiments(
graph,
n_goals,
n_init,
zero_init,
solver_params,
riemann_params,
jacobian_params,
use_limits,
verbosity,
bound_smoothing,
local_algorithms,
riemannian_algorithms,
fabrik_max_iter,
use_symbolic=symbolic,
trigsimp=trigsimp,
do_jacobian=do_jacobian,
pose_goals=True,
)
# results.robot = robot
# results.seed = seed
# pickle.dump(results, open(save_string + "full_results.p", "wb"))
process_experiment(results)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
3127,
87,
355,
299,
87,
198,
11748,
2298,
293,
198,
198,
6738,
4823,
1134,
13,
34960,
82,
13,
34960,
62,
8692,
1330,
16071,
20854,
283,
37065,
198,
6738,
4823,
1134,
13,
22609,
1747,
13,
305,... | 2.211224 | 2,263 |
import telebot
from config import keys, TOKEN
from utils import CryptoConverter, ConvertionException
bot = telebot.TeleBot (TOKEN)
@bot.message_handler(commands=['start', 'help'])
@bot.message_handler(commands=['values'])
@bot.message_handler(content_types=['text', ])
@bot.message_handler(content_types=['photo', ])
@bot.message_handler(content_types=['voice', ])
bot.polling()
| [
11748,
5735,
13645,
201,
198,
6738,
4566,
1330,
8251,
11,
5390,
43959,
201,
198,
6738,
3384,
4487,
1330,
36579,
3103,
332,
353,
11,
38240,
295,
16922,
201,
198,
201,
198,
13645,
796,
5735,
13645,
13,
31709,
20630,
357,
10468,
43959,
8,
... | 2.677632 | 152 |
from setuptools import setup
setup(
name='N th Fibonacci Number',
version='1.0',
description='This program provides nth fibonacci number.',
author='Lalit Bangad, Anirudh Pande, Pratyush Vaidya',
author_email='llbangad@ncsu.edu, apande@ncsu.edu,pavaidya@ncsu.edu',
url='https://github.com/lalit10/CSC510-Group19',
packages=[],
classifiers=[
"License :: OSI Approved :: MIT",
"Programming Language :: Python",
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Topic :: Software Engineering",
],
keywords='',
license='MIT',
install_requires=[],
) | [
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
45,
294,
41566,
261,
44456,
7913,
3256,
198,
220,
220,
220,
2196,
11639,
16,
13,
15,
3256,
198,
220,
220,
220,
6764,
11639,
1212,
1430,
3769,
... | 2.505618 | 267 |
import pytest
import numpy as np
import scanpy as sc
@pytest.mark.parametrize(
"method",
["t-test", "logreg"],
)
| [
11748,
12972,
9288,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
9367,
9078,
355,
629,
628,
198,
31,
9078,
9288,
13,
4102,
13,
17143,
316,
380,
2736,
7,
198,
220,
220,
220,
366,
24396,
1600,
198,
220,
220,
220,
14631,
83,
12... | 2.403846 | 52 |
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from tensorflow.compat.v1 import keras
# from tensorflow.compat.v1. keras.datasets import mnist
# from tensorflow.compat.v1.keras.datasets import fashion_mnist
# from tensorflow.compat.v1.keras.datasets import cifar10
from tensorflow.compat.v1.keras import backend
#
# from sklearn.model_selection import train_test_split
from population import Population
import numpy as np
import pandas as pd
from tqdm import tqdm
from PIL import Image
from copy import deepcopy
| [
198,
11748,
11192,
273,
11125,
13,
5589,
265,
13,
85,
16,
355,
48700,
198,
27110,
13,
40223,
62,
85,
17,
62,
46571,
3419,
198,
198,
6738,
11192,
273,
11125,
13,
5589,
265,
13,
85,
16,
1330,
41927,
292,
198,
2,
422,
11192,
273,
111... | 2.84492 | 187 |
from random import randint
import numpy as np
import matplotlib.pyplot as plt
if __name__ == "__main__":
Menu() | [
6738,
4738,
1330,
43720,
600,
198,
11748,
299,
32152,
355,
45941,
220,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
628,
220,
220,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,... | 2.75 | 44 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from ..standard.default import schedule as base_schedule
| [
2,
15069,
357,
66,
8,
5413,
10501,
13,
198,
2,
49962,
739,
262,
17168,
5964,
13,
198,
198,
6738,
11485,
20307,
13,
12286,
1330,
7269,
355,
2779,
62,
15952,
5950,
198
] | 4.225806 | 31 |
# import pickle untuk membaca model yang disimpan
import pickle
# import sklearn untuk menggunakan algoritma KNN
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
# import Flask untuk membuat web server
from flask import Flask, render_template, request
# buat objek Flask sebagai web server
app = Flask(__name__, static_folder="assets")
# membaca model yang sudah disimpan sebelumnya
scaler: StandardScaler = pickle.load(open("iris-scaler.model", 'rb'))
classifier: KNeighborsClassifier = pickle.load(open("iris-classification.model", 'rb'))
# RUTE HOME (/) - Ini adalah rute saat mengakses root website.
@app.route("/")
# RUTE PREDICT (/predict) - Ini adalah rute saat user men-submit
# data melalui form untuk melakukan prediksi
@app.route("/predict", methods=["POST"])
# mulai web server
if __name__ == "__main__":
app.run(debug=True) | [
2,
1330,
2298,
293,
1418,
2724,
1066,
65,
22260,
2746,
331,
648,
595,
320,
6839,
198,
11748,
2298,
293,
198,
198,
2,
1330,
1341,
35720,
1418,
2724,
1450,
1130,
403,
461,
272,
435,
7053,
270,
2611,
509,
6144,
198,
6738,
1341,
35720,
... | 2.896774 | 310 |
"""
This tutorial shows you how to use a data recorder to record some data for
imitation learning for instance and how to load the data again. Or replay some
episodes.
"""
from causal_world.envs.causalworld import CausalWorld
from causal_world.task_generators.task import generate_task
from causal_world.loggers.data_recorder import DataRecorder
from causal_world.loggers.data_loader import DataLoader
import causal_world.viewers.task_viewer as viewer
if __name__ == '__main__':
example()
| [
37811,
198,
1212,
11808,
2523,
345,
703,
284,
779,
257,
1366,
38156,
284,
1700,
617,
1366,
329,
198,
320,
3780,
4673,
329,
4554,
290,
703,
284,
3440,
262,
1366,
757,
13,
1471,
24788,
617,
198,
538,
8052,
13,
198,
37811,
198,
6738,
2... | 3.531915 | 141 |
from django.core.files.storage import Storage
from fdfs_client.client import Fdfs_client
from django.conf import settings
class FastDFSStorage(Storage):
"""自定义文件存储系统"""
def _open(self, name, mode='rb'):
"""打开文件时会自动调用的方法
因为这个类是实现存储,不涉及到文件的打开,所以这个方法用不到,但是,必须文档告诉我必须实现,所以pass
"""
pass
def _save(self, name, content):
"""
文件要存储时会自动的调用的方法:借此机会将要存储的文件上传到fastdfs
:param name: 要存储的文件的名字
:param content: 要存储的文件对象,是File类型的对象,需要调用read()读取出里面的文件内容二进制
:return: file_id
"""
# 创建fdfs客户端
# client = Fdfs_client('meiduo_mall/utils/fastdfs/client.conf')
client = Fdfs_client(self.client_conf)
# 调用上传的方法:upload_by_buffer()是使用文件的二进制上传的
ret = client.upload_by_buffer(content.read())
# 判断文件上传是否成功
if ret.get('Status') != 'Upload successed.':
raise Exception('fastfds upload error')
# 如果上传成功就将file_id返回出去
file_id = ret.get('Remote file_id')
# 本次return会将file_id自动的存储到ImageField字段对应的模型属性中,并自动的同步到数据库
return file_id
def exists(self, name):
"""告诉Django文件是否存在
本次的文件的存储需要转存到fastdfs,不需要在本地存储,所以每次要存储某个文件时,都需要返回False
返回False,是告诉Django本地没有的,那么Django才会去存储,才会去调用save()方法
"""
return False
def url(self, name):
"""
需要在这个方法中,拼接文件的全路径,用于将来做文件的下载的
<img src="{{ content.image.url }}">
:param name: 文件的名字:group1/M00/00/00/wKhnhFtWKcOAcNjGAAC4j90Tziw97.jpeg
:return: 文件的全路径:http://192.168.103.132:8888/group1/M00/00/00/wKhnhFtWKcOAcNjGAAC4j90Tziw97.jpeg
"""
# return 'http://192.168.103.132:8888/' + name
return self.base_url + name | [
6738,
42625,
14208,
13,
7295,
13,
16624,
13,
35350,
1330,
20514,
198,
6738,
277,
7568,
82,
62,
16366,
13,
16366,
1330,
376,
7568,
82,
62,
16366,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
628,
198,
4871,
12549,
8068,
50,
31425,
7... | 1.234783 | 1,380 |
import numpy as np
import gym
from gym.agents.base import BaseAgent
if __name__ == '__main__':
_test_env()
| [
11748,
299,
32152,
355,
45941,
198,
198,
11748,
11550,
198,
6738,
11550,
13,
49638,
13,
8692,
1330,
7308,
36772,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
4808,
9288,
62,
24330,
3419,
... | 2.761905 | 42 |
from django.test import client
from django.urls import URLPattern
from .enums import HttpMethod
from .helpers import reverse_url
PUBLIC_ENDPOINTS: dict[str, tuple] = {
"admin": HttpMethod.safe_methods(),
}
ACCEPTABLE_PUBLIC_ENDPOINT_STATUSES: set[int] = {
200,
400,
404,
405,
}
ACCEPTABLE_AUTHENTICATED_ENDPOINT_STATUSES: set[int] = {401}
| [
6738,
42625,
14208,
13,
9288,
1330,
5456,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
10289,
47546,
198,
198,
6738,
764,
268,
5700,
1330,
367,
29281,
17410,
198,
6738,
764,
16794,
364,
1330,
9575,
62,
6371,
198,
198,
5105,
32936,
62,
... | 2.357143 | 154 |
#%%
import pandas as pd
folders = ['2021-04-04_REL606_glucose_growth',
'2021-04-27_REL606_acetate_growth']
dfs = []
for i, f in enumerate(folders):
data = pd.read_csv(f'../../../data/growth_curves/{f}/processed/{f}.csv')
dfs.append(data)
data = pd.concat(dfs, sort=False)
if 'time_idx' in data.keys():
data.drop(columns='time_idx', inplace=True)
data.to_csv('../../../data/collated_growth_measurements.csv', index=False)
# %%
| [
2,
16626,
198,
11748,
19798,
292,
355,
279,
67,
220,
198,
198,
11379,
364,
796,
37250,
1238,
2481,
12,
3023,
12,
3023,
62,
16448,
33206,
62,
4743,
1229,
577,
62,
27922,
3256,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 2.192308 | 208 |
import numpy as np
import cv2
import os
from edge_pixels import find_edge_pixels
from fft import to_complex_number, fft, complex_to_number
for img in os.listdir('./inputs'):
print(f"Processando: {img}")
x = cv2.imread('./inputs/' + img, 0)
hei, wid = x.shape[:2]
output_shape = np.zeros(x.shape[:2])
list_edge, output_img = find_edge_pixels(x)
complex_list = to_complex_number(list_edge)
list_fourier = fft(complex_list, 2)
output_list = complex_to_number(list_fourier)
for pixel in output_list:
output_shape[pixel[0], pixel[1]] = 255
cv2.imwrite('out.png', output_img)
cv2.imwrite('out_shape.png', output_shape)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
269,
85,
17,
198,
11748,
28686,
198,
6738,
5743,
62,
79,
14810,
1330,
1064,
62,
14907,
62,
79,
14810,
198,
6738,
277,
701,
1330,
284,
62,
41887,
62,
17618,
11,
277,
701,
11,
3716,
62,
1462,... | 2.449219 | 256 |
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator
from django.shortcuts import get_object_or_404, redirect, render
from yatube.settings import POSTS_COUNT
from .forms import CommentForm, PostForm
from .models import Follow, Group, Post, User
@login_required
@login_required
@login_required
@login_required
@login_required
@login_required
| [
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
12501,
273,
2024,
1330,
17594,
62,
35827,
198,
6738,
42625,
14208,
13,
7295,
13,
79,
363,
20900,
1330,
31525,
20900,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
651,
62,
15252,
62,
... | 3.28 | 125 |
import copy
from haco.utils.callback import HACOCallbacks
from haco.utils.config import baseline_eval_config
from haco.utils.human_in_the_loop_env import HumanInTheLoopEnv
from haco.utils.train import train
from haco.utils.train_utils import get_train_parser
from ray.rllib.agents.ppo.ppo import PPOTrainer
evaluation_config = {"env_config": copy.deepcopy(baseline_eval_config)}
if __name__ == '__main__':
args = get_train_parser().parse_args()
exp_name = args.exp_name or "PPO"
stop = {"timesteps_total": 1000_0000}
config = dict(
env=HumanInTheLoopEnv,
env_config=dict(
main_exp=False
),
# ===== Evaluation =====
evaluation_interval=1,
evaluation_num_episodes=30,
evaluation_config=evaluation_config,
evaluation_num_workers=2,
metrics_smoothing_episodes=30,
# ===== Training =====
horizon=1500,
num_sgd_iter=20,
lr=5e-5,
grad_clip=10.0,
rollout_fragment_length=200,
sgd_minibatch_size=100,
train_batch_size=4000,
num_gpus=0.2 if args.num_gpus != 0 else 0,
num_cpus_per_worker=0.1,
num_cpus_for_driver=0.5,
num_workers=8,
clip_actions=False
)
train(
PPOTrainer,
exp_name=exp_name,
keep_checkpoints_num=5,
stop=stop,
config=config,
num_gpus=args.num_gpus,
num_seeds=5,
custom_callback=HACOCallbacks,
# test_mode=True,
# local_mode=True
)
| [
11748,
4866,
198,
198,
6738,
289,
10602,
13,
26791,
13,
47423,
1330,
367,
2246,
4503,
439,
10146,
198,
6738,
289,
10602,
13,
26791,
13,
11250,
1330,
14805,
62,
18206,
62,
11250,
198,
6738,
289,
10602,
13,
26791,
13,
10734,
62,
259,
62... | 2.089431 | 738 |
"""decorators module
"""
from . import utils as u
__author__ = "Bruno Lange"
__email__ = "blangeram@gmail.com"
__license__ = "MIT"
| [
37811,
12501,
273,
2024,
8265,
198,
37811,
198,
198,
6738,
764,
1330,
3384,
4487,
355,
334,
198,
198,
834,
9800,
834,
796,
366,
33,
5143,
78,
47579,
1,
198,
834,
12888,
834,
796,
366,
2436,
2564,
321,
31,
14816,
13,
785,
1,
198,
8... | 2.627451 | 51 |
'''
自动识别抽奖
直播间互动区 “关键字”出现次数>4 时,弹窗提示开始抽奖
前期准备工作需要安装:Python环境,opencv,pillow,ADB并配置好环境变量,百度文本识别
'''
#coding:utf8
import os
from PIL import Image
#import pytesseract
import cv2
import ctypes
from aip import AipOcr
'''
def ocr_text():
# 文本识别pytesseract,准确度底,弃用
image = Image.open('img/textextract.png')
tessdata_dir_config = '--tessdata-dir "D:\\Program Files (x86)\\Tesseract-OCR\\tessdata"'
text = pytesseract.image_to_string(image, lang='chi_sim', config=tessdata_dir_config)
print(text)
return text
'''
# 配置百度AipOcr
APP_ID = '11637513'
API_KEY = 'gL1FSye2D8QlcBrz2q7TQZYh'
SECRET_KEY = '2cfn2mGZWGws0mhlxmINRBprr2A9qekf'
client = AipOcr(APP_ID, API_KEY, SECRET_KEY)
while 1:
get_screen()
cut_image()
extract_text()
text = baidu_ocr_text()
string_count = string_lottery(text, "有草")
if string_count >= 4:
ctypes.windll.user32.MessageBoxW(0, '要抽奖了,关键词出现次数:'+str(string_count), '抽奖了', 0)
break
| [
7061,
6,
201,
198,
164,
229,
103,
27950,
101,
46237,
228,
26344,
104,
162,
232,
121,
25001,
244,
201,
198,
33566,
112,
162,
240,
255,
29785,
112,
12859,
240,
27950,
101,
44293,
118,
564,
250,
17739,
111,
165,
242,
106,
27764,
245,
4... | 1.446629 | 712 |
# from __future__ import annotations
import inspect
import dill
import logging
from warnings import warn
from typing import Type, TypeVar, Any, Mapping, Dict, Optional, List
from typing import Generator, MutableMapping, Callable, Set
from functools import WRAPPER_ASSIGNMENTS
from collections import OrderedDict
import copy
import ray
import torch
from io import StringIO
from ruamel.yaml.representer import RepresenterError
from ruamel.yaml import ScalarNode
from ruamel.yaml.comments import (CommentedMap, CommentedOrderedMap, CommentedSet,
CommentedKeySeq, CommentedSeq, TaggedScalar,
CommentedKeyMap)
from flambe.compile.serialization import load_state_from_file, State, load as flambe_load, \
save as flambe_save
from flambe.compile.registrable import Registrable, alias, yaml, registrable_factory
from flambe.compile.const import STATE_DICT_DELIMETER, FLAMBE_SOURCE_KEY, FLAMBE_CLASS_KEY, \
FLAMBE_CONFIG_KEY, FLAMBE_DIRECTORIES_KEY, KEEP_VARS_KEY, VERSION_KEY, FLAMBE_STASH_KEY
_EMPTY = inspect.Parameter.empty
A = TypeVar('A')
C = TypeVar('C', bound="Component")
YAML_TYPES = (CommentedMap, CommentedOrderedMap, CommentedSet, CommentedKeySeq, CommentedSeq,
TaggedScalar, CommentedKeyMap)
logger = logging.getLogger(__name__)
class Schema(MutableMapping[str, Any]):
"""Holds and recursively initializes Component's with kwargs
Holds a Component subclass and keyword arguments to that class's
compile method. When an instance is called it will perform the
recursive compilation process, turning the nested structure of
Schema's into initialized Component objects
Implements MutableMapping methods to facilitate inspection and
updates to the keyword args. Implements dot-notation access to
the keyword args as well.
Parameters
----------
component_subclass : Type[Component]
Subclass of Component that will be compiled
**keywords : Any
kwargs passed into the Schema's `compile` method
Examples
-------
Create a Schema from a Component subclass
>>> class Test(Component):
... def __init__(self, x=2):
... self.x = x
...
>>> tp = Schema(Test)
>>> t1 = tp()
>>> t2 = tp()
>>> assert t1 is t2 # the same Schema always gives you same obj
>>> tp = Schema(Test) # create a new Schema
>>> tp['x'] = 3
>>> t3 = tp()
>>> assert t1.x == 3 # dot notation works as well
Attributes
----------
component_subclass : Type[Component]
Subclass of Schema that will be compiled
keywords : Dict[str, Any]
kwargs passed into the Schema's `compile` method
"""
def add_extensions_metadata(self, extensions: Dict[str, str]) -> None:
"""Add extensions used when loading this schema and children
Uses ``component_subclass.__module__`` to filter for only the
single relevant extension for this object; extensions relevant
for children are saved only on those children schemas directly.
Use ``aggregate_extensions_metadata`` to generate a dictionary
of all extensions used in the object hierarchy.
"""
# Get top level module
modules = self.component_subclass.__module__.split('.')
# None sentinel won't be in extensions
top_level_module = modules[0] if len(modules) > 0 else None
if top_level_module is not None and top_level_module in extensions:
self._extensions = {top_level_module: extensions[top_level_module]}
else:
self._extensions = {}
for child in self.keywords.values():
helper(child)
def aggregate_extensions_metadata(self) -> Dict[str, str]:
"""Aggregate extensions used in object hierarchy"""
exts = dict(self._extensions or {}) # non-nested so shallow copy ok
for child in self.keywords.values():
helper(child)
return exts
# TODO uncomment recursive?
# @recursive_repr()
def __repr__(self) -> str:
"""Identical to super (schema), but sorts keywords"""
keywords = ", ".join("{}={!r}".format(k, v) for k, v in sorted(self.keywords.items()))
format_string = "{module}.{cls}({component_subclass}, {keywords})"
return format_string.format(module=self.__class__.__module__,
cls=self.__class__.__qualname__,
component_subclass=self.component_subclass,
keywords=keywords)
@classmethod
@staticmethod
def serialize(obj: Any) -> Dict[str, Any]:
"""Return dictionary representation of schema
Includes yaml as a string, and extensions
Parameters
----------
obj: Any
Should be schema or dict of schemas
Returns
-------
Dict[str, Any]
dictionary containing yaml and extensions dictionary
"""
with StringIO() as stream:
yaml.dump(obj, stream)
serialized = stream.getvalue()
exts: Dict[str, str] = {}
# TODO: temporary until Pipeline object exists
if isinstance(obj, dict):
for value in obj.values():
exts.update(value.aggregate_extensions_metadata())
else:
exts.update(obj.aggregate_extensions_metadata())
rep = {'yaml': serialized, 'extensions': exts}
return rep
@staticmethod
def deserialize(data: Dict[str, Any]) -> Any:
"""Construct Schema from dict returned by Schema.serialize
Parameters
----------
data: Dict[str, Any]
dictionary returned by ``Schema.serialize``
Returns
-------
Any
Schema or dict of schemas (depending on yaml in ``data``)
"""
yaml_str = data['yaml']
extensions = data['extensions']
obj = yaml.load(yaml_str)
# TODO: temporary until Pipeline object exists
if isinstance(obj, dict):
for value in obj.values():
value.add_extensions_metadata(extensions)
else:
obj.add_extensions_metadata(extensions)
return obj
# Add representer for dumping Schema back to original yaml
# Behaves just like Component `to_yaml` but compilation not needed
yaml.representer.add_representer(Schema, Schema.to_yaml)
# Used to contextualize the representation of links during YAML
# representation
_link_root_obj = None
_link_prefix = None
_link_context_active = False
_link_obj_stash: Dict[str, Any] = {}
class contextualized_linking:
"""Context manager used to change the representation of links
Links are always defined in relation to some root object and an
attribute path, so when representing some piece of a larger object
all the links need to be redefined in relation to the target object
"""
@alias('$')
@alias('@')
@alias('link')
class Link(Registrable):
"""Represent a dependency in your object hierarchy
A Link delays the access of some property, or the calling of some
method, until the Link is called. Links can be passed directly
into a Component subclass `compile`, Component's method called
compile will automatically record the links and call them to
access their values before running `__new__` and `__init__`. The
recorded links will show up in the config if `yaml.dump()` is
called on your object hierarchy. This typically happens when
logging individual configs during a grid search, and when
serializing between multiple processes
Parameters
----------
ref : str
Period separated list of keywords starting with the block id
and ending at the target attribute. For example,
`b1.model.encoder.hidden_size`.
obj : Optional[Any]
Object named by ref's first keyword
local : bool
if true, changes tune convert behavior to insert a dummy link;
used for links to global variables ("resources" in config)
Attributes
----------
ref : str
Period separated list of keywords starting with the block id
and ending at the target attribute.
var_name : str
The name of the class of `obj`
attr : List[str]
Attribute of `obj` that will be accessed
obj : Any
Object containing the attribute or method to link. If it is a
Schema it will be compiled when the Link is called
if necessary
local : bool
if true, changes tune convert behavior to insert a dummy link;
used for links to global variables ("resources" in config)
"""
@classmethod
def to_yaml(cls, representer: Any, node: Any, tag: str) -> Any:
"""Build contextualized link based on the root node
If the link refers to something inside of the current object
hierarchy (as determined by the global prefix `_link_prefix`)
then it will be represented as a link; if the link refers to
something out-of-scope, i.e. not inside the current object
hiearchy, then replace the link with the resolved value. If
the value cannot be represented throw an exception.
Raises
-------
RepresenterError
If the link is "out-of-scope" and the value cannot be
represented in YAML
"""
global _link_root_obj
global _link_prefix
global _link_context_active
global _link_obj_stash
final_link = node.attr[:]
referenced_root = node.obj._compiled if isinstance(node.obj, Schema) else node.obj
if _link_context_active:
if _link_prefix is None:
raise TypeError('Link context active but prefix not set')
if _link_prefix != '':
# If there is a prefix, iterate through the prefix
# navigating from the root object If the attribute
# path continues past the link's own attribute path, OR
# a non-matching attribute is found, this link is
# "out-of-scope", so try copying the value
prefix = _link_prefix.split(STATE_DICT_DELIMETER)
for i, attr in enumerate(prefix):
if len(node.attr) <= i or node.attr[i] != attr:
if isinstance(node._resolved, Registrable):
return node._resolved.to_yaml(representer, node._resolved,
node._resolved._created_with_tag) # type: ignore # noqa: E501
else:
try:
return representer.represent_data(node._resolved)
except RepresenterError:
obj_id = str(len(_link_obj_stash.keys()))
_link_obj_stash[obj_id] = node._resolved
data_link = PickledDataLink(obj_id=obj_id)
return PickledDataLink.to_yaml(representer, data_link, '!$')
final_link = final_link[1:]
elif referenced_root is not _link_root_obj:
# No prefix, but the referenced root object doesn't
# match so it's out-of-scope
if isinstance(node._resolved, Registrable):
return node._resolved.to_yaml(representer, node._resolved,
node._resolved._created_with_tag) # type: ignore
else:
try:
return representer.represent_data(node._resolved)
except RepresenterError:
obj_id = str(len(_link_obj_stash.keys()))
_link_obj_stash[obj_id] = node._resolved
data_link = PickledDataLink(obj_id=obj_id)
return PickledDataLink.to_yaml(representer, data_link, '!$')
# Root object matches and no prefix, or prefix exists in
# current object hiearchy
# i.e. "in-scope"
return representer.represent_scalar(tag, STATE_DICT_DELIMETER.join(final_link))
# No contextualization necessary
return representer.represent_scalar(tag, node.ref)
@classmethod
@alias('call')
class FunctionCallLink(Link):
"""Calls the link attribute instead of just accessing it"""
def activate_links(kwargs: Dict[str, Any]) -> Dict[str, Any]:
"""Iterate through items in dictionary and activate any `Link`s
Parameters
----------
kwargs : Dict[str, Any]
A dictionary of kwargs that may contain instances of `Link`
Returns
-------
Dict[str, Any]
Copy of the original dictionay with all Links activated
Examples
-------
Process a dictionary with Links
>>> class A(Component):
... def __init__(self, x=2):
... self.x = x
...
>>> a = A(x=1)
>>> kwargs = {'kw1': 0, 'kw2': Link("ref_for_a.x", obj=a)}
>>> activate_links(kwargs)
{'kw1': 0, 'kw2': 1}
"""
return {
# Retrieve actual value of link before initializing
kw: kwargs[kw]() if isinstance(kwargs[kw], Link) else kwargs[kw]
for kw in kwargs
}
def activate_stash_refs(kwargs: Dict[str, Any], stash: Dict[str, Any]) -> Dict[str, Any]:
"""Activate the pickled data links using the loaded stash"""
return {
kw: kwargs[kw](stash) if isinstance(kwargs[kw], PickledDataLink) else kwargs[kw]
for kw in kwargs
}
def fill_defaults(kwargs: Dict[str, Any], function: Callable[..., Any]) -> Dict[str, Any]:
"""Use function signature to add missing kwargs to a dictionary"""
signature = inspect.signature(function)
kwargs_with_defaults = kwargs.copy()
for name, param in signature.parameters.items():
if name == "self":
continue
default = param.default
if name not in kwargs and default != _EMPTY:
kwargs_with_defaults[name] = default
return kwargs_with_defaults
def merge_kwargs(kwargs: Dict[str, Any], compiled_kwargs: Dict[str, Any]) -> Dict[str, Any]:
"""Replace non links in kwargs with corresponding compiled values
For every key in `kwargs` if the value is NOT a link and IS a
Schema, replace with the corresponding value in `compiled_kwargs`
Parameters
----------
kwargs : Dict[str, Any]
Original kwargs containing Links and Schemas
compiled_kwargs : Dict[str, Any]
Processes kwargs containing no links and no Schemas
Returns
-------
Dict[str, Any]
kwargs with links, but with Schemas replaced by compiled
objects
"""
merged_kwargs = {}
for kw in kwargs:
if not isinstance(kwargs[kw], Link) and isinstance(kwargs[kw], Schema):
if kw not in compiled_kwargs:
raise CompilationError('Non matching kwargs and compiled_kwargs')
merged_kwargs[kw] = compiled_kwargs[kw]
else:
merged_kwargs[kw] = kwargs[kw]
return merged_kwargs
class Component(Registrable):
"""Class which can be serialized to yaml and implements `compile`
IMPORTANT: ALWAYS inherit from Component BEFORE `torch.nn.Module`
Automatically registers subclasses via Registrable and
facilitates immediate usage in YAML with tags. When loaded,
subclasses' initialization is delayed; kwargs are wrapped in a
custom schema called Schema that can be easily initialized
later.
"""
_flambe_version = '0.0.0' # >0.0.0 opts into semantic versioning
def run(self) -> bool:
"""Run a single computational step.
When used in an experiment, this computational step should
be on the order of tens of seconds to about 10 minutes of work
on your intended hardware; checkpoints will be performed in
between calls to run, and resources or search algorithms will
be updated. If you want to run everything all at once, make
sure a single call to run does all the work and return False.
Returns
-------
bool
True if should continue running later i.e. more work to do
"""
# By default it doesn't do anything and doesn't continue
continue_ = False
return continue_
def metric(self) -> Optional[float]:
"""Override this method to enable scheduling and searching.
Returns
-------
float
The metric to compare different variants of your Component
"""
return None
@property
def _config_str(self):
"""Represent object's architecture as a YAML string
Includes the extensions relevant to the object as well; NOTE:
currently this section may include a superset of the extensions
actually needed, but this will be changed in a future release.
"""
stream = None
if not hasattr(self, '_saved_kwargs'):
raise AttributeError(f"{type(self).__name__} object was not compiled from YAML (or "
"created via the factory method 'compile') and does not have an"
" associated config")
try:
config = ""
stream = StringIO()
try:
exts = self.aggregate_extensions_metadata()
if exts is not None and len(exts) > 0:
yaml.dump_all([exts, self], stream)
else:
yaml.dump(self, stream)
config = stream.getvalue()
except RepresenterError as re:
print(re)
logger.warn("Exception representing attribute in yaml... ", re)
finally:
if not stream.closed:
stream.close()
return config
except AttributeError as a:
if stream is not None and not stream.closed:
stream.close()
print(a)
raise AttributeError(f"{type(self).__name__} object was not compiled from YAML (or "
"created via the factory method 'compile') and does not have an"
"associated config")
except Exception as e:
if stream is not None and not stream.closed:
stream.close()
raise e
def register_attrs(self, *names: str) -> None:
"""Set attributes that should be included in state_dict
Equivalent to overriding `obj._state` and `obj._load_state` to
save and load these attributes. Recommended usage: call inside
`__init__` at the end: `self.register_attrs(attr1, attr2, ...)`
Should ONLY be called on existing attributes.
Parameters
----------
*names : str
The names of the attributes to register
Raises
-------
AttributeError
If `self` does not have existing attribute with that name
"""
if not hasattr(self, '_registered_attributes'):
self._registered_attributes: Set[str] = set()
for name in names:
if not hasattr(self, name):
raise AttributeError(f"{type(self).__name__} object has no attribute {name}, so "
"it cannot be registered")
self._registered_attributes.update(names)
@staticmethod
def _state_dict_hook(self,
state_dict: State,
prefix: str,
local_metadata: Dict[str, Any]) -> State:
"""Add metadata and recurse on Component children
This hook is used to integrate with the PyTorch `state_dict`
mechanism; as either `nn.Module.state_dict` or
`Component.get_state` recurse, this hook is responsible for
adding Flambe specific metadata and recursing further on any
Component children of `self` that are not also nn.Modules,
as PyTorch will handle recursing to the latter.
Flambe specific metadata includes the class version specified
in the `Component._flambe_version` class property, the name
of the class, the source code, and the fact that this class is
a `Component` and should correspond to a directory in our
hiearchical save format
Finally, this hook calls a helper `_state` that users can
implement to add custom state to a given class
Parameters
----------
state_dict : State
The state_dict as defined by PyTorch; a flat dictionary
with compound keys separated by '.'
prefix : str
The current prefix for new compound keys that reflects the
location of this instance in the object hierarchy being
represented
local_metadata : Dict[str, Any]
A subset of the metadata relevant just to this object and
its children
Returns
-------
type
The modified state_dict
Raises
-------
ExceptionName
Why the exception is raised.
"""
warn_use_state = False
if FLAMBE_DIRECTORIES_KEY not in state_dict._metadata:
state_dict._metadata[FLAMBE_DIRECTORIES_KEY] = set()
warn_use_state = True
if KEEP_VARS_KEY not in state_dict._metadata:
state_dict._metadata[KEEP_VARS_KEY] = False
warn_use_state = True
if warn_use_state:
warn("Use '.get_state()' on flambe objects, not state_dict "
f"(from {type(self).__name__})")
# 1 need to add in any extras like config
local_metadata[VERSION_KEY] = self._flambe_version
local_metadata[FLAMBE_CLASS_KEY] = type(self).__name__
local_metadata[FLAMBE_SOURCE_KEY] = dill.source.getsource(type(self))
# All links should be relative to the current object `self`
with contextualized_linking(root_obj=self, prefix=prefix[:-1]):
try:
local_metadata[FLAMBE_CONFIG_KEY] = self._config_str
global _link_obj_stash
if len(_link_obj_stash) > 0:
local_metadata[FLAMBE_STASH_KEY] = copy.deepcopy(_link_obj_stash)
except AttributeError:
pass
# 2 need to recurse on Components
# Iterating over __dict__ does NOT include pytorch children
# modules, parameters or buffers
# torch.optim.Optimizer does exist so ignore mypy
for name, attr in self.__dict__.items():
if isinstance(attr, Component) and not isinstance(attr, (
torch.optim.Optimizer, torch.optim.lr_scheduler._LRScheduler)): # type: ignore
current_path = prefix + name
# If self is not nn.Module, need to recurse because
# that will not happen elsewhere
# If self *is* an nn.Module, don't need to recurse on
# child nn.Module's because pytorch will already do
# that; just recurse on non-nn.Module's
# The latter case shouldn't happen, this is just an
# extra check for safety;
# child modules are not stored in __dict__
if not isinstance(self, torch.nn.Module) or not isinstance(attr, torch.nn.Module):
state_dict = attr.get_state(destination=state_dict,
prefix=current_path + STATE_DICT_DELIMETER,
keep_vars=state_dict._metadata[KEEP_VARS_KEY])
state_dict._metadata[FLAMBE_DIRECTORIES_KEY].add(current_path)
# Iterate over modules to make sure Component
# nn.Modules are added to flambe directories
if isinstance(self, torch.nn.Module):
for name, module in self.named_children():
if isinstance(module, Component):
current_path = prefix + name
state_dict._metadata[FLAMBE_DIRECTORIES_KEY].add(current_path)
state_dict = self._add_registered_attrs(state_dict, prefix)
state_dict = self._state(state_dict, prefix, local_metadata)
return state_dict
def _state(self, state_dict: State, prefix: str, local_metadata: Dict[str, Any]) -> State:
"""Add custom state to state_dict
Parameters
----------
state_dict : State
The state_dict as defined by PyTorch; a flat dictionary
with compound keys separated by '.'
prefix : str
The current prefix for new compound keys that reflects the
location of this instance in the object hierarchy being
represented
local_metadata : Dict[str, Any]
A subset of the metadata relevant just to this object and
its children
Returns
-------
State
The modified state_dict
"""
return state_dict
def get_state(self,
destination: Optional[State] = None,
prefix: str = '',
keep_vars: bool = False) -> State:
"""Extract PyTorch compatible state_dict
Adds Flambe specific properties to the state_dict, including
special metadata (the class version, source code, and class
name). By default, only includes state that PyTorch `nn.Module`
includes (Parameters, Buffers, child Modules). Custom state can
be added via the `_state` helper method which subclasses should
override.
The metadata `_flambe_directories` indicates which objects are
Components and should be a subdirectory in our hierarchical
save format. This object will recurse on `Component` and
`nn.Module` children, but NOT `torch.optim.Optimizer`
subclasses, `torch.optim.lr_scheduler._LRScheduler` subclasses,
or any other arbitrary python objects.
Parameters
----------
destination : Optional[State]
The state_dict as defined by PyTorch; a flat dictionary
with compound keys separated by '.'
prefix : str
The current prefix for new compound keys that reflects the
location of this instance in the object hierarchy being
represented
keep_vars : bool
Whether or not to keep Variables (only used by PyTorch)
(the default is False).
Returns
-------
State
The state_dict object
Raises
-------
ExceptionName
Why the exception is raised.
"""
if destination is None:
destination = State()
destination._metadata = OrderedDict({FLAMBE_DIRECTORIES_KEY: set(),
KEEP_VARS_KEY: keep_vars})
destination._metadata[FLAMBE_DIRECTORIES_KEY].add(prefix)
if isinstance(self, torch.nn.Module):
destination = self.state_dict(destination, prefix, keep_vars)
# torch.optim.Optimizer does exist so ignore mypy
elif isinstance(self, (torch.optim.Optimizer, # type: ignore
torch.optim.lr_scheduler._LRScheduler)):
pass
else:
local_metadata: Dict[str, Any] = {}
destination._metadata[prefix[:-1]] = local_metadata
destination = self._state_dict_hook(self, destination, prefix, local_metadata)
return destination # type: ignore
def _load_state_dict_hook(self,
state_dict: State,
prefix: str,
local_metadata: Dict[str, Any],
strict: bool,
missing_keys: List[Any],
unexpected_keys: List[Any],
error_msgs: List[Any]) -> None:
"""Load flambe-specific state
Parameters
----------
state_dict : State
The state_dict as defined by PyTorch; a flat dictionary
with compound keys separated by '.'
prefix : str
The current prefix for new compound keys that reflects the
location of this instance in the object hierarchy being
represented
local_metadata : Dict[str, Any]
A subset of the metadata relevant just to this object and
its children
strict : bool
Whether missing or unexpected keys should be allowed;
should always be False in Flambe
missing_keys : List[Any]
Missing keys so far
unexpected_keys : List[Any]
Unexpected keys so far
error_msgs : List[Any]
Any error messages so far
Raises
-------
LoadError
If the state for some object does not have a matching major
version number
"""
# Custom subclass behavior
self._load_state(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys,
error_msgs)
self._load_registered_attrs(state_dict, prefix)
# Check state compatibility
version = local_metadata[VERSION_KEY].split('.')
if min(map(int, version)) > 0:
# Opt-in to semantic versioning
versions = local_metadata[VERSION_KEY], type(self)._flambe_version
load_version, current_version = map(lambda x: x.split('.'), versions)
if load_version[0] != current_version[0]:
raise LoadError(f'Incompatible Versions: {load_version} and {current_version}')
if load_version[1] != current_version[1]:
logger.warn(f'Differing Versions (Minor): {load_version} and {current_version}')
if load_version[2] != current_version[2]:
logger.debug(f'Differing Versions (Patch): {load_version} and {current_version}')
else:
original_source = local_metadata[FLAMBE_SOURCE_KEY]
current_source = dill.source.getsource(type(self))
if original_source != current_source:
# Warn / Error
logger.warn(f"Source code for object {self} does not match the source code saved "
f"with the state dict\nSource code: {current_source}\n"
f"Original source code:{original_source}\n")
def _load_state(self,
state_dict: State,
prefix: str,
local_metadata: Dict[str, Any],
strict: bool,
missing_keys: List[Any],
unexpected_keys: List[Any],
error_msgs: List[Any]) -> None:
"""Load custom state (that was included via `_state`)
Subclasses should override this function to add custom state
that isn't normally included by PyTorch nn.Module
Parameters
----------
state_dict : State
The state_dict as defined by PyTorch; a flat dictionary
with compound keys separated by '.'
prefix : str
The current prefix for new compound keys that reflects the
location of this instance in the object hierarchy being
represented
local_metadata : Dict[str, Any]
A subset of the metadata relevant just to this object and
its children
strict : bool
Whether missing or unexpected keys should be allowed;
should always be False in Flambe
missing_keys : List[Any]
Missing keys so far
unexpected_keys : List[Any]
Unexpected keys so far
error_msgs : List[Any]
Any error messages so far
"""
pass
def load_state(self, state_dict: State, strict: bool = False) -> None:
"""Load `state_dict` into `self`
Loads state produced by `get_state` into the current object,
recursing on child `Component` and `nn.Module` objects
Parameters
----------
state_dict : State
The state_dict as defined by PyTorch; a flat dictionary
with compound keys separated by '.'
strict : bool
Whether missing or unexpected keys should be allowed;
should ALWAYS be False in Flambe (the default is False).
Raises
-------
LoadError
If the state for some object does not have a matching major
version number
"""
missing_keys: List[str] = []
unexpected_keys: List[str] = []
error_msgs: List[str] = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
# For loading, the _load_from_state_dict and
# _load_state_dict_hook are NOT recursive.
# We emulate PyTorch's structure by having a recursive
# helper here, for compatibility reasons.
load(self)
# PyTorch 1.1 error handling
if strict:
if len(unexpected_keys) > 0:
error_msgs.insert(0, 'Unexpected key(s) in state_dict: '
f'{", ".join(f"{k}" for k in unexpected_keys)}. ')
if len(missing_keys) > 0:
error_msgs.insert(0, 'Missing key(s) in state_dict: '
f'{", ".join(f"{k}" for k in missing_keys)}. ')
if len(error_msgs) > 0:
newline_tab = '\n\t'
raise RuntimeError('Error(s) in loading state_dict for '
f'{self.__class__.__name__}:{newline_tab}'
f'{newline_tab.join(error_msgs)}')
@registrable_factory
@classmethod
@classmethod
@classmethod
@classmethod
def setup_dependencies(cls: Type[C], kwargs: Dict[str, Any]) -> None:
"""Add default links to kwargs for cls; hook called in compile
For example, you may want to connect model parameters to the
optimizer by default, without requiring users to specify this
link in the config explicitly
Parameters
----------
cls : Type[C]
Class on which method is called
kwargs : Dict[str, Any]
Current kwargs that should be mutated directly to include
links
"""
return
@classmethod
def precompile(cls: Type[C], **kwargs: Any) -> None:
"""Change kwargs before compilation occurs.
This hook is called after links have been activated, but before
calling the recursive initialization process on all other
objects in kwargs. This is useful in a number of cases, for
example, in Trainer, we compile several objects ahead of time
and move them to the GPU before compiling the optimizer,
because it needs to be initialized with the model parameters
*after* they have been moved to GPU.
Parameters
----------
cls : Type[C]
Class on which method is called
**kwargs : Any
Current kwargs that will be compiled and used to initialize
an instance of cls after this hook is called
"""
return
    def aggregate_extensions_metadata(self) -> Dict[str, str]:
        """Aggregate extensions used in object hierarchy

        Returns a flat mapping of extension name -> source for this object
        and (via the recursive helper) its compiled children.

        TODO: remove or combine with schema implementation in refactor

        """
        # non-nested so shallow copy ok
        exts = dict(self._extensions or {})  # type: ignore
        for child in self._saved_kwargs.values():  # type: ignore
            # NOTE(review): `helper` is not defined in this chunk; upstream
            # it is a nested closure that merges each child Component's
            # extensions into `exts` — confirm it is in scope here.
            helper(child)
        return exts
    @classmethod
    def compile(cls: Type[C],
                _flambe_custom_factory_name: Optional[str] = None,
                _flambe_extensions: Optional[Dict[str, str]] = None,
                _flambe_stash: Optional[Dict[str, Any]] = None,
                **kwargs: Any) -> C:
        """Create instance of cls after recursively compiling kwargs

        Similar to normal initialization, but recursively initializes
        any arguments that should be compiled and allows overriding
        arbitrarily deep kwargs before initializing if needed. Also
        activates any Link instances passed in as kwargs, and saves
        the original kwargs for dumping to yaml later.

        Parameters
        ----------
        **kwargs : Any
            Keyword args that should be forwarded to the initialization
            function (a specified factory, or the normal `__new__`
            and `__init__` methods)

        Returns
        -------
        C
            An instance of the class `cls`

        """
        extensions: Dict[str, str] = _flambe_extensions or {}
        stash: Dict[str, Any] = _flambe_stash or {}
        # Set additional links / default links
        cls.setup_dependencies(kwargs)
        # Activate links all links
        processed_kwargs = activate_links(kwargs)  # TODO maybe add to helper for collections
        processed_kwargs = activate_stash_refs(processed_kwargs, stash)
        # Modify kwargs, optionally compiling and updating any of them
        cls.precompile(**processed_kwargs)
        # Recursively compile any remaining un-compiled kwargs
        # NOTE(review): `helper` is not defined in this chunk; upstream it is
        # a nested function that walks the kwargs and compiles Schemas —
        # confirm it is in scope here.
        newkeywords = helper(processed_kwargs)
        # Check for remaining yaml types: anything still a raw yaml node at
        # this point usually means a typo or an unregistered class
        for kw in newkeywords:
            if isinstance(newkeywords[kw], YAML_TYPES):
                msg = f"'{cls}' property '{kw}' is still yaml type {type(newkeywords[kw])}\n"
                msg += f"This could be because of a typo or the class is not registered properly"
                warn(msg)
        # Find intended constructor in case using some factory
        factory_method: Callable[..., Any] = cls
        if _flambe_custom_factory_name is not None:
            factory_method = getattr(cls, _flambe_custom_factory_name)
        # Replace non link Schemas with compiled objects in kwargs
        # for dumping
        kwargs_non_links_compiled = merge_kwargs(kwargs, newkeywords)
        # Fill the *original* kwargs with defaults specified by factory
        kwargs_with_defaults = fill_defaults(kwargs_non_links_compiled, factory_method)
        # Create the compiled instance of `cls`
        try:
            instance = factory_method(**newkeywords)
        except TypeError as te:
            print(f"class {cls} method {_flambe_custom_factory_name} failed with "
                  f"keyword args:\n{newkeywords}")
            raise te
        # Record kwargs used for compilation for YAML dumping later
        # Includes defaults for better safety / reproducibility
        instance._saved_kwargs = kwargs_with_defaults
        instance._extensions = extensions
        return instance
def dynamic_component(class_: Type[A],
                      tag: str,
                      tag_namespace: Optional[str] = None) -> Type[Component]:
    """Wrap ``class_`` in a dynamically created ``Component`` subclass.

    If ``class_`` already derives from ``Component`` it is returned
    unchanged. Otherwise a new subclass inheriting from both ``Component``
    and ``class_`` is created, which registers it with the yaml loader and
    gives it ``from_yaml`` / ``to_yaml`` / ``compile``. ``class_`` itself
    should not implement any of those.

    Parameters
    ----------
    class_ : Type[A]
        Class to register with yaml and the compilation system
    tag : str
        Tag that will be used with yaml
    tag_namespace : str
        Namespace aka the prefix, used. e.g. for `!torch.Adam` torch is
        the namespace

    Returns
    -------
    Type[Component]
        New subclass of `_class` and `Component`
    """
    if issubclass(class_, Component):
        return class_
    # Build the subclass via type(); the extra keyword arguments are legal
    # in Python 3.6+ class creation and are consumed by Registrable, which
    # mypy does not model — hence the ignores.
    new_component = type(class_.__name__,  # type: ignore
                         (Component, class_),
                         {},
                         tag_override=tag,
                         tag_namespace=tag_namespace)  # type: ignore
    # Mirror functools.update_wrapper: copy the standard wrapper attributes
    # so the dynamic class still introspects and debugs like the original.
    sentinel = object()
    for attr in WRAPPER_ASSIGNMENTS:
        value = getattr(class_, attr, sentinel)
        if value is sentinel:
            continue
        try:
            setattr(new_component, attr, value)
        except AttributeError:
            pass
    return new_component
| [
2,
422,
11593,
37443,
834,
1330,
37647,
198,
11748,
10104,
198,
11748,
288,
359,
198,
11748,
18931,
198,
6738,
14601,
1330,
9828,
198,
6738,
19720,
1330,
5994,
11,
5994,
19852,
11,
4377,
11,
337,
5912,
11,
360,
713,
11,
32233,
11,
734... | 2.346689 | 17,471 |
from .server import console
from .database import migration, model
from .http import route, controller
from .tools import helper
def run_server(server):
    """Start the application's console server.

    :param server: server configuration/handle forwarded to console.run
    :return: whatever console.run returns
    """
    return console.run(server)
def create_migration(name):
    """Create a new database migration file.

    :param name: name of the migration to create
    :return: result of migration.create_migration
    """
    return migration.create_migration(name)
def migrate(name):
    """Run database migrations.

    :param name: string naming the migration (or target) to run
    :return: result of migration.run_migrate
    """
    return migration.run_migrate(name)
def create_model(name):
    """Create a new database-interaction (model) file.

    :param name: name of the model to create
    :return: result of model.create_model
    """
    return model.create_model(name)
def create_controller(name):
    """Create a new HTTP controller file.

    :param name: name of the controller to create
    :return: result of controller.create_controller
    """
    return controller.create_controller(name)
def create_route(name):
    """Create a new HTTP route file.

    :param name: name of the route to create
    :return: result of route.create_route
    """
    return route.create_route(name)
def generate_secret_key():
    """Generate a new secret key for the application.

    :return: the freshly generated key from helper.generate_secret_key
    """
    return helper.generate_secret_key()
| [
6738,
764,
15388,
1330,
8624,
198,
6738,
764,
48806,
1330,
13472,
11,
2746,
198,
6738,
764,
4023,
1330,
6339,
11,
10444,
198,
6738,
764,
31391,
1330,
31904,
628,
198,
4299,
1057,
62,
15388,
7,
15388,
2599,
198,
220,
220,
220,
37227,
1... | 2.663664 | 333 |
import datetime as dt
import json
from typing import List, Optional
from uuid import UUID
from fastapi.encoders import jsonable_encoder
from injector import singleton, inject
from common.cache import fail_silently, hash_cache_key
from common.injection import Cache
from database.utils import map_to
from post.models import Post
@singleton
| [
11748,
4818,
8079,
355,
288,
83,
198,
11748,
33918,
198,
6738,
19720,
1330,
7343,
11,
32233,
198,
6738,
334,
27112,
1330,
471,
27586,
198,
198,
6738,
3049,
15042,
13,
12685,
375,
364,
1330,
33918,
540,
62,
12685,
12342,
198,
6738,
8677,... | 3.648936 | 94 |
# -*- coding: utf-8 -*-
# @createTime : 2020/5/12 8:46
# @author : Huanglg
# @fileName: table.py
# @email: luguang.huang@mabotech.com
# -*- coding: utf-8 -*-
import json
import os
import time
import traceback
import natsort
import shutil
from PIL import Image, ImageEnhance
import tr
import cv2
import numpy as np
import config
import constants
from utils.Logger import Logger
from utils.RedisHelper import MyRedis
# Module-level singletons shared by the functions below.
log = Logger()
try:
    redis = MyRedis(host=config.REDIS_HOST, port=config.REDIS_PORT, password=config.REDIS_PASSWORD)
except Exception:
    # NOTE(review): if the connection fails the error is only logged and
    # `redis` is left undefined, so any later redis.hash_set call raises
    # NameError — confirm crash-at-use is the intended behavior.
    log.error(traceback.format_exc())
# OCR baselines used by image_to_text to skip re-recognizing repeated
# column headers: pixel array and text of the most recent column strip.
np_base_columns = np.array([])
text_base_columns = str()
def pdf_to_table(pdf_path):
    """Convert one PDF into page images using the ``pdftohtml`` CLI tool.

    :param pdf_path: path of the PDF file to convert
    :return: list of absolute paths of the generated PNG files, or
        implicitly ``None`` when the shell command fails
    """
    filename = os.path.split(pdf_path)[1]
    project_path = os.getcwd()
    out_dir = os.path.join(project_path, 'output', filename)
    # If output from a previous run exists, delete it first
    if os.path.exists(out_dir):
        shutil.rmtree(out_dir)
    os.makedirs(out_dir)
    pdf_to_table_commond = 'pdftohtml -c -hidden -xml "{0}" "{1}"'.format(pdf_path,
                                                                          os.path.join(out_dir, filename + '.xml'))
    print(pdf_to_table_commond)
    # NOTE(review): `run_shell_cmd` and `scan_file` are not defined in this
    # module as shown — presumably utility helpers elsewhere; confirm.
    ret = run_shell_cmd(pdf_to_table_commond)
    if ret:
        all_png_absolute_path = scan_file(out_dir)
        return all_png_absolute_path
def cut_img(img, coordinate):
    """Crop ``coordinate`` out of an image and boost its contrast.

    :param img: image path (str), a ``PIL.Image.Image``, or a numpy array
    :param coordinate: box forwarded to ``PIL.Image.crop`` — a
        (left, upper, right, lower) tuple in original-image pixels
    :return: cropped region with contrast enhanced by a factor of 1.5
    :raises NotImplementedError: for any other input type
    """
    if isinstance(img, np.ndarray):
        source = Image.fromarray(img)
    elif isinstance(img, str):
        source = Image.open(img)
    elif isinstance(img, Image.Image):
        source = img
    else:
        raise NotImplementedError()
    cropped = source.crop(coordinate)
    return ImageEnhance.Contrast(cropped).enhance(1.5)
def correct_text(key, text):
    """Correct common OCR mistakes in a recognized text field.

    :param key: which field the text came from ('title1', 'title2',
        'column_name', or anything else for pass-through)
    :param text: the raw OCR text
    :return: the corrected text
    """
    if key == 'title1':
        # drop characters that are always OCR noise in this field
        return text.replace('<>', '').replace('母', '').replace('团', '')
    elif key == 'title2':
        # Bug fix: this previously compared the first character to the
        # integer 7 (always False), so the 7->Z correction never ran.
        if text and text[0] == '7':
            # a leading '7' is an OCR misread of 'Z' (e.g. Z-axis labels)
            text = text.replace('7', 'Z')
        return text.replace('|', '').replace('乙轴', 'Z轴')
    elif key == 'column_name':
        return text.replace('+TOL TOL', '+TOL -TOL').replace('特征NOMINAL', '特征 NOMINAL')
    else:
        return text
def image_to_text(image_path, index, actual_filename):
    """OCR one page image and store the structured result in redis.

    :param image_path: path of one page image cut from the PDF
    :param index: page index; 0 is the header page, others are data pages
    :param actual_filename: redis hash key identifying this PDF
        (the original docstring documented a ``serial_num`` parameter —
        the serial/"vehicle architecture" number — that mismatch is noted
        here; confirm against callers)
    :return: None — results are written to redis under ``actual_filename``
    """
    image = Image.open(image_path)
    # index == 0: this page carries the table-header information
    if index == 0:
        scope = constants.header_scope
        result = {}
        for k, v in scope.items():
            # the serial number comes from the file name, not from OCR
            if k == 'serialnum':
                continue
            img = cut_img(img=image, coordinate=v)
            # header height varies slightly; run_angle detects the regions
            tr_result = tr.run_angle(img)
            text = None
            for item in tr_result:
                if item[1] == 'r':
                    continue
                else:
                    text = correct_text(k, item[1])
            result[k] = text
        log.info('index:{0}, text:{1}'.format(*(index, result)))
        redis.hash_set(actual_filename, index, json.dumps(result))
    else:
        scope = constants.normal_scope
        result = {}
        for k, v in scope.items():
            img = cut_img(img=image, coordinate=v)
            # column names repeat across pages; compare against the most
            # recently recognized one to avoid redundant OCR work
            if k == 'column_name':
                np_column = np.array(img)
                global np_base_columns, text_base_columns
                # pixel-identical to the previous strip? reuse its text
                if (np_column.shape == np_base_columns.shape) and (
                        not np.any(cv2.subtract(np_base_columns, np_column))):
                    result[k] = text_base_columns
                    continue
                else:
                    result[k] = correct_text(k, tr.recognize(img)[0])
                    # update the baseline used for the next comparison
                    text_base_columns = result[k]
                    np_base_columns = np_column
            # special case: tr.recognize drops some spaces between measured
            # values so they stick together; when title1 contains 'K', use
            # run_angle and rebuild the row token by token instead
            elif k == 'test_value' and 'K' in result['title1']:
                text = ''
                tr_result = tr.run_angle(img)
                for i, item in enumerate(tr_result):
                    # the fourth column is -TOL; insert a placeholder
                    if i == 3:
                        text = text + ' ' + 'null' + ' ' + item[1]
                    else:
                        text = text + ' ' + item[1]
                # pad a trailing null so rows without a BONUS value still
                # align one-to-one with the column names
                text = text + ' ' + 'null'
                text = text.strip()
                # if adding 'null' made column_name and test_value lengths
                # differ, the placeholder was unnecessary — strip it out
                if len(result['column_name'].split(' ')) != len(text.split(' ')):
                    text = text.replace(' null', '')
                result[k] = text
                print(result)
            else:
                result[k] = correct_text(k, tr.recognize(img)[0])
        log.info('index:{0}, text:{1}'.format(*(index, result)))
        redis.hash_set(actual_filename, index, json.dumps(result))
if __name__ == '__main__':
    # NOTE(review): `parse_pdf` is not defined anywhere in this module as
    # shown — presumably it lives in a part of the file not included here,
    # or was renamed; confirm before running this entry point.
    parse_pdf(pdf_path=r"""example/0e7c4f5aba7511eab5fc0242ac110004.PDF""")
    # parse_pdf(pdf_path=r"""example/250070004011191100001.PDF""")
    # run(pdf_path=r"""example/160/19101132 2020.03.14.PDF""")
    # run(pdf_path=r"""example/160/19.11.13 106795 .PDF""")
    # run(pdf_path=r"""example/250/250070004011191200001.PDF""")
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2488,
17953,
7575,
220,
220,
220,
1058,
12131,
14,
20,
14,
1065,
807,
25,
3510,
198,
2,
2488,
9800,
220,
1058,
31663,
75,
70,
198,
2,
2488,
7753,
5376,
25,
3084,
... | 1.657168 | 3,474 |
#!/usr/bin/env python
__author__ = "XXX"
__email__ = "XXX"
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
834,
9800,
834,
796,
366,
43145,
1,
198,
834,
12888,
834,
796,
366,
43145,
1,
198
] | 2.36 | 25 |
import pathlib
import heapq
from collections import Counter
# Prefix of the generated JS/TS source line that the answer list is written into.
ANSWER_LINE_START = "export const answers = ["

# The most frequent letters are worth the most points.

if __name__ == "__main__":
    # NOTE(review): `run` is not defined in this chunk — it presumably
    # appears elsewhere in the full file; confirm.
    run()
| [
11748,
3108,
8019,
198,
11748,
24575,
80,
198,
6738,
17268,
1330,
15034,
198,
198,
15037,
45532,
62,
24027,
62,
2257,
7227,
796,
366,
39344,
1500,
7429,
796,
14631,
198,
198,
2,
383,
749,
10792,
7475,
389,
2861,
262,
749,
2173,
13,
62... | 3.383333 | 60 |
import numpy as np
import os
data_root = '/tigress/qlu/data/keras-nn-srm/data/'
data_format = '.npz'
def load_data(data_name):
    """Load a cached ``.npz`` dataset archive by name and unpack it.

    Known names: 'cifar10', 'cifar100', 'mnist_std', 'mnist_conv'.
    The unpacked data_info layout is
    [num_classes, img_rows, img_cols, img_channels].
    """
    archive_path = os.path.join(data_root, data_name + data_format)
    archive = np.load(archive_path)
    return unpack_data(archive)
11748,
299,
32152,
355,
45941,
220,
198,
11748,
28686,
220,
198,
198,
7890,
62,
15763,
796,
31051,
83,
328,
601,
14,
80,
2290,
14,
7890,
14,
6122,
292,
12,
20471,
12,
82,
26224,
14,
7890,
14,
6,
198,
7890,
62,
18982,
796,
45302,
3... | 2.259887 | 177 |
# Copyright Aleksey Gurtovoy 2001-2004
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
#
# See http://www.boost.org/libs/mpl for documentation.
# $Source: /cvsroot/boost/boost/libs/mpl/preprocessed/preprocess_map.py,v $
# $Date: 2004/12/14 12:57:14 $
# $Revision: 1.3 $
import preprocess
# Generate the preprocessed boost::mpl::map headers in the three variants
# (plain, typeof-based, no-CTPS) under boost\mpl\map\aux_\preprocessed.
preprocess.main(
    [ "plain", "typeof_based", "no_ctps" ]
    , "map"
    , "boost\\mpl\\map\\aux_\\preprocessed"
    )
| [
198,
2,
15069,
9300,
74,
4397,
402,
3325,
709,
726,
5878,
12,
15724,
198,
2,
198,
2,
4307,
6169,
739,
262,
19835,
10442,
13789,
11,
10628,
352,
13,
15,
13,
220,
198,
2,
357,
6214,
19249,
2393,
38559,
24290,
62,
16,
62,
15,
13,
1... | 2.39726 | 219 |
from numpy import *
# Fit y = m*x + b to each of the six CSV datasets with gradient descent.
# NOTE(review): `compute_error` and `gradient_descent_runner` are not
# defined in this chunk — presumably defined elsewhere in the full file
# (not provided by `from numpy import *`); confirm.
for i in range(1,7,1):
    points = genfromtxt("data"+str(i)+".csv", delimiter=",")
    # hyperparameters (constant, re-assigned each iteration)
    learning_rate = 0.001
    initial_b , initial_m , num_iterations = 0 ,0 ,1000
    print ("\nGradient descent for dataset = {0} at b = {1}, m = {2}, error = {3}".format(i,initial_b, initial_m, compute_error(initial_b, initial_m, points)))
    [b, m] = gradient_descent_runner(points, initial_b, initial_m, learning_rate, num_iterations)
    print ("After {0} iterations b = {1}, m = {2}, error = {3}".format(num_iterations, b, m, compute_error(b, m, points)))
    print("\n---------------------------------------------------------------------------------------------------------------------------------\n")
'''
Output
Gradient descent for dataset = 1 at b = 0, m = 0, error = 6502955270.733334
After 1000 iterations b = 10161.31658856065, m = 11769.520009282598, error = 84089419.33650169
---------------------------------------------------------------------------------------------------------------------------------
Gradient descent for dataset = 2 at b = 0, m = 0, error = 47211002683.5
After 1000 iterations b = 31900.62277857377, m = 21319.8811872779, error = 21946106049.42964
---------------------------------------------------------------------------------------------------------------------------------
Gradient descent for dataset = 3 at b = 0, m = 0, error = 6502955270.733334
After 1000 iterations b = 21414.65481569248, m = 8288.87689010492, error = 539029448.1360085
---------------------------------------------------------------------------------------------------------------------------------
Gradient descent for dataset = 4 at b = 0, m = 0, error = 6502955270.733334
After 1000 iterations b = 2719.0330204350334, m = 14956.393295560889, error = 55805015.11189775
---------------------------------------------------------------------------------------------------------------------------------
Gradient descent for dataset = 5 at b = 0, m = 0, error = 10852955226.80859
After 1000 iterations b = 2657.839924328486, m = 16915.60258342321, error = 62387115.0964353
---------------------------------------------------------------------------------------------------------------------------------
Gradient descent for dataset = 6 at b = 0, m = 0, error = 12606666.666666666
After 1000 iterations b = 6.1105371515294795, m = 199.69925549971327, error = 8.87560026950874
---------------------------------------------------------------------------------------------------------------------------------
''' | [
6738,
299,
32152,
1330,
1635,
628,
198,
1640,
1312,
287,
2837,
7,
16,
11,
22,
11,
16,
2599,
198,
220,
220,
220,
2173,
796,
2429,
6738,
14116,
7203,
7890,
1,
10,
2536,
7,
72,
47762,
1911,
40664,
1600,
46728,
2676,
28,
2430,
8,
198,... | 3.890244 | 656 |
from scapy.all import IP, TCP, sr, conf
from random import randint
import sys
if __name__ == "__main__":
    # Exactly one argument (the target address) is required.
    if len(sys.argv) != 2:
        print(f"usage: {sys.argv[0]} target_address")
    else:
        try:
            # NOTE(review): `pscan` is not defined in this chunk —
            # presumably defined earlier in the full file; confirm.
            open_ports, closed_ports, no_answer_ports = pscan(sys.argv[1])
        except OSError as e:
            print(f'Invalid host: {e}')
        else:
            # Bug fix: these reports previously ran even after an OSError,
            # raising NameError on the unbound result variables; they now
            # execute only when the scan succeeded. The unused
            # `answer, no_answer` locals were removed.
            print(f'closed ports: {closed_ports}\n')
            print(f'open ports: {open_ports}\n')
            print(f'no answer ports: {no_answer_ports}\n')
| [
6738,
629,
12826,
13,
439,
1330,
6101,
11,
23633,
11,
19677,
11,
1013,
198,
6738,
4738,
1330,
43720,
600,
198,
11748,
25064,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
611,
407,
357,
... | 2.192623 | 244 |
from .models import Question, Answer
from django.forms import ModelForm
from django import forms
| [
6738,
764,
27530,
1330,
18233,
11,
23998,
198,
6738,
42625,
14208,
13,
23914,
1330,
9104,
8479,
198,
6738,
42625,
14208,
1330,
5107,
628,
198
] | 4.125 | 24 |
from .SpecEvaluation import SpecEvaluation
from .SpecEvaluations import SpecEvaluations
class ProblemConstraintsEvaluations(SpecEvaluations):
    """Special multi-evaluation class for all constraints of a same problem.

    See submethod ``.from_problem``
    """

    specifications_role = "constraint"

    @staticmethod
    def from_problem(problem, autopass_constraints=True):
        """Create an instance by evaluating all constraints in the problem.

        The ``problem`` is a DnaChisel DnaOptimizationProblem.
        ``autopass_constraints`` is accepted for API compatibility;
        it is not consulted in this method as written.
        """
        # Bug fix: each constraint (a Specification) evaluates itself
        # against the problem; the previous bare call `evaluate(constraint)`
        # referenced an undefined name and raised NameError.
        return ProblemConstraintsEvaluations(
            [constraint.evaluate(problem) for constraint in problem.constraints],
            problem=problem,
        )

    def success_failure_color(self, evaluation):
        """Return color #60f979 if evaluation.passes else #f96c60."""
        return "#60f979" if evaluation.passes else "#f96c60"

    def text_summary_message(self):
        """Return a global SUCCESS or FAILURE message for all evaluations."""
        failed = [e for e in self.evaluations if not e.passes]
        if failed == []:
            return "SUCCESS - all constraints evaluations pass"
        else:
            return "FAILURE: %d constraints evaluations failed" % len(failed)
| [
6738,
764,
22882,
36,
2100,
2288,
1330,
18291,
36,
2100,
2288,
198,
6738,
764,
22882,
36,
2100,
6055,
1330,
18291,
36,
2100,
6055,
628,
198,
4871,
20647,
3103,
2536,
6003,
36,
2100,
6055,
7,
22882,
36,
2100,
6055,
2599,
198,
220,
220,... | 2.736726 | 452 |
from __future__ import absolute_import
from __future__ import unicode_literals
import sys
import json
import requests
from django.conf import settings
from django.core.management.base import BaseCommand
from django.core.mail import mail_admins
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
11748,
25064,
198,
11748,
33918,
198,
11748,
7007,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625... | 3.84375 | 64 |
'''This package contains the data structures for the :mod:`researchers`
There is just one model: :class:`~researchers.models.Researcher`
There is also a helper function create_user_profile which creates a new :class:`~researchers.models.Researcher` object for each User object.
'''
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.db import models
from django.template.defaultfilters import slugify
class Researcher(models.Model):
    '''This model is for researcher data.

    This is this project's UserProfile model and is generated when a new :class:`~django.contrib.auth.models.User` object is created.'''
    # One profile per auth user.
    # NOTE(review): OneToOneField without on_delete only works on Django < 2.0;
    # an upgrade requires an explicit on_delete (likely models.CASCADE) — confirm.
    user = models.OneToOneField(User)
    current_lab_member = models.BooleanField(help_text = "Is this a current member of this group?")
    def __unicode__(self):
        '''The unicode representation for a Researcher object is the user's full name (Python 2 str hook).'''
        return self.user.get_full_name()
    @models.permalink
    def get_absolute_url(self):
        '''The permalink for a researcher detail page is /researcher/1 where 1 is the researcher id.'''
        return ('researcher-details', [int(self.id)])
def create_user_profile(sender, instance, created, **kwargs):
    '''post_save handler: attach a fresh :class:`~researchers.models.Researcher` profile to every newly created :class:`~django.contrib.auth.models.User`.'''
    if not created:
        return
    Researcher.objects.create(user=instance)
# Register the handler so every User save triggers it; the `created`
# flag inside the handler filters out plain updates.
post_save.connect(create_user_profile, sender=User)
| [
7061,
6,
1212,
5301,
4909,
262,
1366,
8573,
329,
262,
1058,
4666,
25,
63,
260,
325,
283,
3533,
63,
198,
198,
1858,
318,
655,
530,
2746,
25,
1058,
4871,
25,
63,
93,
260,
325,
283,
3533,
13,
27530,
13,
4965,
50194,
63,
198,
1858,
... | 2.89434 | 530 |
'''Independent-running GSAS-II based auto-integration program with minimal
GUI, no visualization but intended to implement significant levels of
parallelization.
'''
# Autointegration from
# $Id: GSASIIimgGUI.py 3926 2019-04-23 18:11:07Z toby $
# hacked for stand-alone use
#
# idea: select image file type & set filter from that
#
from __future__ import division, print_function
import os
import copy
import glob
import time
import re
import math
import sys
import wx
import wx.lib.mixins.listctrl as listmix
import wx.grid as wg
import numpy as np
import GSASIIpath
GSASIIpath.SetBinaryPath(True)
GSASIIpath.SetVersionNumber("$Revision: $")
import GSASIIIO as G2IO
import GSASIIctrlGUI as G2G
import GSASIIobj as G2obj
import GSASIIpy3 as G2py3
import GSASIIimgGUI as G2imG
import GSASIIfiles as G2fil
import GSASIIscriptable as G2sc
import multiprocessing as mp
class AutoIntFrame(wx.Frame):
    '''Creates a wx.Frame window for the Image AutoIntegration.
    The intent is that this will be used as a non-modal dialog window.
    Implements a Start button that morphs into a pause and resume button.
    This button starts a processing loop that is repeated every
    :meth:`PollTime` seconds.

    :param wx.Frame G2frame: main GSAS-II frame
    :param float PollTime: frequency in seconds to repeat calling the
      processing loop. (Default is 30.0 seconds.)

    NOTE(review): the ``__init__`` that creates self.params, self.timer,
    self.btnstart, self.Status, self.ListBox, self.readDir, self.gpxin,
    self.imprm, self.maskfl, self.pdfSel, self.pbkg, self.MPpool,
    self.fmtlist, self.PDFformats, self.ProcessedList and the re-entry
    flags is not shown in this chunk — confirm against the full file.
    '''
    def SetSourceDir(self,event):
        '''Use a dialog to get a directory for image files
        '''
        dlg = wx.DirDialog(self, 'Select directory for image files',
                           self.params['readdir'],wx.DD_DEFAULT_STYLE)
        dlg.CenterOnParent()
        try:
            if dlg.ShowModal() == wx.ID_OK:
                self.params['readdir'] = dlg.GetPath()
            # refresh the text box and the file list even on cancel
            self.readDir.SetValue(self.params['readdir'])
            self.ShowMatchingFiles(None)
        finally:
            dlg.Destroy()
        return
    def ShowMatchingFiles(self,value,invalid=False,**kwargs):
        '''Find and image files matching the image
        file directory (self.params['readdir']) and the image file filter
        (self.params['filter']) and add this information to the GUI list box
        '''
        if invalid: return
        # guard against re-entrant GUI callbacks
        if self.PreventReEntryShowMatch: return
        self.PreventReEntryShowMatch = True
        filmsg = ""
        self.currImageList = []
        if os.path.exists(self.params['readdir']):
            imageList = sorted(
                glob.glob(os.path.join(self.params['readdir'],self.params['filter'])))
            if not imageList:
                msg = 'Warning: No files match search string '+os.path.join(
                    self.params['readdir'],self.params['filter'])
            else:
                # only files not already integrated are queued
                for fil in imageList:
                    if fil not in self.ProcessedList:
                        filmsg += '\n '+fil
                        self.currImageList.append(fil)
                if filmsg:
                    msg = 'Files to integrate from '+os.path.join(
                        self.params['readdir'],self.params['filter'])+filmsg
                else:
                    msg = 'No files found to process in '+self.params['readdir']
        else:
            msg = 'Warning, does not exist: '+self.params['readdir']
        if self.ProcessedList:
            msg += '\nIntegrated files:'
            for fil in self.ProcessedList:
                msg += '\n '+fil
        self.ListBox.Clear()
        self.ListBox.AppendItems(msg.split('\n'))
        self.PreventReEntryShowMatch = False
        return
    def OnPause(self):
        '''Respond to Pause, changes text on button/Status line, if needed
        Stops timer
        self.Pause should already be True
        '''
        if self.timer.IsRunning(): self.timer.Stop()
        if self.btnstart.GetLabel() == 'Restart':
            return
        if self.btnstart.GetLabel() != 'Resume':
            print('\nPausing autointegration\n')
            self.btnstart.SetLabel('Resume')
            self.Status.SetStatusText(
                'Press Resume to continue integration or Reset to prepare to reintegrate all images')
        self.Pause = True
    def StartLoop(self):
        '''Prepare to start autointegration timer loop.
        Save current Image params for use in future integrations
        also label the window so users understand what is being used
        '''
        print('\nStarting new autointegration\n')
        # make sure all output directories exist
        if self.params['SeparateDir']:
            for dfmt in self.fmtlist:
                if not self.params['outsel'][dfmt[1:]]: continue
                # NOTE: local `dir` shadows the builtin of the same name
                dir = os.path.join(self.params['outdir'],dfmt[1:])
                if not os.path.exists(dir): os.makedirs(dir)
        else:
            if not os.path.exists(self.params['outdir']):
                os.makedirs(self.params['outdir'])
        if self.Reset: # special things to do after Reset has been pressed
            self.G2frame.IntegratedList = []
            wx.Yield()
            self.Reset = False
        if self.params['ComputePDF'] and self.params['SeparateDir']:
            for fmt in self.PDFformats:
                if not self.params['outsel'][fmt]: continue
                dir = os.path.join(self.params['outdir'],
                                   fmt.replace("(","_").replace(")",""))
                if not os.path.exists(dir): os.makedirs(dir)
        return False
    def ArgGen(self,PDFobj,imgprms,mskprms,xydata):
        '''generator for arguments for integration/PDF calc
        '''
        # NOTE: the per-image values below are loop-invariant but are
        # recomputed for each image; harmless, just slightly wasteful
        for newImage in self.currImageList:
            self.Pause |= self.G2frame.PauseIntegration
            if self.Pause:
                self.OnPause()
                self.PreventTimerReEntry = False
                self.Raise()
                return
            TableMode = self.params['TableMode']
            ComputePDF = self.params['ComputePDF']
            SeparateDir = self.params['SeparateDir']
            optPDF = self.params['optPDF']
            outdir = self.params['outdir']
            calcModes = (TableMode,ComputePDF,SeparateDir,optPDF)
            InterpVals = self.params.get('InterVals')
            outputSelect = self.params['outsel']
            PDFformats = self.PDFformats
            outputModes = (outputSelect,PDFformats,self.fmtlist,outdir)
            if PDFobj:
                PDFdict = PDFobj.data
            else:
                PDFdict = None
            yield (newImage,imgprms,mskprms,xydata,PDFdict,InterpVals,calcModes,outputModes)
    def OnTimerLoop(self,event):
        '''A method that is called every :meth:`PollTime` seconds that is
        used to check for new files and process them. Integrates new images.
        Also optionally sets up and computes PDF.
        This is called only after the "Start" button is pressed (then its label reads "Pause").
        '''
        if GSASIIpath.GetConfigValue('debug'):
            import datetime
            print ("DBG_Timer tick at {:%d %b %Y %H:%M:%S}\n".format(datetime.datetime.now()))
        if self.PreventTimerReEntry: return
        self.PreventTimerReEntry = True
        self.ShowMatchingFiles(None)
        if not self.currImageList:
            self.PreventTimerReEntry = False
            return
        updateList = False
        # get input for integration
        imgprms = mskprms = None
        if not self.params['TableMode']:
            # read in image controls/masks, used below in loop. In Table mode
            # we will get this image-by image.
            gpxinp = G2sc.G2Project(self.gpxin[3])
            print('reading template project',gpxinp.filename)
            img = gpxinp.image(self.imprm[1].GetStringSelection())
            imgprms = img.getControls(True)
            if self.maskfl[1].GetStringSelection().strip():
                img = gpxinp.image(self.maskfl[1].GetStringSelection())
                mskprms = img.getMasks()
        # setup shared input for PDF computation (for now will not be table mode)
        xydata = {}
        if self.params['ComputePDF']:
            # NOTE(review): `gpxinp` is only bound in the non-TableMode
            # branch above — in TableMode this block raises NameError;
            # confirm the intended precondition.
            pdfEntry = self.pdfSel.GetStringSelection()
            try:
                PDFobj = gpxinp.pdf(pdfEntry)
            except KeyError:
                # NOTE(review): execution continues after this message, so
                # PDFobj stays unbound and the loop below raises NameError
                print("PDF entry not found: {}".format(pdfEntry))
            # update with GUI input
            for i,lbl in enumerate(('Sample Bkg.','Container',
                                    'Container Bkg.')):
                name = self.pbkg[i][1].GetStringSelection()
                try:
                    xydata[lbl] = gpxinp.histogram(name).data['data']
                except AttributeError:
                    pass
                PDFobj.data['PDF Controls'][lbl]['Mult'] = self.pbkg[i][6]
                PDFobj.data['PDF Controls'][lbl]['Name'] = name
        else:
            PDFobj = None
        if self.MPpool:
            # NOTE(review): `ProcessImageMP` is not defined in this chunk —
            # presumably a multiprocessing wrapper around ProcessImage; confirm
            self.MPpool.imap_unordered(ProcessImageMP,
                                       self.ArgGen(PDFobj,imgprms,mskprms,xydata))
        else:
            for intArgs in self.ArgGen(PDFobj,imgprms,mskprms,xydata):
                newImage = intArgs[0]
                print('processing ',newImage)
                ProcessImage(*intArgs)
                updateList = True
        for newImage in self.currImageList:
            self.ProcessedList.append(newImage)
        if updateList: self.ShowMatchingFiles(None)
        self.PreventTimerReEntry = False
        self.Raise()
# Module-level cache of mask and theta/azimuth maps keyed by the detector
# set-distance string, so repeated images at the same distance skip recomputation.
MapCache = {'maskMap':{}, 'ThetaAzimMap':{}, 'distanceList':[]}
'caches for TA and Mask maps'
def ProcessImage(newImage,imgprms,mskprms,xydata,PDFdict,InterpVals,calcModes,outputModes):
    '''Process one image that is read from file newImage and is integrated into
    one or more diffraction patterns and optionally each diffraction pattern can
    be transformed into a pair distribution function.

    :param str newImage: file name (full path) for input image
    :param dict imgprms: dict with some nested lists & dicts describing the image
      settings and integration parameters
    :param dict mskprms: dict with areas of image to be masked
    :param dict xydata: contains histogram information with about background
      contributions, used for PDF computation (used if ComputePDF is True)
    :param PDFdict: contains PDF parameters (used if ComputePDF is True)
    :param InterpVals: contains interpolation table (used if TableMode is True)
    :param tuple calcModes: set of values for which computations are
      performed and how
    :param tuple outputModes: determines which files are written and where
    '''
    (TableMode,ComputePDF,SeparateDir,optPDF) = calcModes
    (outputSelect,PDFformats,fmtlist,outdir) = outputModes
    if SeparateDir:
        savedir = os.path.join(outdir,'gpx')
        if not os.path.exists(savedir): os.makedirs(savedir)
    else:
        savedir = outdir
    outgpx = os.path.join(savedir,os.path.split(os.path.splitext(newImage)[0]+'.gpx')[1])
    gpxout = G2sc.G2Project(filename=outgpx)
    print('creating',gpxout.filename)
    # looped because a file can contain multiple images
    for im in gpxout.add_image(newImage):
        if TableMode: # look up parameter values from table
            # Bug fix: this lookup previously sat *before* the loop and
            # referenced `im` before it was bound, raising NameError
            # whenever TableMode was enabled; each image carries its own
            # set distance, so the lookup belongs inside the loop.
            imgprms,mskprms = LookupFromTable(im.data['Image Controls'].get('setdist'),
                                              InterpVals)
        # apply image parameters
        im.setControls(imgprms)
        setdist = '{:.2f}'.format(im.getControls()['setdist']) # ignore differences in position less than 0.01 mm
        if setdist not in MapCache['distanceList']:
            # first image at this distance: build and cache the maps
            if mskprms:
                im.setMasks(mskprms)
            else:
                im.initMasks()
            MapCache['distanceList'].append(setdist)
            MapCache['maskMap'][setdist] = G2sc.calcMaskMap(im.getControls(),
                                                            im.getMasks())
            MapCache['ThetaAzimMap'][setdist] = G2sc.calcThetaAzimMap(im.getControls())
        hists = im.Integrate(MaskMap=MapCache['maskMap'][setdist],
                             ThetaAzimMap=MapCache['ThetaAzimMap'][setdist])
        # write requested files
        for dfmt in fmtlist:
            fmt = dfmt[1:]
            if not outputSelect[fmt]: continue
            if fmtlist[dfmt] is None: continue
            if SeparateDir:
                savedir = os.path.join(outdir,fmt)
            else:
                savedir = outdir
            if not os.path.exists(savedir): os.makedirs(savedir)
            # loop over created histograms (multiple if caked), writing them as requested
            for i,h in enumerate(hists):
                fname = h.name[5:].replace(' ','_')
                try:
                    fil = os.path.join(savedir,fname)
                    print('Wrote',h.Export(fil,dfmt))
                except Exception as msg:
                    print('Failed to write {} as {}. Error msg\n{}'
                          .format(fname,dfmt,msg))
        if ComputePDF: # compute PDF
            for h in hists:
                pdf = gpxout.copy_PDF(PDFdict,h)
                pdf.data['PDF Controls']['Sample']['Name'] = h.name
                xydata['Sample'] = h.data['data']
                fname = h.name[5:].replace(' ','_')
                limits = h.data['Limits'][1]
                inst = h.data['Instrument Parameters'][0]
                pdf.calculate(copy.deepcopy(xydata),limits,inst)
                if optPDF:
                    # up to 5 optimization rounds; stop early on convergence
                    for i in range(5):
                        if pdf.optimize(True,5,copy.deepcopy(xydata),limits,inst):
                            break
                    pdf.calculate(copy.deepcopy(xydata),limits,inst)
                for fmt in PDFformats:
                    if not outputSelect[fmt]: continue
                    if SeparateDir:
                        savedir = os.path.join(outdir,fmt.replace("(","_").replace(")",""))
                    else:
                        savedir = outdir
                    pdf.export(os.path.join(savedir,fname),fmt)
    if outputSelect.get('gpx'):
        gpxout.save()
    else:
        del gpxout
# Autointegration end
def SetupInterpolation(dlg):
    '''Build the lookup tables needed to interpolate image parameters at an
    arbitrary detector distance.

    Reads the parameter table, image-file list and column layout from the
    supplied dialog, loads the image controls (and, where available, the
    mask file) for every listed image, and returns a deep copy of everything
    bundled as a single list, suitable for passing to :func:`LookupFromTable`.
    '''
    parm_table = dlg.ReadImageParmTable()
    image_files = dlg.IMfileList
    column_count = dlg.list.GetColumnCount()
    parm_names = dlg.ParmList
    no_interp_vars = dlg.nonInterpVars
    controls_by_file = {}
    masks_by_file = {}
    # The last row of the parameter table holds the mask-file entry that
    # goes with each image file.
    for image_file, mask_file in zip(image_files, parm_table[-1]):
        key = os.path.split(image_file)[1]
        if key in controls_by_file:
            print('Warning overwriting entry {}'.format(key))
        controls_by_file[key] = G2imG.ReadControls(image_file)
        if mask_file and os.path.exists(mask_file):
            masks_by_file[key] = G2imG.ReadMask(mask_file)
        elif mask_file != "(none)":
            print("Error: Mask file {} not found".format(mask_file))
    return copy.deepcopy([column_count, parm_table, image_files, parm_names,
                          no_interp_vars, controls_by_file, masks_by_file])
def LookupFromTable(dist,parmList):
    '''Interpolate image parameters for a supplied distance value.

    :param float dist: distance to use for interpolation
    :param list parmList: the lookup tables built by :func:`SetupInterpolation`
      (columns, parameter table, image-file list, parameter names,
      non-interpolated variable names, controls table, mask table)
    :returns: a tuple with 2 items:

      * the image controls of the closest image, updated in place with the
        interpolated parameter values,
      * the masks associated with the closest image, or None if it has none
    '''
    cols, parms, IMfileList, ParmList, nonInterpVars,ControlsTable,MaskTable = parmList
    # column 0 of the parameter table holds the setdist values; it is the
    # x-axis for all interpolations below
    x = np.array([float(i) for i in parms[0]])
    closest = abs(x-dist).argmin()
    D = {'setdist':dist}
    imctfile = IMfileList[closest]
    # interpolate each parameter column; column 0 (distance) and the last
    # column (mask file) are skipped by the range
    for c in range(1,cols-1):
        lbl = ParmList[c]
        if lbl in nonInterpVars:
            # variables that cannot be interpolated are copied from the
            # closest image instead
            if lbl in ['outChannels',]:
                # channel count must stay an integer
                D[lbl] = int(float(parms[c][closest]))
            else:
                D[lbl] = float(parms[c][closest])
        else:
            y = np.array([float(i) for i in parms[c]])
            D[lbl] = np.interp(dist,x,y)
    # full integration when angular range is 0
    D['fullIntegrate'] = (D['LRazimuth_min'] == D['LRazimuth_max'])
    # conversion for paired values: collapse the *_min/*_max (or _x/_y)
    # scalar entries into the two-element lists the controls expect
    for a,b in ('center_x','center_y'),('LRazimuth_min','LRazimuth_max'),('IOtth_min','IOtth_max'):
        r = a.split('_')[0]
        D[r] = [D[a],D[b]]
        if r in ['LRazimuth',]:
            # azimuth limits are kept as integers
            D[r] = [int(D[a]),int(D[b])]
        del D[a]
        del D[b]
    interpDict,imgctrl = D,imctfile
    if GSASIIpath.GetConfigValue('debug'):
        print ('DBG_interpolated values: ',interpDict)
    # start from the stored controls of the closest image and overwrite them
    # with the interpolated values.
    # NOTE(review): no copy is made, so this mutates the dict held in
    # ControlsTable in place; repeated lookups reuse the mutated entry --
    # confirm that is intended.
    f = os.path.split(imgctrl)[1]
    ImageControls = ControlsTable[f]
    ImageControls.update(interpDict)
    ImageControls['showLines'] = True
    ImageControls['ring'] = []
    ImageControls['rings'] = []
    ImageControls['ellipses'] = []
    ImageControls['setDefault'] = False
    # drop image-specific items that do not carry over to a new distance
    # (presumably recomputed downstream -- TODO confirm)
    for i in 'range','size','GonioAngles':
        if i in ImageControls: del ImageControls[i]
    ImageMasks = MaskTable.get(f)
    return ImageControls,ImageMasks
###########################################################################
# Stand-alone test entry point: open the auto-integration window inside a
# bare wx application, without the rest of the GSAS-II GUI.
if __name__ == "__main__":
    GSASIIpath.InvokeDebugOpts()
    App = wx.App()
    class dummyClass(object):
        '''An empty class where a few values needed from parent are placed
        '''
    # Minimal stand-in for the main GSAS-II frame object AutoIntFrame expects.
    G2frame = dummyClass()
    frm = AutoIntFrame(G2frame,5)
    App.GetTopWindow().Show(True)
    App.MainLoop()
| [
7061,
6,
40566,
12,
20270,
26681,
1921,
12,
3978,
1912,
8295,
12,
18908,
1358,
1430,
351,
10926,
198,
40156,
11,
645,
32704,
475,
5292,
284,
3494,
2383,
2974,
286,
220,
198,
1845,
29363,
1634,
13,
198,
7061,
6,
198,
2,
5231,
1563,
1... | 2.104858 | 8,316 |
from keras.models import Sequential
from keras.layers.core import Flatten, Dense, Dropout
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.layers.convolutional import ZeroPadding2D
def VGG_16(weights_path=None, in_shape=(3, 224, 224), out_classes=1000):
    """
    Defines the VGG conv net model from the Visual Geometry Group at Oxford, which had extremely good
    performance on the ImageNet ILSVRC-2014 competition. For default input and output sizes, there
    are pre-trained model weights which we can use for experimentation. See
    http://www.robots.ox.ac.uk/~vgg/research/very_deep/ for more details.

    The model is written as a Keras Sequential CNN, with relu activations and softmax on the outputs. It
    has 3x3 convolution kernels, stride 2 max pooling layers, and zero padding layers, with two dropout
    layers just prior to output.

    :param weights_path: optional path to an HDF5 file of pre-trained weights
        loaded into the model before it is returned
    :param in_shape: input tensor shape, channels-first by default
    :param out_classes: number of softmax output classes
    :return: the assembled (uncompiled) Keras ``Sequential`` model
    """
    # (number of filters, number of conv layers) for each of the five VGG blocks.
    conv_blocks = ((64, 2), (128, 2), (256, 3), (512, 3), (512, 3))

    model = Sequential()
    first_layer = True
    for n_filters, n_convs in conv_blocks:
        for _ in range(n_convs):
            if first_layer:
                # Only the very first layer declares the input shape.
                model.add(ZeroPadding2D((1, 1), input_shape=in_shape))
                first_layer = False
            else:
                model.add(ZeroPadding2D((1, 1)))
            model.add(Convolution2D(n_filters, 3, 3, activation='relu'))
        # Bug fix: the keyword is 'strides' -- the original 'stride=' raises
        # a TypeError when the layer is constructed.
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # Classifier head: two 4096-unit fully-connected layers with dropout,
    # then a softmax over the output classes.
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(out_classes, activation='softmax'))

    if weights_path:
        model.load_weights(weights_path)

    return model
| [
198,
6738,
41927,
292,
13,
27530,
1330,
24604,
1843,
198,
6738,
41927,
292,
13,
75,
6962,
13,
7295,
1330,
1610,
41769,
11,
360,
1072,
11,
14258,
448,
198,
6738,
41927,
292,
13,
75,
6962,
13,
42946,
2122,
282,
1330,
34872,
2122,
17,
... | 2.437611 | 1,122 |
'''This module weather.py handles the weather updates receiving
part of the program. This means that it uses the APIkeys to be able
to get the newest weather updates for the notifications, and also the weather brief.'''
import json
import requests
def get_weather() -> dict:
    '''Fetch the current weather for the configured city and format it as a
    notification.

    Reads the OpenWeatherMap API key and city name from config.json, queries
    the current-weather endpoint and returns a dict with 'title' and
    'content' keys ready to be displayed to the user.
    '''
    weatherdict = {}
    base_url = "http://api.openweathermap.org/data/2.5/weather?q="
    with open('config.json') as json_file:
        data = json.load(json_file)
        moredata = data['weather']
        api_key = moredata['api key']
        city_name = moredata['city']
    complete_url = base_url + city_name + "&appid=" + api_key
    # NOTE(review): neither the HTTP status nor the API 'cod' field is
    # checked; an unknown city or bad key surfaces as a KeyError below.
    response = requests.get(complete_url)
    weatherdata = response.json()
    maindata = weatherdata["main"]
    # OpenWeatherMap reports temperatures in Kelvin. Bug fix: convert the
    # full-precision value before rounding -- the old int() truncated the
    # fractional part first, biasing the result by up to a degree.
    current_temperature = round(float(maindata["temp"]) - 273.15)
    feels_like_temp = round(float(maindata["feels_like"]) - 273.15)
    moredata = weatherdata["weather"]
    location = weatherdata["name"]
    weather_description = moredata[0]["description"]
    weatherdict["title"] = 'Weather Update'
    weatherdict["content"] = (" Temperature (in celsius) = " +
                              str(current_temperature) +
                              "\n Feels like temperature (in celsius) = " +
                              str(feels_like_temp) +
                              "\n Description = " + str(weather_description) +
                              "\n Location = " + str(location))
    return weatherdict
def weatherbrief() -> str:
    '''Build a one-string weather summary for the text-to-speech announcer.

    Reads the OpenWeatherMap API key and city name from config.json and
    returns a single sentence-like string describing current conditions.
    '''
    with open('config.json') as json_file:
        data = json.load(json_file)
        moredata = data['weather']
        api_key = moredata['api key']
        city_name = moredata['city']
    base_url = "http://api.openweathermap.org/data/2.5/weather?q="
    # Bug fix: the configured key and city used to be overwritten here by a
    # hard-coded API key and the literal city 'Exeter', which both leaked a
    # credential into the source and silently ignored the user's config.
    complete_url = base_url + city_name + "&appid=" + api_key
    response = requests.get(complete_url)
    weatherdata = response.json()
    maindata = weatherdata["main"]
    # Convert the Kelvin reading with float() before rounding so the
    # fractional part is not truncated prematurely.
    current_temperature = round(float(maindata["temp"]) - 273.15)
    feels_like_temp = round(float(maindata["feels_like"]) - 273.15)
    moredata = weatherdata["weather"]
    location = weatherdata["name"]
    weather_description = moredata[0]["description"]
    # Bug fix: separate the clauses with '. ' -- the adjacent string
    # literals previously concatenated with no separator, so the spoken
    # announcement ran all fields together.
    weatherstring = ("Weather Update. "
                     "Temperature (in celsius) = " +
                     str(current_temperature) +
                     ". Feels like temperature (in celsius unit) = " +
                     str(feels_like_temp) +
                     ". Description = " + str(weather_description) +
                     ". Location = " + str(location))
    return weatherstring
| [
7061,
6,
1212,
8265,
6193,
13,
9078,
17105,
262,
6193,
5992,
6464,
201,
198,
3911,
286,
262,
1430,
13,
770,
1724,
326,
340,
3544,
262,
7824,
13083,
284,
307,
1498,
201,
198,
1462,
651,
262,
15530,
6193,
5992,
329,
262,
19605,
11,
29... | 2.483376 | 1,173 |
import gui
import network
import audio
import control
import globals
from player import Player
| [
11748,
11774,
198,
11748,
3127,
198,
11748,
6597,
198,
11748,
1630,
198,
198,
11748,
15095,
874,
198,
6738,
2137,
1330,
7853,
628,
628,
628,
220,
220,
220,
220,
198,
220,
220,
220,
220,
198
] | 3.264706 | 34 |
import torch
import torch.nn as nn
import torch.nn.functional as F
import random
from torch_scatter import scatter_mean
import sys
sys.path.append('../')
from lib.pointgroup_ops.functions import pointgroup_ops
from model.components import WeightedFocalLoss, CenterLoss, TripletLoss
from model.common import generate_adaptive_heatmap
from model.loss_functions import compute_offset_norm_loss, compute_offset_dir_loss
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
11748,
4738,
198,
6738,
28034,
62,
1416,
1436,
1330,
41058,
62,
32604,
198,
11748,
25064,
198,
198,
17597,
13,
6978,
13,
... | 3.434426 | 122 |
#!/bin/python
# Shift every integer prediction in predictions.txt up by one and write the
# results, one per line, to predictions2.txt.
with open("predictions.txt") as src, open("predictions2.txt", "w+") as dst:
    shifted = ("%d\n" % (int(raw.strip()) + 1) for raw in src)
    dst.writelines(shifted)
| [
2,
48443,
8800,
14,
29412,
198,
4480,
1280,
7203,
28764,
9278,
13,
14116,
4943,
355,
279,
16344,
11,
1280,
7203,
28764,
9278,
17,
13,
14116,
2430,
86,
10,
4943,
355,
286,
67,
25,
198,
197,
1640,
1627,
287,
279,
16344,
25,
198,
197,
... | 2.323077 | 65 |
from .common_setup import *
from ..vi import VariationalBayes, conditional_different_points, WhitenedVariationalPosterior
from ..misc import safe_cholesky
import tensorflow_probability as tfp
import tensorflow as tf
from .. import float_type
| [
6738,
764,
11321,
62,
40406,
1330,
1635,
198,
6738,
11485,
8903,
1330,
15965,
864,
15262,
274,
11,
26340,
62,
39799,
62,
13033,
11,
13183,
2945,
23907,
864,
47,
6197,
1504,
198,
6738,
11485,
44374,
1330,
3338,
62,
354,
4316,
2584,
198,
... | 3.61194 | 67 |
from typing import Dict, Optional
from daemon.stores.mixin import AiohttpMixin
from daemon.stores.containers import ContainerStore
class PodStore(ContainerStore, AiohttpMixin):
    """A Store of Pods spawned as Containers by Daemon"""

    _kind = 'pod'

    async def add_in_partial(
        self, uri: str, params: Dict, envs: Optional[Dict] = None, **kwargs
    ) -> Dict:
        """Sends `POST` request to `partial-daemon` to create a Pod/Deployment.

        :param uri: uri of partial-daemon
        :param params: json payload to be sent
        :param envs: environment variables to be passed into partial pod
            (an empty mapping is sent when omitted)
        :param kwargs: keyword args
        :return: response from partial-daemon
        """
        # Bug fix: the default used to be a shared mutable `{}`; use None as
        # the sentinel and substitute a fresh empty dict per request.
        return await self.POST(
            url=f'{uri}/{self._kind}',
            params=None,
            json={self._kind: params, 'envs': envs if envs is not None else {}},
        )

    async def delete_in_partial(self, uri: str, **kwargs) -> Dict:
        """Sends a `DELETE` request to `partial-daemon` to terminate a Pod/Deployment

        :param uri: uri of partial-daemon
        :param kwargs: keyword args
        :return: response from partial-daemon
        """
        return await self.DELETE(url=f'{uri}/{self._kind}')
| [
6738,
19720,
1330,
360,
713,
11,
32233,
198,
198,
6738,
33386,
13,
43409,
13,
19816,
259,
1330,
317,
952,
4023,
35608,
259,
198,
6738,
33386,
13,
43409,
13,
3642,
50221,
1330,
43101,
22658,
628,
198,
4871,
17437,
22658,
7,
29869,
22658,... | 2.415507 | 503 |
from __future__ import absolute_import
from django.core.urlresolvers import reverse
from sentry.testutils import APITestCase, SnubaTestCase
from sentry.testutils.helpers.datetime import iso_format, before_now
from sentry.models import Group
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
6738,
42625,
14208,
13,
7295,
13,
6371,
411,
349,
690,
1330,
9575,
198,
6738,
1908,
563,
13,
9288,
26791,
1330,
3486,
2043,
395,
20448,
11,
5489,
22013,
14402,
20448,
198,
6738,
... | 3.521739 | 69 |
from django.db import models
import os
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
11748,
28686,
628,
198
] | 3.416667 | 12 |
"""Python Cookbook
Chapter 8, recipe 2.
"""
from pprint import pprint
# Sample fuel-log rows as read from a spreadsheet: a three-line header
# followed by pairs of data rows, where the second row of each pair leaves
# the date cell blank (it belongs to the date of the row above it).
log_rows = \
    [['date', 'engine on', 'fuel height'],
     ['', 'engine off', 'fuel height'],
     ['', 'Other notes', ''],
     ['10/25/13', '08:24:00 AM', '29'],
     ['', '01:15:00 PM', '27'],
     ['', "calm seas -- anchor solomon's island", ''],
     ['10/26/13', '09:12:00 AM', '27'],
     ['', '06:25:00 PM', '22'],
     ['', "choppy -- anchor in jackson's creek", '']]
import datetime
from types import SimpleNamespace
__test__ = {
'row_merge': '''
>>> pprint(list(row_merge(log_rows)))
[['date',
'engine on',
'fuel height',
'',
'engine off',
'fuel height',
'',
'Other notes',
''],
['10/25/13',
'08:24:00 AM',
'29',
'',
'01:15:00 PM',
'27',
'',
"calm seas -- anchor solomon's island",
''],
['10/26/13',
'09:12:00 AM',
'27',
'',
'06:25:00 PM',
'22',
'',
"choppy -- anchor in jackson's creek",
'']]
''',
'skip_header_1': '''
>>> rm = row_merge(log_rows)
>>> tail = skip_header_1(rm)
>>> pprint(list(tail))
[['10/25/13',
'08:24:00 AM',
'29',
'',
'01:15:00 PM',
'27',
'',
"calm seas -- anchor solomon's island",
''],
['10/26/13',
'09:12:00 AM',
'27',
'',
'06:25:00 PM',
'22',
'',
"choppy -- anchor in jackson's creek",
'']]
''',
'skip_header_date': '''
>>> rm = row_merge(log_rows)
>>> tail = skip_header_date(rm)
>>> pprint(list(tail))
[['10/25/13',
'08:24:00 AM',
'29',
'',
'01:15:00 PM',
'27',
'',
"calm seas -- anchor solomon's island",
''],
['10/26/13',
'09:12:00 AM',
'27',
'',
'06:25:00 PM',
'22',
'',
"choppy -- anchor in jackson's creek",
'']]
''',
'start_time': '''
>>> rm = row_merge(log_rows)
>>> tail = skip_header_date(rm)
>>> st = (start_datetime(row) for row in tail)
>>> pprint(list(st))
[['10/25/13',
'08:24:00 AM',
'29',
'',
'01:15:00 PM',
'27',
'',
"calm seas -- anchor solomon's island",
'',
datetime.datetime(2013, 10, 25, 8, 24)],
['10/26/13',
'09:12:00 AM',
'27',
'',
'06:25:00 PM',
'22',
'',
"choppy -- anchor in jackson's creek",
'',
datetime.datetime(2013, 10, 26, 9, 12)]]
''',
'start_time, end_time': '''
>>> rm = row_merge(log_rows)
>>> tail = skip_header_date(rm)
>>> st = (start_datetime(row) for row in tail)
>>> et = (end_datetime(row) for row in st)
>>> pprint(list(et))
[['10/25/13',
'08:24:00 AM',
'29',
'',
'01:15:00 PM',
'27',
'',
"calm seas -- anchor solomon's island",
'',
datetime.datetime(2013, 10, 25, 8, 24),
datetime.datetime(2013, 10, 25, 13, 15)],
['10/26/13',
'09:12:00 AM',
'27',
'',
'06:25:00 PM',
'22',
'',
"choppy -- anchor in jackson's creek",
'',
datetime.datetime(2013, 10, 26, 9, 12),
datetime.datetime(2013, 10, 26, 18, 25)]]
''',
'start_time, end_time, duration': '''
>>> rm = row_merge(log_rows)
>>> tail = skip_header_date(rm)
>>> st = (start_datetime(row) for row in tail)
>>> et = (end_datetime(row) for row in st)
>>> d = (duration(row) for row in et)
>>> pprint(list(d))
[['10/25/13',
'08:24:00 AM',
'29',
'',
'01:15:00 PM',
'27',
'',
"calm seas -- anchor solomon's island",
'',
datetime.datetime(2013, 10, 25, 8, 24),
datetime.datetime(2013, 10, 25, 13, 15),
4.8],
['10/26/13',
'09:12:00 AM',
'27',
'',
'06:25:00 PM',
'22',
'',
"choppy -- anchor in jackson's creek",
'',
datetime.datetime(2013, 10, 26, 9, 12),
datetime.datetime(2013, 10, 26, 18, 25),
9.2]]
''',
'date_conversion': '''
>>> converted = date_conversion(row_merge(log_rows))
>>> pprint(list(converted))
[['10/25/13',
'08:24:00 AM',
'29',
'',
'01:15:00 PM',
'27',
'',
"calm seas -- anchor solomon's island",
'',
datetime.datetime(2013, 10, 25, 8, 24),
datetime.datetime(2013, 10, 25, 13, 15),
4.8],
['10/26/13',
'09:12:00 AM',
'27',
'',
'06:25:00 PM',
'22',
'',
"choppy -- anchor in jackson's creek",
'',
datetime.datetime(2013, 10, 26, 9, 12),
datetime.datetime(2013, 10, 26, 18, 25),
9.2]]
''',
'namespace': '''
>>> pprint(list(make_namespace(row_merge(log_rows)))) # doctest: +NORMALIZE_WHITESPACE
[namespace(date='date', end_fuel_height='fuel height',
end_time='engine off', other_notes='Other notes',
start_fuel_height='fuel height', start_time='engine on'),
namespace(date='10/25/13', end_fuel_height='27',
end_time='01:15:00 PM', other_notes="calm seas -- anchor solomon's island",
start_fuel_height='29', start_time='08:24:00 AM'),
namespace(date='10/26/13', end_fuel_height='22',
end_time='06:25:00 PM', other_notes="choppy -- anchor in jackson's creek",
start_fuel_height='27', start_time='09:12:00 AM')]
''',
}
# Run the doctest examples collected in the module-level __test__ mapping
# when this recipe is executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| [
37811,
37906,
8261,
2070,
198,
198,
14126,
807,
11,
8364,
362,
13,
198,
37811,
198,
6738,
279,
4798,
1330,
279,
4798,
198,
198,
6404,
62,
8516,
796,
3467,
198,
58,
17816,
4475,
3256,
705,
18392,
319,
3256,
705,
25802,
6001,
6,
4357,
... | 2.239316 | 2,106 |
"""connector.py
Created on: May 19, 2017
Authors: Jeroen van der Heijden <jeroen@cesbit.com>
jomido <https://github.com/jomido>
"""
import os
import json
import aiohttp
from .client_token import Token
from .service_account_token import ServiceAccountToken
from .entity import Entity
from .key import Key
from .utils import make_read_options
# OAuth scopes requested when authenticating against Cloud Datastore.
DEFAULT_SCOPES = {
    'https://www.googleapis.com/auth/datastore',
    'https://www.googleapis.com/auth/cloud-platform'
}
# Base endpoint of the Datastore REST API.
DEFAULT_API_ENDPOINT = 'https://datastore.googleapis.com'
# URL template for v1 RPC calls; filled in with the endpoint, the project
# id and the method name (e.g. 'runQuery').
DATASTORE_URL = \
    '{api_endpoint}/v1/projects/{project_id}:{method}'
# Upper bound on retry/continuation loops.
# NOTE(review): the loop that uses this is outside this excerpt -- confirm.
_MAX_LOOPS = 128
| [
37811,
8443,
273,
13,
9078,
198,
198,
41972,
319,
25,
1737,
678,
11,
2177,
198,
220,
220,
46665,
25,
449,
3529,
268,
5719,
4587,
679,
2926,
6559,
1279,
73,
3529,
268,
31,
728,
2545,
13,
785,
29,
198,
220,
220,
220,
220,
220,
220,
... | 2.602459 | 244 |
#!/usr/bin/env python
import sys

import h5py
import numpy as np
import matplotlib
matplotlib.use('Agg')  # choose the non-interactive backend before pyplot loads
import matplotlib.pyplot as plt

# Plot the per-frame lit-pixel hit score for one run; the run number is the
# single command-line argument.
run_number = int(sys.argv[1])
data_file = '/asap3/flash/gpfs/bl1/2017/data/11001733/processed/hummingbird/r%04d_ol1.h5' % run_number

with h5py.File(data_file, 'r') as handle:
    hit_scores = handle['entry_1/result_1/hitscore_litpixel'][:]

figure = plt.figure()
axes = figure.add_subplot(111)
axes.plot(hit_scores, 'k.')
# axes.axhline(int(sys.argv[2]))  # optional threshold line, kept disabled
figure.savefig('../plots/r%04d_hitscore.png' % run_number, dpi=100, bbox_inches='tight')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
289,
20,
9078,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
198,
6759,
29487,
8019,
13,
1904,
10786,
46384,
11537,
198,
11748,
2603,
29487,
8019,
13,
9078,
2... | 2.230126 | 239 |
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from enum import IntEnum
from PyQt5.QtGui import QStandardItemModel, QStandardItem
from PyQt5.QtCore import Qt, QPersistentModelIndex, QModelIndex
from PyQt5.QtWidgets import (QAbstractItemView, QMenu)
from electrum_exos.i18n import _
from electrum_exos.bitcoin import is_address
from electrum_exos.util import block_explorer_URL
from electrum_exos.plugin import run_hook
from .util import MyTreeView, import_meta_gui, export_meta_gui, webopen
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
5903,
6582,
532,
18700,
6185,
5456,
198,
2,
15069,
357,
34,
8,
1853,
5658,
20687,
1533,
83,
2815,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
... | 3.502174 | 460 |
# Generated by Django 3.0.6 on 2020-07-13 23:23
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
21,
319,
12131,
12,
2998,
12,
1485,
2242,
25,
1954,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
#generates timestamps
import datetime
#contains hashing algorithms
import hashlib
#defining the 'block' data structure
import time
#defining the blockchain datastructure
#consists of 'blocks' linked together
#to form a 'chain'. Thats why its called
#'blockchain'
# Build a chain and mine ten sequentially-named blocks onto it.
blockchain = Blockchain()
for block_number in range(10):
    blockchain.mine(Block("Block " + str(block_number + 1)))

# Walk the chain head-to-tail, printing each block. Note that this consumes
# the chain: head is advanced past every node as it is printed.
while blockchain.head is not None:
    print(blockchain.head)
    blockchain.head = blockchain.head.next

time.sleep(500)
| [
2,
8612,
689,
4628,
395,
9430,
201,
198,
11748,
4818,
8079,
201,
198,
2,
3642,
1299,
49544,
16113,
201,
198,
11748,
12234,
8019,
201,
198,
2,
4299,
3191,
262,
705,
9967,
6,
1366,
4645,
201,
198,
11748,
640,
201,
198,
2,
4299,
3191,
... | 2.927461 | 193 |
from interaction_engine.engine import InteractionEngine
| [
6738,
10375,
62,
18392,
13,
18392,
1330,
4225,
2673,
13798,
628
] | 5.181818 | 11 |
# -*- coding: utf-8 -*-
from manim_imports_ext import *
# Scene types
# Scenes
# class Thumbnail(GraphicalIntuitions):
| [
1303,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
582,
320,
62,
320,
3742,
62,
2302,
1330,
1635,
628,
628,
628,
628,
198,
2,
28315,
3858,
628,
198,
198,
2,
49525,
628,
628,
628,
628,
628,
628,
628,
628,
628... | 2.397436 | 78 |
#!/usr/bin/env python
#-*-coding:utf-8-*-
'''
Created on 2017年12月12日
@Author: Xinpeng
@Description: 用来处理json is not JSON serializable。
'''
import json
import datetime | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
12,
9,
12,
66,
7656,
25,
40477,
12,
23,
12,
9,
12,
198,
198,
7061,
6,
198,
41972,
319,
2177,
33176,
112,
1065,
17312,
230,
1065,
33768,
98,
198,
198,
31,
13838,
25,
25426,
79,... | 2.15 | 80 |
from selia.views.create_views.manager_base import CreateManagerBase
| [
6738,
384,
24660,
13,
33571,
13,
17953,
62,
33571,
13,
37153,
62,
8692,
1330,
13610,
13511,
14881,
628
] | 3.833333 | 18 |
from random import randint as rdi
from math import radians

from compas.geometry import Box
from compas.geometry import Translation
from compas.geometry import Rotation
from compas.datastructures import Mesh
from compas.datastructures import mesh_transform_numpy
from compas.utilities import rgb_to_hex
from compas_viewers.objectviewer import ObjectViewer

# Viewer with object picking enabled.
viewer = ObjectViewer(activate_selection=True)

# Populate the scene with ten boxes of random size, each placed at a random
# position, rotated by a random angle about the world Z axis, and drawn in a
# random colour.
for index in range(10):
    translation = Translation.from_vector([rdi(0, 10), rdi(0, 10), rdi(0, 5)])
    rotation = Rotation.from_axis_and_angle([0, 0, 1.0], radians(rdi(0, 180)))
    transform = translation * rotation

    box_mesh = Mesh.from_shape(Box.from_width_height_depth(rdi(1, 3), rdi(1, 3), rdi(1, 3)))
    mesh_transform_numpy(box_mesh, transform)

    viewer.add(box_mesh, name="Mesh.%s" % index, settings={
        'color': rgb_to_hex((rdi(0, 255), rdi(0, 255), rdi(0, 255))),
        'edges.width': 2,
        'opacity': 0.7,
        'vertices.size': 10,
        'vertices.on': True,
        'edges.on': False,
        'faces.on': True,
    })

viewer.show()
| [
6738,
4738,
1330,
43720,
600,
355,
374,
10989,
198,
198,
6738,
10688,
1330,
2511,
1547,
198,
198,
6738,
552,
292,
13,
469,
15748,
1330,
8315,
198,
6738,
552,
292,
13,
19608,
459,
1356,
942,
1330,
47529,
198,
6738,
552,
292,
13,
19608,... | 2.397802 | 455 |
# SQL text templates filled in with str.format().
# SECURITY(review): these interpolate raw values -- including the password --
# directly into the SQL string, which is vulnerable to SQL injection; prefer
# parameterized queries with DB-API placeholders. Flagged, not changed here.
get_staff_sql = "select is_phen_staff from user_privileges inner join user_registrations on user_privileges.id_user=user_registrations.id_user where user_registrations.email='{0}'"
# Fetch the per-user password salt by email.
get_salt_sql = "select salt from user_registrations where email='{0}'"
# Look up the username matching an email + (salted) password pair.
get_name_passwd_sql = "select username from user_registrations where email='{0}' and password ='{1}'"
| [
1136,
62,
28120,
62,
25410,
796,
366,
19738,
318,
62,
31024,
62,
28120,
422,
2836,
62,
13776,
576,
3212,
8434,
4654,
2836,
62,
2301,
396,
9143,
319,
2836,
62,
13776,
576,
3212,
13,
312,
62,
7220,
28,
7220,
62,
2301,
396,
9143,
13,
... | 2.966667 | 120 |
import re
from typing import Mapping
import requests
# Captures everything after 'index.html?' in a URL (the query string).
# NOTE(review): the unescaped '.' chars match any character -- presumably
# harmless here, but confirm against the URLs this is applied to.
bearer_re = r"index.html\?(.*)"
| [
11748,
302,
198,
6738,
19720,
1330,
337,
5912,
198,
198,
11748,
7007,
198,
198,
1350,
11258,
62,
260,
796,
374,
1,
9630,
13,
6494,
59,
30,
7,
15885,
16725,
628
] | 2.933333 | 30 |
import unittest
from unittest.mock import MagicMock
from lmctl.client.client_credentials_auth import ClientCredentialsAuth
| [
11748,
555,
715,
395,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
6139,
44,
735,
198,
6738,
300,
76,
34168,
13,
16366,
13,
16366,
62,
66,
445,
14817,
62,
18439,
1330,
20985,
34,
445,
14817,
30515,
198
] | 3.236842 | 38 |
import os
import pytest
import asyncio
import taskloaf.worker
from taskloaf.cluster import cluster
from taskloaf.mpi import mpiexisting, MPIComm, rank
from taskloaf.test_decorators import mpi_procs
from taskloaf.run import run
from fixtures import w
if __name__ == "__main__":
test_log()
@mpi_procs(2)
@mpi_procs(2)
| [
11748,
28686,
198,
11748,
12972,
9288,
198,
11748,
30351,
952,
198,
11748,
4876,
5439,
1878,
13,
28816,
198,
6738,
4876,
5439,
1878,
13,
565,
5819,
1330,
13946,
198,
6738,
4876,
5439,
1878,
13,
3149,
72,
1330,
285,
21749,
87,
9665,
11,
... | 2.691667 | 120 |
import math
from pyspark.sql import functions as F
import pyspark.sql.types as T
import unidecode as ud
from faker import Faker
from numpy import random
import binascii
import zlib
from HLL import HyperLogLog
from datafaucet import crypto
from datafaucet.spark import types
from datafaucet.spark import dataframe
array_avg = F.udf(lambda x: sum(x) / len(x))
array_sum = F.udf(lambda x: sum(x))
import pandas as pd
array_std = F.udf(lambda x: std(x))
@F.pandas_udf(T.StringType())
df_functions = (typeof, topn, topn_count, topn_values, percentiles)
null = lambda c: F.sum(c.isNull().cast('int'))
nan = lambda c: F.sum(c.isnan)
integer = lambda c: F.coalesce(F.sum((F.rint(c) == c).cast('int')), F.lit(0))
boolean = lambda c: F.coalesce(F.sum((c.cast('boolean') == F.rint(c)).cast('int')), F.lit(0))
zero = lambda c: F.sum((c == 0).cast('int'))
empty = lambda c: F.sum((F.length(c) == 0).cast('int'))
pos = lambda c: F.sum((c > 0).cast('int'))
neg = lambda c: F.sum((c < 0).cast('int'))
distinct = lambda c: F.countDistinct(c)
one = lambda c: F.first(c, False).cast(T.StringType())
count = F.count
sum = F.sum
sum_pos = lambda c: F.sum(F.when(c > 0, c))
sum_neg = lambda c: F.sum(F.when(c < 0, c))
min = F.min
max = F.max
avg = F.avg
stddev = F.stddev
skewness = F.skewness
kurtosis = F.kurtosis
first = F.first
digits_only = lambda c: F.sum((F.length(F.translate(c, '0123456789', '')) < F.length(c)).cast('int'))
spaces_only = lambda c: F.sum(((F.length(F.translate(c, ' \t', '')) == 0) & (F.length(c) > 0)).cast('int'))
all = {
'typeof': typeof(),
'integer': integer,
'boolean': boolean,
'top3': topn(),
'top3_count': topn_count(),
'top3_values': topn_values(),
'percentiles': percentiles(),
'null': null,
'zero': zero,
'empty': empty,
'pos': pos,
'neg': neg,
'distinct': distinct,
'sum': sum,
'count': count,
'first': first,
'one': one,
'min': min,
'max': max,
'avg': avg,
'stddev': stddev,
'skewness': skewness,
'kurtosis': kurtosis,
'sum_pos': sum_pos,
'sum_neg': sum_neg,
'digits_only': digits_only,
'spaces_only': spaces_only,
}
all_pandas_udf = {
# PyArrow only
'hll_init_agg': hll_init_agg(),
'hll_merge': hll_merge(),
}
| [
11748,
10688,
198,
198,
6738,
279,
893,
20928,
13,
25410,
1330,
5499,
355,
376,
198,
11748,
279,
893,
20928,
13,
25410,
13,
19199,
355,
309,
198,
198,
11748,
555,
485,
8189,
355,
334,
67,
198,
198,
6738,
277,
3110,
1330,
376,
3110,
... | 2.228599 | 1,028 |
from . import Type
from ..support.heading import Heading
from ..support import utils
from ..exceptions import UndressError
| [
6738,
764,
1330,
5994,
198,
6738,
11485,
11284,
13,
33878,
1330,
679,
4980,
198,
6738,
11485,
11284,
1330,
3384,
4487,
198,
6738,
11485,
1069,
11755,
1330,
13794,
601,
12331,
628
] | 4.133333 | 30 |
from .base_classes import Container
class Canvas(Container):
    """Represents the HTML ``<canvas>`` element: a drawing surface targeted
    either by the canvas 2D scripting API
    (https://developer.mozilla.org/en-US/docs/Web/API/Canvas_API) or by the
    WebGL API (https://developer.mozilla.org/en-US/docs/Web/API/WebGL_API)
    to render graphics and animations.

    Always consider placing fallback content inside the element: it is what
    browsers without canvas support -- or with JavaScript disabled -- will
    render, and a meaningful fallback text or sub-DOM also makes the canvas
    more accessible.
    """

    tag = "canvas"
    __slots__ = ()
class NoScript(Container):
    """Represents the HTML ``<noscript>`` element: its content is inserted
    into the page only when the required script type is unsupported or when
    scripting is currently disabled in the browser.
    """

    tag = "noscript"
    __slots__ = ()
class Script(Container):
    """Represents the HTML ``<script>`` element, used to embed executable
    code or data -- typically inline JavaScript or a reference to an
    external script.
    """

    tag = "script"
    __slots__ = ()
| [
6738,
764,
8692,
62,
37724,
1330,
43101,
628,
198,
4871,
1680,
11017,
7,
29869,
2599,
198,
220,
220,
220,
37227,
11041,
262,
11532,
4600,
27,
5171,
11017,
29,
63,
5002,
351,
2035,
262,
685,
5171,
11017,
36883,
198,
220,
220,
220,
7824... | 3.12938 | 371 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import copy
from octavia.common import constants
from octavia.common import data_models as o_data_models
from octavia.network import data_models as network_data_models
from gbpservice.contrib.nfp.configurator.drivers.base import base_driver
from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.\
v2.common import neutron_lbaas_data_models as n_data_models
from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.\
v2.haproxy import haproxy_driver_constants
from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.\
v2.haproxy.local_cert_manager import LocalCertManager
from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.\
v2.haproxy.rest_api_driver import HaproxyAmphoraLoadBalancerDriver
from gbpservice.contrib.nfp.configurator.lib import constants as common_const
from gbpservice.contrib.nfp.configurator.lib import data_parser
from gbpservice.contrib.nfp.configurator.lib import lb_constants
from gbpservice.contrib.nfp.configurator.lib import lbv2_constants
from gbpservice.nfp.common import exceptions
from gbpservice.nfp.core import log as nfp_logging
LOG = nfp_logging.getLogger(__name__)
# Copy from loadbalancer/v1/haproxy/haproxy_lb_driver.py
""" Loadbalancer generic configuration driver for handling device
configuration requests.
"""
class LbGenericConfigDriver(object):
    """
    Driver class for implementing loadbalancer configuration
    requests from Orchestrator.

    NOTE(review): ``self.parse`` and ``self.port`` are expected to be set by
    a subclass or mixin -- they are not defined in this class.
    """
    def configure_interfaces(self, context, resource_data):
        """ Configure interfaces for the service VM.
        Calls static IP configuration function and implements
        persistent rule addition in the service VM.
        Issues REST call to service VM for configuration of interfaces.
        :param context: neutron context
        :param resource_data: a dictionary of loadbalancer objects
        sent by the neutron plugin
        Returns: SUCCESS/Failure message with reason.
        """
        # Normalize the payload into the shape the interface handlers expect.
        resource_data = self.parse.parse_data(
            common_const.INTERFACES, resource_data)
        mgmt_ip = resource_data['mgmt_ip']
        try:
            result_log_forward = self._configure_log_forwarding(
                lb_constants.REQUEST_URL, mgmt_ip,
                self.port)
        except Exception as err:
            # Configuration attempt itself blew up: report and bail out.
            msg = ("Failed to configure log forwarding for service at %s. "
                   "Error: %s" % (mgmt_ip, err))
            LOG.error(msg)
            return msg
        else:
            if result_log_forward == common_const.UNHANDLED:
                # Log forwarding not applicable for this service; fall
                # through to the success return below.
                pass
            elif result_log_forward != lb_constants.STATUS_SUCCESS:
                # The call completed but reported failure; propagate the
                # failure status to the caller.
                msg = ("Failed to configure log forwarding for service at %s. "
                       % mgmt_ip)
                LOG.error(msg)
                return result_log_forward
            else:
                msg = ("Configured log forwarding for service at %s. "
                       "Result: %s" % (mgmt_ip, result_log_forward))
                LOG.info(msg)
        return lb_constants.STATUS_SUCCESS
# Because we use the REST client and amphora image from Octavia,
# we need a helper class that simulates Octavia DB operations
# in order to obtain Octavia data models from Neutron-lbaas data models
# All Octavia data models have these attributes
# Update Octavia model from dict
# Translate loadbalancer neutron model dict to octavia model
# Translate listener neutron model dict to octavia model
# Translate pool neutron model dict to octavia model
# Translate member neutron model dict to octavia model
# Translate HealthMonitor neutron model dict to octavia model
@base_driver.set_class_attr(
SERVICE_TYPE=lbv2_constants.SERVICE_TYPE,
SERVICE_VENDOR=haproxy_driver_constants.SERVICE_VENDOR)
| [
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
220,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
921,
743,
7330,
198,
2,
220,
220,
... | 2.717216 | 1,609 |
import argparse
import collections
import glob
import gzip
import heapq
import itertools
import json
import math
import multiprocessing
import os
import random
import re
import shutil
import subprocess
import sys
import time
from robustcode.analysis.graph import AstNode
from robustcode.parsers.parser import parse_file_server
from robustcode.util.misc import is_file_empty
from robustcode.util.misc import Logger
"""
Optimize project dependencies
Each file has a list of dependencies required by the type inference.
However, this is just an over-approximation that includes many files that are not actually used.
"""
def chunks(l, n):
    """Yield the elements of l as successive chunks of size n.

    The final chunk is shorter when len(l) is not a multiple of n.
    """
    for offset in range(0, len(l), n):
        chunk = l[offset : offset + n]
        yield chunk
"""
Optimize number of dependencies required for type inference.
Useful to speed-up type inference if the files are re-evaluated as part of adversarial search.
"""
"""
Collect and install npm packages
"""
"""
Split dataset into train/valid/test
"""
# Script entry point.
# NOTE(review): main() is not visible in this chunk — presumably defined
# elsewhere in the file; confirm before running standalone.
if __name__ == "__main__":
    main()
| [
11748,
1822,
29572,
198,
11748,
17268,
198,
11748,
15095,
198,
11748,
308,
13344,
198,
11748,
24575,
80,
198,
11748,
340,
861,
10141,
198,
11748,
33918,
198,
11748,
10688,
198,
11748,
18540,
305,
919,
278,
198,
11748,
28686,
198,
11748,
4... | 3.453947 | 304 |